*9 Hidden Layer with tensorboard :

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
import tensorflow as tf
import numpy as np
tf.set_random_seed(777)  # for reproducibility

# XOR training data: all columns but the last are features, last column is the label.
xy = np.loadtxt('./data/07train.txt')

x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]

X = tf.placeholder(tf.float32, [None, 2])
Y = tf.placeholder(tf.float32, [None, 1])

# Deep network with 9 hidden layers of 5 neurons each.
# Weight shapes must chain: [2,5] -> [5,5] x 9 -> [5,1].
W1 = tf.Variable(tf.random_uniform([2, 5], -1., 1.))
W2 = tf.Variable(tf.random_uniform([5, 5], -1., 1.))
W3 = tf.Variable(tf.random_uniform([5, 5], -1., 1.))
W4 = tf.Variable(tf.random_uniform([5, 5], -1., 1.))
W5 = tf.Variable(tf.random_uniform([5, 5], -1., 1.))
W6 = tf.Variable(tf.random_uniform([5, 5], -1., 1.))
W7 = tf.Variable(tf.random_uniform([5, 5], -1., 1.))
W8 = tf.Variable(tf.random_uniform([5, 5], -1., 1.))
W9 = tf.Variable(tf.random_uniform([5, 5], -1., 1.))
W10 = tf.Variable(tf.random_uniform([5, 5], -1., 1.))
W11 = tf.Variable(tf.random_uniform([5, 1], -1., 1.))

# Each bias length matches the output width of its layer's weight matrix.
b1 = tf.Variable(tf.zeros([5]))
b2 = tf.Variable(tf.zeros([5]))
b3 = tf.Variable(tf.zeros([5]))
b4 = tf.Variable(tf.zeros([5]))
b5 = tf.Variable(tf.zeros([5]))
b6 = tf.Variable(tf.zeros([5]))
b7 = tf.Variable(tf.zeros([5]))
b8 = tf.Variable(tf.zeros([5]))
b9 = tf.Variable(tf.zeros([5]))
b10 = tf.Variable(tf.zeros([5]))
b11 = tf.Variable(tf.zeros([1]))

# Hypothesis: sigmoid layers, each wrapped in a name_scope so the graph
# groups nicely in TensorBoard.
with tf.name_scope("layer1") as scope:
    L1 = tf.sigmoid(tf.matmul(X, W1) + b1)
with tf.name_scope("layer2") as scope:
    L2 = tf.sigmoid(tf.matmul(L1, W2) + b2)
with tf.name_scope("layer3") as scope:
    L3 = tf.sigmoid(tf.matmul(L2, W3) + b3)
with tf.name_scope("layer4") as scope:
    L4 = tf.sigmoid(tf.matmul(L3, W4) + b4)
with tf.name_scope("layer5") as scope:
    L5 = tf.sigmoid(tf.matmul(L4, W5) + b5)
with tf.name_scope("layer6") as scope:
    L6 = tf.sigmoid(tf.matmul(L5, W6) + b6)
with tf.name_scope("layer7") as scope:
    L7 = tf.sigmoid(tf.matmul(L6, W7) + b7)
with tf.name_scope("layer8") as scope:
    L8 = tf.sigmoid(tf.matmul(L7, W8) + b8)
with tf.name_scope("layer9") as scope:
    L9 = tf.sigmoid(tf.matmul(L8, W9) + b9)
with tf.name_scope("layer10") as scope:
    L10 = tf.sigmoid(tf.matmul(L9, W10) + b10)
with tf.name_scope("layer11") as scope:  # was duplicated as "layer1"
    hypothesis = tf.sigmoid(tf.matmul(L10, W11) + b11)

# Alternative 3-layer stack (kept disabled: enabling it would overwrite the
# deep hypothesis above, and W3 here is [5, 5], not an output layer of [*, 1]):
# L1 = tf.sigmoid(tf.matmul(X, W1) + b1)
# L2 = tf.sigmoid(tf.matmul(L1, W2) + b2)
# hypothesis = tf.sigmoid(tf.matmul(L2, W3) + b3)

# Binary cross-entropy loss.
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) *
                       tf.log(1 - hypothesis))

train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)

# Threshold at 0.5 to turn probabilities into 0/1 predictions.
predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for step in range(10001):
        sess.run(train, feed_dict={X: x_data, Y: y_data})
        if step % 100 == 0:
            print(step, sess.run(cost, feed_dict={
                  X: x_data, Y: y_data}))

    h, c, a = sess.run([hypothesis, predicted, accuracy],
                       feed_dict={X: x_data, Y: y_data})
    print("\nHypothesis: ", h, "\nCorrect: ", c, "\nAccuracy: ", a)
 
 
cs





*TensorBoard 란 :
TF logging / debugging tool

=> Visualize your TF Graph

=> Plot quantitative metrics

=> show additional data


*Tensorboard를 사용하는 5가지 방법

=> From TF Graph, decide which tensors you want to log

=> merge all summaries

=> create writer and add graph

=> run summary merge and summary

=> launch TensorBoard




*Tensorboard 출력하기 :


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
#$tensorboard --logdir=/tmp/xor_logs2
import tensorflow as tf
import numpy as np

# unpack=True transposes the file, so rows of xy are columns of the file.
xy = np.loadtxt('./data/07train.txt', unpack=True)
x_data = np.transpose(xy[0:-1])
y_data = np.reshape(xy[-1], (4, 1))  # 4 XOR samples, 1 label each

print(x_data)
print(y_data)

X = tf.placeholder(tf.float32, name='x-input')
Y = tf.placeholder(tf.float32, name='y-input')

# 8-layer network, 10 neurons per hidden layer: [2,10] -> [10,10] x 6 -> [10,1].
w1 = tf.Variable(tf.random_uniform([2, 10], -1.0, 1.0), name='weight1')
w2 = tf.Variable(tf.random_uniform([10, 10], -1.0, 1.0), name='weight2')
w3 = tf.Variable(tf.random_uniform([10, 10], -1.0, 1.0), name='weight3')
w4 = tf.Variable(tf.random_uniform([10, 10], -1.0, 1.0), name='weight4')
w5 = tf.Variable(tf.random_uniform([10, 10], -1.0, 1.0), name='weight5')
w6 = tf.Variable(tf.random_uniform([10, 10], -1.0, 1.0), name='weight6')
w7 = tf.Variable(tf.random_uniform([10, 10], -1.0, 1.0), name='weight7')
w8 = tf.Variable(tf.random_uniform([10, 1], -1.0, 1.0), name='weight8')

b1 = tf.Variable(tf.zeros([10]), name="Bias1")
b3 = tf.Variable(tf.zeros([10]), name="Bias3")
b2 = tf.Variable(tf.zeros([10]), name="Bias2")
b4 = tf.Variable(tf.zeros([10]), name="Bias4")
b5 = tf.Variable(tf.zeros([10]), name="Bias5")
b6 = tf.Variable(tf.zeros([10]), name="Bias6")
b7 = tf.Variable(tf.zeros([10]), name="Bias7")
b8 = tf.Variable(tf.zeros([1]), name="Bias8")

# ReLU variant, kept for comparison:
# L2 = tf.nn.relu(tf.matmul(X, w1) + b1)
# L3 = tf.nn.relu(tf.matmul(L2, w2) + b2)
# L4 = tf.nn.relu(tf.matmul(L3, w3) + b3)
# L5 = tf.nn.relu(tf.matmul(L4, w4) + b4)
# L6 = tf.nn.relu(tf.matmul(L5, w5) + b5)
# L7 = tf.nn.relu(tf.matmul(L6, w6) + b6)
# L8 = tf.nn.relu(tf.matmul(L7, w7) + b7)
with tf.name_scope("layer1") as scope:
    L2 = tf.sigmoid(tf.matmul(X, w1) + b1)
with tf.name_scope("layer2") as scope:
    L3 = tf.sigmoid(tf.matmul(L2, w2) + b2)
with tf.name_scope("layer3") as scope:
    L4 = tf.sigmoid(tf.matmul(L3, w3) + b3)
with tf.name_scope("layer4") as scope:
    L5 = tf.sigmoid(tf.matmul(L4, w4) + b4)
with tf.name_scope("layer5") as scope:
    L6 = tf.sigmoid(tf.matmul(L5, w5) + b5)
with tf.name_scope("layer6") as scope:
    L7 = tf.sigmoid(tf.matmul(L6, w6) + b6)
with tf.name_scope("layer7") as scope:
    L8 = tf.sigmoid(tf.matmul(L7, w7) + b7)
with tf.name_scope("layer8") as scope:
    hypothesis = tf.sigmoid(tf.matmul(L8, w8) + b8)

with tf.name_scope('cost') as scope:
    # Binary cross-entropy, logged as a TensorBoard scalar.
    cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1-Y) * tf.log(1 - hypothesis))
    tf.summary.scalar("cost", cost)

with tf.name_scope('train') as scope:
    a = tf.Variable(0.003)  # learning rate
    optimizer = tf.train.GradientDescentOptimizer(a)
    train = optimizer.minimize(cost)

# Histograms for TensorBoard's distributions tab.
w1_hist = tf.summary.histogram("weights1", w1)
w2_hist = tf.summary.histogram("weights2", w2)
b1_hist = tf.summary.histogram("biases1", b1)
b2_hist = tf.summary.histogram("biases2", b2)
y_hist = tf.summary.histogram("y", Y)

with tf.name_scope('accuracy') as scope:
    # floor(h + 0.5) rounds the probability to the nearest 0/1 class.
    correct_prediction = tf.equal(tf.floor(hypothesis+0.5), Y)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    tf.summary.scalar("accuracy", accuracy)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)

    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter("./tmp/xor_logs3",  sess.graph)

    for step in range(20000):
        sess.run(train, feed_dict={X: x_data, Y: y_data})
        if step % 200 == 0:
            # Evaluate all merged summaries and write them under this step.
            summary = sess.run(merged, feed_dict={X: x_data, Y: y_data})
            writer.add_summary(summary, step)
            print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(w1), sess.run(w2))

    print(sess.run([hypothesis, tf.floor(hypothesis+0.5), correct_prediction], feed_dict={X: x_data, Y: y_data}))
    print("accuracy", sess.run(accuracy, feed_dict={X: x_data, Y: y_data}))
 
cs


위코드를 실행 후 

View > Tool Windows > Terminal 선택 후 

아래 Command 실행 :


>>> tensorboard --logdir=./tmp/xor_logs3


http://localhost:6006 브라우저에서 열기






*ReLU 소스코드 :


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
import tensorflow as tf
import numpy as np

# XOR data: feature columns, then the label column.
xy = np.loadtxt('./data/07train.txt')
x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]

X = tf.placeholder(tf.float32, [None, 2])
Y = tf.placeholder(tf.float32, [None, 1])

# 11-layer network, 10 neurons per hidden layer: [2,10] -> [10,10] x 9 -> [10,1].
w1 = tf.Variable(tf.random_uniform([2, 10], -1.0, 1.0), name='weight1')
w2 = tf.Variable(tf.random_uniform([10, 10], -1.0, 1.0), name='weight2')
w3 = tf.Variable(tf.random_uniform([10, 10], -1.0, 1.0), name='weight3')
w4 = tf.Variable(tf.random_uniform([10, 10], -1.0, 1.0), name='weight4')
w5 = tf.Variable(tf.random_uniform([10, 10], -1.0, 1.0), name='weight5')
w6 = tf.Variable(tf.random_uniform([10, 10], -1.0, 1.0), name='weight6')
w7 = tf.Variable(tf.random_uniform([10, 10], -1.0, 1.0), name='weight7')
w8 = tf.Variable(tf.random_uniform([10, 10], -1.0, 1.0), name='weight8')
w9 = tf.Variable(tf.random_uniform([10, 10], -1.0, 1.0), name='weight9')
w10 = tf.Variable(tf.random_uniform([10, 10], -1.0, 1.0), name='weight10')
w11 = tf.Variable(tf.random_uniform([10, 1], -1.0, 1.0), name='weight11')

b1 = tf.Variable(tf.zeros([10]), name="Bias1")
b3 = tf.Variable(tf.zeros([10]), name="Bias3")
b2 = tf.Variable(tf.zeros([10]), name="Bias2")
b4 = tf.Variable(tf.zeros([10]), name="Bias4")
b5 = tf.Variable(tf.zeros([10]), name="Bias5")
b6 = tf.Variable(tf.zeros([10]), name="Bias6")
b7 = tf.Variable(tf.zeros([10]), name="Bias7")
b8 = tf.Variable(tf.zeros([10]), name="Bias8")
b9 = tf.Variable(tf.zeros([10]), name="Bias9")
b10 = tf.Variable(tf.zeros([10]), name="Bias10")
b11 = tf.Variable(tf.zeros([1]), name="Bias11")

# ReLU hidden layers avoid the vanishing-gradient problem of deep sigmoid
# stacks; only the output layer stays sigmoid to produce a probability.
#L1 = tf.sigmoid(tf.matmul(X, w1) + b1)
L1 = tf.nn.relu(tf.matmul(X, w1) + b1)
L2 = tf.nn.relu(tf.matmul(L1, w2) + b2)
L3 = tf.nn.relu(tf.matmul(L2, w3) + b3)
L4 = tf.nn.relu(tf.matmul(L3, w4) + b4)
L5 = tf.nn.relu(tf.matmul(L4, w5) + b5)
L6 = tf.nn.relu(tf.matmul(L5, w6) + b6)
L7 = tf.nn.relu(tf.matmul(L6, w7) + b7)
L8 = tf.nn.relu(tf.matmul(L7, w8) + b8)
L9 = tf.nn.relu(tf.matmul(L8, w9) + b9)
L10 = tf.nn.relu(tf.matmul(L9, w10) + b10)
hypothesis = tf.sigmoid(tf.matmul(L10, w11) + b11)

# Binary cross-entropy loss.
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1-Y) * tf.log(1 - hypothesis))

a = tf.Variable(0.01)  # learning rate
optimizer = tf.train.GradientDescentOptimizer(a)
train = optimizer.minimize(cost)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)

    for step in range(10000):
        sess.run(train, feed_dict={X: x_data, Y: y_data})
        if step % 200 == 0:
            print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(w1), sess.run(w2))

    # floor(h + 0.5) rounds the probability to a hard 0/1 prediction.
    correct_prediction = tf.equal(tf.floor(hypothesis+0.5), Y)

    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print(sess.run([hypothesis, tf.floor(hypothesis+0.5), correct_prediction], feed_dict={X: x_data, Y: y_data}))
    print("accuracy", accuracy.eval({X: x_data, Y: y_data}))
 
cs



*Activation Function : 자극에 반응을 할지 안 할지 결정(뉴런이 하는 역할과 동일)


X-> W-> S -> Y(예측값)

여러 뉴런들이 각 특징들을 잘 뽑아내서 이해를 잘 시킨다.


*CNN 알고리즘(Convolutional Neural Networks)

1980(LeCun)

=>Big Problem : 

사람의 두뇌를 구성하려면, 15층 16층 정도로 깊게 쌓아야 하는데

Neural Network로는 레이어 구성으로 동작이 잘 안된다는 것을 깨달음

=> Breakthrough :

Neural networks with many layers really could be trained well, if the weights are initialized in a clever way rather than randomly




*Geoffrey Hinton's Summary of findings up to today

- Our labeled datasets were thousands of times too small

- 컴퓨터가 너무 느렸다.

- 초기값을 잘못 줬다.

- We used the wrong type of non-linearity


2단의 NN을 쌓으려면 

1단의 출력값을 2단의 입력값으로 쓰는 방식으로 연결해야 한다.


1) K(x) = sigmoid(WX1 + B1)

2) Y = H(x) = sigmoid(K(x) W2 + B2)




*XOR With logistic regression :


07train.txt

1
2
3
4
5
6
# xor
# x1 x2 y
0   0   0
0   1   1
1   0   1
1   1   0
cs



1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
import tensorflow as tf
import numpy as np
tf.set_random_seed(777)  # for reproducibility

# XOR data — a single logistic regression cannot separate it, which is the
# point of this example (accuracy stays at ~0.5).
xy = np.loadtxt('./data/07train.txt')

x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]

X = tf.placeholder(tf.float32, [None, 2])
Y = tf.placeholder(tf.float32, [None, 1])

# Single layer: 2 inputs -> 1 output probability.
W = tf.Variable(tf.random_uniform([2, 1], -1., 1.))
b = tf.Variable(tf.random_uniform([1], -1., 1.))

hypothesis = tf.sigmoid(tf.matmul(X, W) + b)

# Binary cross-entropy loss.
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) *
                       tf.log(1 - hypothesis))

train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)

predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for step in range(10001):
        sess.run(train, feed_dict={X: x_data, Y: y_data})
        if step % 100 == 0:
            print(step, sess.run(cost, feed_dict={
                  X: x_data, Y: y_data}), sess.run(W))

    h, c, a = sess.run([hypothesis, predicted, accuracy],
                       feed_dict={X: x_data, Y: y_data})
    print("\nHypothesis: ", h, "\nCorrect: ", c, "\nAccuracy: ", a)
 
 
cs





*Neuron이 2개일 때 소스코드 : 


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
import tensorflow as tf
import numpy as np
tf.set_random_seed(777)  # for reproducibility

xy = np.loadtxt('./data/07train.txt')

x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]

X = tf.placeholder(tf.float32, [None, 2])
Y = tf.placeholder(tf.float32, [None, 1])

# Two-layer stack: two W's and two b's. The shapes [2, 2] and [2, 1] must
# chain (inner dimensions match: 2 = 2), and each bias length matches the
# last dimension of its weight matrix.
W1 = tf.Variable(tf.random_uniform([2, 2], -1., 1.))
W2 = tf.Variable(tf.random_uniform([2, 1], -1., 1.))
b1 = tf.Variable(tf.random_uniform([2], -1., 1.))
b2 = tf.Variable(tf.random_uniform([1], -1., 1.))

# Hidden layer output feeds the output layer — this is what lets the
# network solve XOR where a single logistic unit cannot.
L1 = tf.sigmoid(tf.matmul(X, W1) + b1)
hypothesis = tf.sigmoid(tf.matmul(L1, W2) + b2)

# Binary cross-entropy loss.
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) *
                       tf.log(1 - hypothesis))

train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)

predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for step in range(10001):
        sess.run(train, feed_dict={X: x_data, Y: y_data})
        if step % 100 == 0:
            print(step, sess.run(cost, feed_dict={
                  X: x_data, Y: y_data}))

    h, c, a = sess.run([hypothesis, predicted, accuracy],
                       feed_dict={X: x_data, Y: y_data})
    print("\nHypothesis: ", h, "\nCorrect: ", c, "\nAccuracy: ", a)
 
 
cs



*계층은 2단에 뉴런 10개를 사용한다고 했을 때 소스 코드 :

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
import tensorflow as tf
import numpy as np
tf.set_random_seed(777)  # for reproducibility

xy = np.loadtxt('./data/07train.txt')

x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]

X = tf.placeholder(tf.float32, [None, 2])
Y = tf.placeholder(tf.float32, [None, 1])

# Two-layer stack with a 10-neuron hidden layer.
# Shapes must chain: [2, 10] -> [10, 1]; each bias length matches the
# last dimension of its weight matrix.
W1 = tf.Variable(tf.random_uniform([2, 10], -1., 1.))
W2 = tf.Variable(tf.random_uniform([10, 1], -1., 1.))
b1 = tf.Variable(tf.random_uniform([10], -1., 1.))
b2 = tf.Variable(tf.random_uniform([1], -1., 1.))

L1 = tf.sigmoid(tf.matmul(X, W1) + b1)
hypothesis = tf.sigmoid(tf.matmul(L1, W2) + b2)

# Binary cross-entropy loss.
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) *
                       tf.log(1 - hypothesis))

train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)

predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for step in range(10001):
        sess.run(train, feed_dict={X: x_data, Y: y_data})
        if step % 100 == 0:
            print(step, sess.run(cost, feed_dict={
                  X: x_data, Y: y_data}))

    h, c, a = sess.run([hypothesis, predicted, accuracy],
                       feed_dict={X: x_data, Y: y_data})
    print("\nHypothesis: ", h, "\nCorrect: ", c, "\nAccuracy: ", a)
 
 
cs


*계층은 3단에 뉴런 5개를 사용한다고 했을 때 소스 코드 :


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
import tensorflow as tf
import numpy as np
tf.set_random_seed(777)  # for reproducibility

xy = np.loadtxt('./data/07train.txt')

x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]

X = tf.placeholder(tf.float32, [None, 2])
Y = tf.placeholder(tf.float32, [None, 1])

# Three-layer stack: 2 -> 5 -> 4 -> 1. Weight shapes chain and each bias
# length matches the last dimension of its weight matrix.
W1 = tf.Variable(tf.random_uniform([2, 5], -1., 1.))
W2 = tf.Variable(tf.random_uniform([5, 4], -1., 1.))
W3 = tf.Variable(tf.random_uniform([4, 1], -1., 1.))
b1 = tf.Variable(tf.random_uniform([5], -1., 1.))
b2 = tf.Variable(tf.random_uniform([4], -1., 1.))
b3 = tf.Variable(tf.random_uniform([1], -1., 1.))

# Three sigmoid layers stacked, each feeding the next.
L1 = tf.sigmoid(tf.matmul(X, W1) + b1)
L2 = tf.sigmoid(tf.matmul(L1, W2) + b2)
hypothesis = tf.sigmoid(tf.matmul(L2, W3) + b3)

# Binary cross-entropy loss.
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) *
                       tf.log(1 - hypothesis))

train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)

predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for step in range(10001):
        sess.run(train, feed_dict={X: x_data, Y: y_data})
        if step % 100 == 0:
            print(step, sess.run(cost, feed_dict={
                  X: x_data, Y: y_data}))

    h, c, a = sess.run([hypothesis, predicted, accuracy],
                       feed_dict={X: x_data, Y: y_data})
    print("\nHypothesis: ", h, "\nCorrect: ", c, "\nAccuracy: ", a)
 
 
cs




*Learning Rate :

큰 Learning Rate 값은 overshooting을 유발하고

값이 너무 작을 경우에는 오래걸린다.


*Data normalization 필요성(표준화 방법):


1
2
x_data[:,1] = (x_data[:,1] - x_data[:,1].mean()) / x_data[:,1].std()
 
cs





*Overfitting :

머신러닝 학습의 가장 큰 문제점 중 하나

- Our model is very good with training data set(with memorization)

- Not good at test dataset or in real use


*Solution for overfitting:

- Data가 많으면 많을 수록 좋다.

- Reduce the number of features

- Regularization

=> let's not have too big numbers in the weight


*Underfit VS Overfit


- Training Data 만 잘 표현하는 모델 : Training Error < test Error

- 새로운 데이터까지 잘 표현할 수 있는가 체크

- 적절한 복잡도까지 무엇인지 체크 -> model selection problem


*Bias(퍼져있는 상태)-Variance Tradeoff

Variance가 큰 모델은 좋은 모델이 아니다:

Low variance와 Low Bias가 가장 좋은 모델

bias : 중앙으로부터 떨어진 에러, hbar는 예측의 평균값

variance : 예측의 펴균값과 예측값들의 분산


Bias는 모델의 복잡도가 높아질 수록 error 발생률이 줄어든다

반면 Variance는 모델의 복잡도가 높아질 수록 error 발생률이 늘어난다.

둘의 에러가 가장 적은 상태(model complexity가 중간쯤인)가 가장 최적화된 모델 상태이다.





*Online Learning :
대량데이터에 대해 부분으로 나누어 학습을 시키거나, 학습이 끝난 후 추가적인 데이터에 대해 학습시키는 방법.


*MNIST Example :
=> 손글씨인 이미지 데이터를 학습시켜 예측하도록 하는 예제


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
from tensorflow.examples.tutorials.mnist import input_data

import tensorflow as tf
import random
import matplotlib.pylab as plt

# one_hot=True encodes each digit label as a length-10 one-hot vector.
mnist = input_data.read_data_sets('./MNIST_data/', one_hot=True)

sess = tf.InteractiveSession()

# Create the model: 784 pixel inputs -> 10 class scores.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
hypothesis = tf.nn.softmax(tf.matmul(x, W) + b)

# Define loss and optimizer: cross-entropy summed over classes (axis=1 = row).
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(hypothesis), axis=1)) #row
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

# Train
tf.global_variables_initializer().run()

for i in range(5500):  #5500
    batch_xs, batch_ys = mnist.train.next_batch(100)
    train_step.run({x: batch_xs, y: batch_ys})
    print ("cost:", cross_entropy.eval({x: batch_xs, y: batch_ys}))

# Test trained model: argmax picks the most probable class per row.
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

# Show one random test sample with its label and prediction.
r = random.randint(0, mnist.test.num_examples - 1)
print('Label:', sess.run(tf.argmax(mnist.test.labels[r:r+1], 1)))
print('Prediction:', sess.run(tf.argmax(hypothesis, 1), {x: mnist.test.images[r:r+1]}))

plt.imshow(mnist.test.images[r:r+1].reshape(28, 28)
           , cmap='Greys', interpolation='nearest')
plt.show()
 
 
cs


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
from tensorflow.examples.tutorials.mnist import input_data

import tensorflow as tf
import random
import matplotlib.pylab as plt

mnist = input_data.read_data_sets('./MNIST_data/', one_hot=True)

sess = tf.InteractiveSession()

# Create the model
x = tf.placeholder(tf.float32, [None, 784])  # any batch size, 784 columns (28x28 pixels)
y = tf.placeholder(tf.float32, [None, 10])   # any batch size, 10 columns (one-hot digit)

W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
hypothesis = tf.nn.softmax(tf.matmul(x, W) + b)

# Define loss and optimizer: cross-entropy summed over classes (axis=1 = row).
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(hypothesis), axis=1)) #row
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

# Train
tf.global_variables_initializer().run()

for i in range(5500):  #5500
    batch_xs, batch_ys = mnist.train.next_batch(100)  # fetch the data in mini-batches of 100
    train_step.run({x: batch_xs, y: batch_ys})
    print ("cost:", cross_entropy.eval({x: batch_xs, y: batch_ys}))

# Test trained model
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(y, 1))  # compare prediction with the true label
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # mean of correct flags = accuracy
print(accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

# Show one random test sample with its label and prediction.
r = random.randint(0, mnist.test.num_examples - 1)
print('Label:', sess.run(tf.argmax(mnist.test.labels[r:r+1], 1)))
print('Prediction:', sess.run(tf.argmax(hypothesis, 1), {x: mnist.test.images[r:r+1]}))

plt.imshow(mnist.test.images[r:r+1].reshape(28, 28)
           , cmap='Greys', interpolation='nearest')
plt.show()
 
 
cs






*Softmax Classification의 multinominal 개념 :



x1 , x2 , y 값이 주어졌을 때

좌표에 [x1, x2] = y로 표시한다. 


[wa1, wa2, wa3] [x1, x2, x3] (세로)  = [w1x1 + w2x2 + w3x3]  => a인지 아닌지 여부  [2.0]   => 0.7 (2.0/전체값 = softmax)

[wb1, wb2, wb3] [x1, x2, x3] (세로)  = [w1x1 + w2x2 + w3x3] => b인지 아닌지 여부 [1.0] => 0.2 (1.0/전체값 = softmax)

[wc1, wc2, wc3] [x1, x2, x3] (세로)  = [w1x1 + w2x2 + w3x3] => c인지 아닌지 여부 [0.1] =0.1 (0.1/전체값 = softmax)


H(x) = softmax(WX + b)

=> softmax 값이 가장 큰 것을 취한다(위에서는 'A')


argmax라는 함수를 쓴다

argmax에 확률을 집어넣어준다 (위 예시에서는 argmax(0.7) =1 , argmax(0.2) = 0, argmax(0.1) = 0 )




*A, B, C학점 예측 소스코드 :


05train.txt

1
2
3
4
5
6
7
8
9
#x0 x1 x2 y[A   B   C]
1   2   1   0   0   1
1   3   2   0   0   1
1   3   4   0   0   1
1   5   5   0   1   0
1   7   5   0   1   0
1   2   5   0   1   0
1   6   6   1   0   0
1   7   7   1   0   0
cs


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
import tensorflow as tf
import numpy as np

# Grade data: 3 feature columns (x0 is a bias column of 1s), then a
# one-hot label over the classes [A, B, C].
xy = np.loadtxt('./data/05train.txt', dtype='float32')

x_data = xy[:, 0:3]
y_data = xy[:, 3:]
print(x_data.shape, y_data.shape)

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

# Softmax classifier: 3 features -> 3 class probabilities.
W = tf.Variable(tf.zeros([3, 3]))
hypothesis = tf.nn.softmax(tf.matmul(X, W))
cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis), axis=1))
train = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)

    for step in range(5001):
        sess.run(train, feed_dict={X: x_data, Y: y_data})
        if step % 200 == 0:
            print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W))
    a = sess.run(hypothesis, feed_dict={X: [[1, 11, 7]]})
    print(a, sess.run(tf.argmax(a, 1)))
# [[0.7576524  0.2345292  0.00781842]] [0]  index 0 is largest: ~76% probability of grade A
    c = sess.run(hypothesis, feed_dict={X: [[1, 1, 0]]})
    print(c, sess.run(tf.argmax(c, 1)))
# [[0.00227058 0.0237952  0.9739342 ]] [2]  index 2 is largest: ~97% probability of grade C
cs


*A, B, C학점 예측 소스코드(accuracy 포함) :

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
import tensorflow as tf
import numpy as np

# Grade data: 3 feature columns (x0 is a bias column of 1s), then a
# one-hot label over the classes [A, B, C].
xy = np.loadtxt('./data/05train.txt', dtype='float32')

x_data = xy[:, 0:3]
y_data = xy[:, 3:]
print(x_data.shape, y_data.shape)

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

# Softmax classifier: 3 features -> 3 class probabilities.
W = tf.Variable(tf.zeros([3, 3]))
hypothesis = tf.nn.softmax(tf.matmul(X, W))
cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis), axis=1))
train = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)

    for step in range(5001):
        sess.run(train, feed_dict={X: x_data, Y: y_data})
        if step % 200 == 0:
            print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W))
    a = sess.run(hypothesis, feed_dict={X: [[1, 11, 7]]})
    print(a, sess.run(tf.argmax(a, 1)))
# [[0.7576524  0.2345292  0.00781842]] [0]  index 0 is largest: ~76% probability of grade A
    c = sess.run(hypothesis, feed_dict={X: [[1, 1, 0]]})
    print(c, sess.run(tf.argmax(c, 1)))
# [[0.00227058 0.0237952  0.9739342 ]] [2]  index 2 is largest: ~97% probability of grade C

    # Accuracy over the training set: fraction of rows where the predicted
    # class (argmax of softmax) matches the labeled class.
    correct_prediction = tf.equal(tf.argmax(hypothesis, 1),
                                  tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction
                                      , tf.float32))
    print(sess.run(accuracy, feed_dict={X: x_data, Y: y_data}))  # 0.875 = 87% accuracy
cs





*Binary Classification :


- 활용분야 :
=> spam email Detection : Spam or Ham

=> Facebook feed : Show or hide

=> Credit Card Fraud Detection : fraud or not


그래프 그리기 online : 

https://www.desmos.com/calculator



=> w값이 클 수록 기울기가 y 축에 가까워진다 :









I = log 1/p 


I = 정보량

1/p = 전체 사건 수


2^3 = 8  => 3비트

- 3비트 불확실성이 크다


2^1 = 2 => 1비트

- 2비트 불확실성이 낮다


=> 불확실성이 작은 것이 더 좋다!(entropy가 낮은것)


*Cost Function  :


C:(H(x), y) = -ylog(H(x)) - (1-y)log( 1-H(x) )


=> cost(W) = - 1/m SUM (ylog(H(x)) + 1-y) log (1- H(x)) )


*Logistic Regression (Hypothesis & Cost 함수 & Cost 최소화):





*Logistic Regression 소스 에제:


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
import tensorflow as tf
import numpy as np
tf.set_random_seed(888)

xy = np.loadtxt('./data/04train.txt', dtype='float32')
x_data = xy[:, 0:-1]  # all rows; every column except the last is a feature
y_data = xy[:, [-1]]  # all rows; only the last column is the label

print(x_data.shape, y_data.shape)  # (6, 3) (6, 1) -> so W must be [3, 1]

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

W = tf.Variable(tf.random_uniform([3, 1], -1., 1.))  # (6, 3) x [3, 1] -> (6, 1)
h = tf.matmul(X, W)  # matrix multiplication
# Sigmoid written out explicitly: 1 / (1 + e^-h).
hypothesis = tf.div(1., 1. + tf.exp(-h))

# Binary cross-entropy; the second term weights by (1 - Y), the
# probability mass of the negative class.
cost = -tf.reduce_mean(Y * tf.log(hypothesis) +
                       (1 - Y) * tf.log(1 - hypothesis))
optimizer = tf.train.GradientDescentOptimizer(0.1)
train = optimizer.minimize(cost)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for step in range(2001):
    sess.run(train, feed_dict={X: x_data, Y: y_data})
    if step % 20 == 0:
        print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data})
              , sess.run(W))
cs






* Logistic Regression (with Accuracy) :


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
import tensorflow as tf
import numpy as np
tf.set_random_seed(888)

xy = np.loadtxt('./data/04train.txt', dtype='float32')
x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]

print(x_data.shape, y_data.shape)  # (6, 3) (6, 1) -> so W must be [3, 1]

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

W = tf.Variable(tf.random_uniform([3, 1], -1., 1.))  # (6, 3) x [3, 1] -> (6, 1)
h = tf.matmul(X, W)  # matrix multiplication
# Sigmoid written out explicitly: 1 / (1 + e^-h).
hypothesis = tf.div(1., 1. + tf.exp(-h))

# Binary cross-entropy; the second term weights by (1 - Y).
cost = -tf.reduce_mean(Y * tf.log(hypothesis) +
                       (1 - Y) * tf.log(1 - hypothesis))
optimizer = tf.train.GradientDescentOptimizer(0.1)
train = optimizer.minimize(cost)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for step in range(2001):
    sess.run(train, feed_dict={X: x_data, Y: y_data})
    if step % 20 == 0:
        print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data})
              , sess.run(W))

a = sess.run(hypothesis, feed_dict={X: [[1, 2, 2]]})
print(a > 0.5)

# Prediction vs answer:
# 0     0
# 0     1
# 1     1
# 2 out of 3 correct.
# Expressed as a formula:

predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y)
                                  , dtype=tf.float32))
acc = sess.run(accuracy, feed_dict={X: x_data, Y: y_data})
print(acc)
 
cs




*Diabetes 유관 Dataset을 분석 :



★문제풀이 :

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
# Diabetes dataset, logistic regression -- exercise attempt (TensorFlow 1.x).
import tensorflow as tf
import numpy as np

xy = np.loadtxt('./data/diabetes.csv', delimiter=',', dtype='float32')
print(xy)

x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]

# print(x_data, y_data)
print(x_data.shape, y_data.shape)  # 8 feature columns, 1 label -> W is [8, 1]

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

W = tf.Variable(tf.random_uniform([8, 1], -1., 1.))

h = tf.matmul(X, W)  # matrix multiplication
hypothesis = tf.div(1., 1. + tf.exp(-h))  # sigmoid: 1 / (1 + e^-h)

# Binary cross-entropy; (1 - Y) is the complementary-label term
# (the scraped original read "1 + Y", a bug).
cost = -tf.reduce_mean(Y * tf.log(hypothesis) +
                       (1 - Y) * tf.log(1 - hypothesis))
optimizer = tf.train.GradientDescentOptimizer(0.1)
train = optimizer.minimize(cost)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for step in range(200001):
    sess.run(train, feed_dict={X: x_data, Y: y_data})
    if step % 20 == 0:
        print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data})
              , sess.run(W))

predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y)
                                  , dtype=tf.float32))
acc = sess.run(accuracy, feed_dict={X: x_data, Y: y_data})
print(acc)
cs


★정답 :

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
# Diabetes dataset, logistic regression -- reference answer (TensorFlow 1.x).
import tensorflow as tf
import numpy as np
tf.set_random_seed(777)  # for reproducibility

xy = np.loadtxt('./data/diabetes.csv', delimiter=',', dtype=np.float32)
x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]

print(x_data.shape, y_data.shape)

X = tf.placeholder(tf.float32, shape=[None, 8])
Y = tf.placeholder(tf.float32, shape=[None, 1])

W = tf.Variable(tf.random_uniform([8, 1], -1., 1.))
b = tf.Variable(tf.random_uniform([1], -1., 1.))

hypothesis = tf.sigmoid(tf.matmul(X, W) + b)

# Binary cross-entropy loss.
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) *
                       tf.log(1 - hypothesis))

train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)

# Accuracy: fraction of thresholded predictions matching the labels.
predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for step in range(30001):
        cost_val, _ = sess.run([cost, train], feed_dict={X: x_data, Y: y_data})
        if step % 200 == 0:
            print(step, cost_val)

    h, c, a = sess.run([hypothesis, predicted, accuracy],
                       feed_dict={X: x_data, Y: y_data})
    print("\nHypothesis: ", h, "\nCorrect (Y): ", c, "\nAccuracy: ", a)
cs

*multi-variable linear regression은 이전 linear regression과 다르게 x값이 여러개이다


1) one-variable regression Hypothesis

H(x) = Wx + b


2) two-variable regression Hypothesis

H(x1, x2) = w1x1 + w2x2 + b


3) multi-variable regression Hypothesis

H(x1,x2...xn) = w1x1 + w2x2 + w3x3 ... + wnxn + b



위 수식은 비효율적 따라서 아래 Matrix Multiplication 수식으로 변경해준다


H(x1, x2...xn) = [w1, w2, w3] [x1 x2 x3] (세로) + b

H(x1, x2...xn) = [x1, x2, x3] [w1 w2 w3] (세로) + b

H(X) = WX + b

H(X) = XW + b


b term을 없앤 simplified된 형태 =>  w 괄호 안으로 넣어준다.


H(x1, x2, ..., xn) = [b, w1, w2, w3] [1, x1, x2, x3] (세로)

H(x1, x2, ..., xn) = [1, x1, x2, x3] [b, w1, w2, w3] (세로)

H(X) = WX
H(X) = XW

아래와 같은 Transpose 형태로 쓸 수도 있다 :

w = [w1 w2 w3] (세로)   x= [x1 x2 x3] (세로)
H(X) = WtX + b




*Multi-variable linear regression에서의 Cost function :


Cost Function은 이전 linear regression과 똑같다 :

Gradient Descent 알고리즘을 사용한다.


cost(W, b) = (1/m) Σ ( H(xᵢ) − yᵢ )²   (예측값과 정답의 차이를 제곱한 것의 평균)



* Multi-variable linear regression 구현 실습 : 



1) 2개의 x variable( 비효율적인 방법 = matrix 형태아님 ) :


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
# Two-variable linear regression, non-matrix form (TensorFlow 1.x).
import tensorflow as tf

x1_data = [1., 0., 3., 0., 5.]
x2_data = [0., 2., 0., 4., 0.]
y_data = [1, 2, 3, 4, 5]

# Scraper collapsed "-1., 1." into "-11" and dropped the "b =" name; restored.
W1 = tf.Variable(tf.random_uniform([1], -1., 1.))
W2 = tf.Variable(tf.random_uniform([1], -1., 1.))
b = tf.Variable(tf.random_uniform([1], -1., 1.))

hypothesis = W1 * x1_data + W2 * x2_data + b  # H = w1*x1 + w2*x2 + b
cost = tf.reduce_mean(tf.square(hypothesis - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.1)
train = optimizer.minimize(cost)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for step in range(2001):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(cost), sess.run(W1), sess.run(W2), sess.run(b))
cs


2) 2개의 x variable( 효율적인 방법 = matrix 형태 ) :


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
# Two-variable linear regression, matrix form (TensorFlow 1.x).
import tensorflow as tf

x_data = [[1., 0., 3., 0., 5.], [0., 2., 0., 4., 0.]]  # 2 features x 5 samples
y_data = [1, 2, 3, 4, 5]

W = tf.Variable(tf.random_uniform([1, 2], -1., 1.))  # [1,2] x [2,5] -> [1,5]
b = tf.Variable(tf.random_uniform([1], -1., 1.))

hypothesis = tf.matmul(W, x_data) + b  # H(X) = WX + b
cost = tf.reduce_mean(tf.square(hypothesis - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.1)
train = optimizer.minimize(cost)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for step in range(2001):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(cost), sess.run(W), sess.run(b))
cs


1)2) 의 비교 


b term을 없애고 matrix를 [1,3]으로 변경 시 :

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
# Matrix-form regression with the bias absorbed into W (TensorFlow 1.x):
# a constant-1 row in x_data plays the role of b, so W becomes [1, 3].
import tensorflow as tf

x_data = [[1, 1, 1, 1, 1],        # constant 1s: stand-in for the bias term
          [1., 0., 3., 0., 5.],
          [0., 2., 0., 4., 0.]]
y_data = [1, 2, 3, 4, 5]

W = tf.Variable(tf.random_uniform([1, 3], -1., 1.))  # [1,3] x [3,5] -> [1,5]

hypothesis = tf.matmul(W, x_data)  # H(X) = WX  (no separate b)
cost = tf.reduce_mean(tf.square(hypothesis - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.1)
train = optimizer.minimize(cost)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for step in range(2001):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(cost), sess.run(W))
cs






*Data를 load 하여 학습 시키기 : 


=> Pycharm에 data를 추가한다.


data.zip

프로젝트 폴더에 놓는다


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
# Linear regression on data loaded from a whitespace-delimited text file.
import tensorflow as tf
import numpy as np

xy = np.loadtxt('./data/03train.txt', dtype='float32')
print(xy)

x_data = xy[:, 0:-1]  # all rows, every column except the last -> features
y_data = xy[:, [-1]]  # all rows, last column only (kept 2-D) -> labels
print(x_data.shape, y_data.shape)

W = tf.Variable(tf.random_uniform([3, 1], -1., 1.))  # 3 = number of X columns
hypothesis = tf.matmul(x_data, W)
cost = tf.reduce_mean(tf.square(hypothesis - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.1)
train = optimizer.minimize(cost)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for step in range(2001):
    sess.run(train)
    if step % 20 == 0:  # once every 20 steps
        print(step, sess.run(cost), sess.run(W))
cs



*CSV 파일을 읽어서 출력하기(delimiter는 , 콤마로 ) :


1
2
3
4
5
6
7
8
9
10
# Read a comma-delimited CSV file and split it into feature / label arrays.
import tensorflow as tf
import numpy as np
 
xy = np.loadtxt('./data/test-score.csv', delimiter=',', dtype='float32')
print(xy)
 
x_data = xy[:, 0 : -1]  # all rows, every column except the last -> features
y_data = xy[:, [-1]]  # all rows, last column only (kept 2-D) -> labels
print(x_data.shape, y_data.shape)
 
cs


*File input linear regression :


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
# File-input linear regression with placeholders and prediction (TF 1.x).
import tensorflow as tf
import numpy as np

tf.set_random_seed(777)  # for reproducibility

xy = np.loadtxt('./data/test-score.csv', delimiter=',', dtype=np.float32)
x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]

print(x_data.shape, x_data, len(x_data))
print(y_data.shape, y_data)

X = tf.placeholder(tf.float32, shape=[None, 3])
Y = tf.placeholder(tf.float32, shape=[None, 1])

W = tf.Variable(tf.random_normal([3, 1]))
b = tf.Variable(tf.random_normal([1]))

hypothesis = tf.matmul(X, W) + b

cost = tf.reduce_mean(tf.square(hypothesis - Y))

# Small learning rate: the raw scores are in the 0-100 range and unscaled.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-5)
train = optimizer.minimize(cost)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for step in range(2001):
    cost_val, hy_val, _ = sess.run(
        [cost, hypothesis, train], feed_dict={X: x_data, Y: y_data})
    if step % 10 == 0:  # once every 10 steps
        print(step, "Cost: ", cost_val, sess.run(W), sess.run(b))

# Predict for unseen score rows (the scrape fused these literals together;
# restored as three comma-separated scores per sample).
print("=====prediction=====")
print(sess.run(hypothesis, feed_dict={X: [[100, 70, 101]]}))
print(sess.run(hypothesis, feed_dict={X: [[60, 70, 110], [90, 100, 80]]}))
cs








+ Recent posts