LB > Node Port > Cluster IP


Node Port를 만들면 Cluster IP가 자동으로 만들어진다


*SVC 지우기

1
2
3
4
5
6
7
8
9
[root@host01-4 hk]# k get service
NAME           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
headless-svc   ClusterIP   10.96.102.169   <none>        80/TCP    27m
kubernetes     ClusterIP   10.96.0.1       <none>        443/TCP   48m
test-svc       ClusterIP   10.96.25.31     <none>        80/TCP    45m
[root@host01-4 hk]# k delete svc test-svc
service "test-svc" deleted
[root@host01-4 hk]#
 
cs


*NodePort 생성하기(생성뒤에 해당 Port로 curl을 날릴 수 있다) : 

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
vi nodePort.yaml
 
apiVersion: v1
kind: Service
metadata:
  name: test-svc
spec:
        type: NodePort
        selector:
          type: test
        ports:
        - port: 80
          targetPort: 8080
 
 
[root@host01-4 hk]# k create -f nodePort.yaml
 
service "test-svc" created
[root@host01-4 hk]#
[root@host01-4 hk]# k get svc
NAME           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
headless-svc   ClusterIP   10.96.102.169   <none>        80/TCP         31m
kubernetes     ClusterIP   10.96.0.1       <none>        443/TCP        52m
test-svc       NodePort    10.107.213.48   <none>        80:32045/TCP   6s
 
 
#NodePort에서 80포트는 cluster IP port이고 32045는 Node Port
 
 
cs


*Load Balancer 생성

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
k edit svc test-svc
 
# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: 2018-05-25T07:44:57Z
  name: test-svc
  namespace: default
  resourceVersion: "29844"
  selfLink: /api/v1/namespaces/default/services/test-svc
  uid: 85b2c6ea-5fef-11e8-8e09-005056b28b62
spec:
  clusterIP: 10.107.213.48
  externalTrafficPolicy: Cluster
  ports:
  - nodePort: 32045
    port: 80
    protocol: TCP
    targetPort: 8080
  selector:
    type: test
  sessionAffinity: None
  type: LoadBalancer            #LoadBalancer로 수정해주면...
status:
  loadBalancer: {}
 
 
[root@host01-4 hk]# k get svc
NAME           TYPE           CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
headless-svc   ClusterIP      10.96.102.169   <none>        80/TCP         38m
kubernetes     ClusterIP      10.96.0.1       <none>        443/TCP        59m
test-svc       LoadBalancer   10.107.213.48   <pending>     80:32045/TCP   7m  #Node Port에 Cluster IP까지 자동으로 구성된다 (외부 LB 제공자가 없어 EXTERNAL-IP는 <pending> 상태)
[root@host01-4 hk]#
 
cs


상위 네트워크 모듈을 생성하면 하위 매핑 정보는 알아서 구성하게 됨




*TargetPort 설정 이유


1
2
3
4
5
6
7
 
  ports:
  - nodePort: 32045
    targetPort: 8080
 
 
 
cs


=> Cluster의 port와 pod Port 정보가 다르기 때문



*비즈니스 관점에서 Probe가 정상적으로 동작하는지 여부를 체크할 수 있어야 한다....


Client가 접속했을 때, Svc 뒤의 Pod가 응답하지 못하면(Svc > Pod(X))

Client는 서비스가 정상동작하지 않는다고 생각하게 된다.

이때 readinessProbe를 활용하면 cluster 정상동작 여부를 체크할 수 있다 :

1
2
3
4
5
6
7
8
9
      containers:
      - image: reg.cloud.com/kubia
        name: kubia
        readinessProbe:
          exec:
            command:
            - ls
            - /var/ready
 
cs


*ReadinessProbe 생성 예제 :


1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
#vi rc-probe.yaml 
# /var/ready가 pod 안에 있으면 probe에서 통과 시킨다(Ready 상태로 만들어줌)
apiVersion: v1
kind: ReplicationController
metadata:
  name: rc-readiness
spec:
  replicas: 3
  template:
    metadata:
      labels:
        type: test
    spec:
      containers:
      - image: reg.cloud.com/kubia
        name: kubia
        readinessProbe:
          exec:
            command:
            - ls
            - /var/ready
        ports:
        - containerPort: 8080
          protocol: TCP
 
 
#vi service.yaml
 
apiVersion: v1
kind: Service
metadata:
  name: test-svc
spec:
        selector:
          type: test
        ports:
        - port: 80
          targetPort: 8080
 
 
 
[root@host01-4 hk]# k create -f rc-probe.yaml
 
replicationcontroller "rc-readiness" created
[root@host01-4 hk]# k get po -o wide
NAME                 READY     STATUS    RESTARTS   AGE       IP          NODE
rc-readiness-22hxs   0/1       Running   0          10s       10.36.0.2   host01-3.cloud.com
rc-readiness-2pt29   0/1       Running   0          10s       10.44.0.1   host01-2.cloud.com
rc-readiness-h74t2   0/1       Running   0          10s       10.36.0.1   host01-3.cloud.com
 
 
#Running 상태이지만 Ready가 아무것도 없다
 
 
 
[root@host01-4 hk]# k create -f service.yaml
service "test-svc" created
[root@host01-4 hk]# k get svc
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1     <none>        443/TCP   6m
test-svc     ClusterIP   10.98.99.22   <none>        80/TCP    4s
[root@host01-4 hk]# k get ep
NAME         ENDPOINTS          AGE
kubernetes   10.10.12.14:6443   6m
test-svc                        9s
 
 
#Endpoint가 아무것도 안 떠있다.
 
[root@host01-4 hk]# k exec rc-readiness-22hxs -- touch /var/ready
 
#1번 pod에 /var/ready를 만들어준다 (Ready 상태로 만들어주기 위해)
 
[root@host01-4 hk]# k get ep
NAME         ENDPOINTS          AGE
kubernetes   10.10.12.14:6443   8m
test-svc     10.36.0.2:8080     1m
 
#test-svc의 ENDPOINT가 enabled된다
 
[root@host01-4 hk]# k get svc
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1     <none>        443/TCP   8m
test-svc     ClusterIP   10.98.99.22   <none>        80/TCP    1m
 
[root@host01-4 hk]# k get po -o wide
NAME                 READY     STATUS    RESTARTS   AGE       IP          NODE
rc-readiness-22hxs   1/1       Running   0          4m        10.36.0.2   host01-3.cloud.com
rc-readiness-2pt29   0/1       Running   0          4m        10.44.0.1   host01-2.cloud.com
rc-readiness-h74t2   0/1       Running   0          4m        10.36.0.1   host01-3.cloud.com
 
#1번 Pod가 Ready 상태로 바뀐다
 
 
[root@host01-4 hk]# k exec rc-readiness-2pt29 -- touch /var/ready
[root@host01-4 hk]# k get po -o wide
NAME                 READY     STATUS    RESTARTS   AGE       IP          NODE
rc-readiness-22hxs   1/1       Running   0          5m        10.36.0.2   host01-3.cloud.com
rc-readiness-2pt29   1/1       Running   0          5m        10.44.0.1   host01-2.cloud.com
rc-readiness-h74t2   0/1       Running   0          5m        10.36.0.1   host01-3.cloud.com
 
#2번 Pod Ready 상태로 바뀐다
 
 
 
 
cs






+ Recent posts