*Creating a Kubernetes Namespace named "test" and using resources inside it:


[root@host01-4 hk]# k create ns test
namespace "test" created
[root@host01-4 hk]# k get ns
NAME          STATUS    AGE
default       Active    6h
kube-public   Active    6h
kube-system   Active    6h
test          Active    4s
[root@host01-4 hk]# ls -l
total 974676
-rw-------. 1 root root  50727424 May 25 10:21 dns-kube-dns
-rw-------. 1 root root  42481152 May 25 10:21 dns-sidecar
-rw-r--r--. 1 root root       158 May 25 16:18 endpoint.yaml
-rw-------. 1 root root 193461760 May 25 10:21 etcd-amd64
-rw-r--r--. 1 root root       362 May 25 12:17 hk.yaml
-rw-------. 1 root root  41239040 May 25 10:21 k8s-dns
-rw-r--r--. 1 root root       343 May 25 10:21 kubeadm.host08-1.root.log.INFO.20180525-060140.2620
-rw-r--r--. 1 root root       343 May 25 10:21 kubeadm.INFO
-rw-------. 1 root root 225319936 May 25 10:21 kube-apiserver
-rw-------. 1 root root 148110336 May 25 10:21 kube-controller
-rw-------. 1 root root  98924032 May 25 10:21 kube-proxy
-rw-------. 1 root root  50635776 May 25 10:21 kube-scheduler
-rw-r--r--. 1 root root       715 May 25 13:45 multi.yaml
-rw-r--r--. 1 root root       185 May 25 16:48 nodePort.yaml
-rw-r--r--. 1 root root       162 May 25 17:03 probe.yaml
-rw-r--r--. 1 root root       410 May 25 17:06 rc-probe.yaml
-rw-r--r--. 1 root root       302 May 25 15:51 rc.yaml
-rw-r--r--. 1 root root         0 May 25 15:53 service.yalm
-rw-r--r--. 1 root root       162 May 25 15:55 service.yaml
-rw-r--r--. 1 root root       322 May 25 14:11 temp.yaml
-rw-r--r--. 1 root root      2336 May 25 12:06 temp.yarm
-rw-r--r--. 1 root root        88 May 25 16:13 test-svc-h1.yaml
-rw-------. 1 root root  99517952 May 25 10:21 weave-kube
-rw-------. 1 root root  47575552 May 25 10:21 weave-npc
[root@host01-4 hk]# k create -f rc.yaml -n test
replicationcontroller "simple-rc" created
[root@host01-4 hk]# k get rc -n test
NAME        DESIRED   CURRENT   READY     AGE
simple-rc   3         3         3         9s
[root@host01-4 hk]#
[root@host01-4 hk]# k describe ns test
 





LB > Node Port > Cluster IP


Creating a NodePort service automatically creates a ClusterIP as well.


*Deleting a service (SVC):

[root@host01-4 hk]# k get service
NAME           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
headless-svc   ClusterIP   10.96.102.169   <none>        80/TCP    27m
kubernetes     ClusterIP   10.96.0.1       <none>        443/TCP   48m
test-svc       ClusterIP   10.96.25.31     <none>        80/TCP    45m
[root@host01-4 hk]# k delete svc test-svc
service "test-svc" deleted
[root@host01-4 hk]#
 


*Creating a NodePort (once created, you can curl any node on that port; see the smoke test after the listing):

vi nodePort.yaml
 
apiVersion: v1
kind: Service
metadata:
  name: test-svc
spec:
        type: NodePort
        selector:
          type: test
        ports:
        - port: 80
          targetPort: 8080
 
 
[root@host01-4 hk]# k create -f nodePort.yaml
 
service "test-svc" created
[root@host01-4 hk]#
[root@host01-4 hk]# k get svc
NAME           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
headless-svc   ClusterIP   10.96.102.169   <none>        80/TCP         31m
kubernetes     ClusterIP   10.96.0.1       <none>        443/TCP        52m
test-svc       NodePort    10.107.213.48   <none>        80:32045/TCP   6s
 
 
#For a NodePort service, 80 is the ClusterIP port and 32045 is the node port
 
 
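A quick smoke test of the NodePort, assuming this lab's node IPs (10.10.12.12-14) and the allocated port 32045; the port number will differ on your cluster. Any node's IP works, regardless of which node the pod actually runs on:

curl http://10.10.12.12:32045
curl http://10.10.12.14:32045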


*Creating a LoadBalancer:

k edit svc test-app
 
# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: 2018-05-25T07:44:57Z
  name: test-svc
  namespace: default
  resourceVersion: "29844"
  selfLink: /api/v1/namespaces/default/services/test-svc
  uid: 85b2c6ea-5fef-11e8-8e09-005056b28b62
spec:
  clusterIP: 10.107.213.48
  externalTrafficPolicy: Cluster
  ports:
  - nodePort: 32045
    port: 80
    protocol: TCP
    targetPort: 8080
  selector:
    type: test
  sessionAffinity: None
  type: LoadBalancer            #change the type to LoadBalancer, and...
status:
  loadBalancer: {}
 
 
[root@host01-4 hk]# k get svc
NAME           TYPE           CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
headless-svc   ClusterIP      10.96.102.169   <none>        80/TCP         38m
kubernetes     ClusterIP      10.96.0.1       <none>        443/TCP        59m
test-svc       LoadBalancer   10.107.213.48   <pending>     80:32045/TCP   7m  #the NodePort and ClusterIP underneath stay wired up automatically
[root@host01-4 hk]#
 


When you create the higher-level network object, the lower-level mapping underneath it is configured automatically.




*Why targetPort is set:


 
  ports:
  - nodePort: 32045
    targetPort: 8080
 
 
 


=> Because the service (cluster) port and the pod's port are different.



*From a business point of view, you must be able to check whether the pod behind a probe is actually working....


A client connects!!

Svc > Pod (X)

If the service routes traffic to a broken pod, the client concludes the whole service is down.

A readinessProbe lets the cluster check whether each pod is actually ready to serve:

      containers:
      - image: reg.cloud.com/kubia
        name: kubia
        readinessProbe:
          exec:
            command:
            - ls
            - /var/ready
 


*ReadinessProbe worked example:


#vi rc-probe.yaml 
# If /var/ready exists inside the pod, the probe passes (the pod is marked Ready)
apiVersion: v1
kind: ReplicationController
metadata:
  name: rc-readiness
spec:
  replicas: 3
  template:
    metadata:
      labels:
        type: test
    spec:
      containers:
      - image: reg.cloud.com/kubia
        name: kubia
        readinessProbe:
          exec:
            command:
            - ls
            - /var/ready
        ports:
        - containerPort: 8080
          protocol: TCP
 
 
#vi service.yaml
 
apiVersion: v1
kind: Service
metadata:
  name: test-svc
spec:
        selector:
          type: test
        ports:
        - port: 80
          targetPort: 8080
 
 
 
[root@host01-4 hk]# k create -f rc-probe.yaml
 
replicationcontroller "rc-readiness" created
[root@host01-4 hk]# k get po -o wide
NAME                 READY     STATUS    RESTARTS   AGE       IP          NODE
rc-readiness-22hxs   0/1       Running   0          10s       10.36.0.2   host01-3.cloud.com
rc-readiness-2pt29   0/1       Running   0          10s       10.44.0.1   host01-2.cloud.com
rc-readiness-h74t2   0/1       Running   0          10s       10.36.0.1   host01-3.cloud.com
 
 
#The pods are Running, but none of them are Ready
 
 
 
[root@host01-4 hk]# k create -f service.yaml
service "test-svc" created
[root@host01-4 hk]# k get svc
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1     <none>        443/TCP   6m
test-svc     ClusterIP   10.98.99.22   <none>        80/TCP    4s
[root@host01-4 hk]# k get ep
NAME         ENDPOINTS          AGE
kubernetes   10.10.12.14:6443   6m
test-svc                        9s
 
 
#test-svc has no endpoints yet.
 
[root@host01-4 hk]# k exec rc-readiness-22hxs -- touch /var/ready
 
#Create /var/ready in the first pod (to move it to Ready)
 
[root@host01-4 hk]# k get ep
NAME         ENDPOINTS          AGE
kubernetes   10.10.12.14:6443   8m
test-svc     10.36.0.2:8080     1m
 
#test-svc's ENDPOINTS entry is now populated
 
[root@host01-4 hk]# k get svc
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1     <none>        443/TCP   8m
test-svc     ClusterIP   10.98.99.22   <none>        80/TCP    1m
 
[root@host01-4 hk]# k get po -o wide
NAME                 READY     STATUS    RESTARTS   AGE       IP          NODE
rc-readiness-22hxs   1/1       Running   0          4m        10.36.0.2   host01-3.cloud.com
rc-readiness-2pt29   0/1       Running   0          4m        10.44.0.1   host01-2.cloud.com
rc-readiness-h74t2   0/1       Running   0          4m        10.36.0.1   host01-3.cloud.com
 
#The first pod has flipped to Ready
 
 
[root@host01-4 hk]# k exec rc-readiness-2pt29 -- touch /var/ready
[root@host01-4 hk]# k get po -o wide
NAME                 READY     STATUS    RESTARTS   AGE       IP          NODE
rc-readiness-22hxs   1/1       Running   0          5m        10.36.0.2   host01-3.cloud.com
rc-readiness-2pt29   1/1       Running   0          5m        10.44.0.1   host01-2.cloud.com
rc-readiness-h74t2   0/1       Running   0          5m        10.36.0.1   host01-3.cloud.com
 
#The second pod flips to Ready
 
 
 
 
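The check also works in reverse. A sketch, reusing the pods above: deleting /var/ready makes the probe fail, and after a few probe periods the pod drops back out of the endpoint list.

k exec rc-readiness-22hxs -- rm /var/ready
k get ep test-svc    # 10.36.0.2:8080 should disappear again once the probe fails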






*Kubernetes provides a common API and a self-healing framework that automatically handles:

- machine failures

- simplified application deployment, logging, and monitoring


It is a tool for gaining autonomy with a minimum of control!


The operations team's role is steadily shrinking...

Business keeps moving its focus toward services and applications... infrastructure is becoming a smaller concern.


*Kubernetes architecture:


1) API node

The Kubernetes API server has to hold the state of every node.

The first thing the scheduler does is decide where to place a workload.

The controller manager (including the replication controller) on the API node reports node status.

-> Kubernetes uses etcd, a distributed key-value store.



2) Worker node

CoreOS released Rocket (rkt), a container runtime built because Docker was considered too heavy!

The kubelet controls the container-side components on each node.

Pod-to-pod traffic travels over VXLAN (an overlay network).

kube-proxy exists as an app-level instance,

while the actual data-path rules live in iptables on the host (down in the kernel layer).



※ The kubelet is the only piece that runs as a system service! Everything else can run as containers.



- Why use Swarm? It is easy to use.

- Why use Kubernetes? It can control a far wider range of resources.





1. Install Docker (every node)


yum install -y docker
systemctl enable docker && systemctl start docker
 
touch /etc/docker/daemon.json
cat <<EOF > /etc/docker/daemon.json
{
"insecure-registries":["10.10.12.0/24"]
}
EOF
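One caveat worth adding here: dockerd only reads daemon.json at startup, so after writing the file the daemon needs a restart. A minimal sketch to apply and verify the setting (the exact docker info wording varies by version):

systemctl restart docker
docker info | grep -A1 -i insecure    # should list 10.10.12.0/24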


2. Install Kubernetes (every node)


cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
setenforce 0
sed -i --follow-symlinks 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
systemctl stop firewalld.service
systemctl disable firewalld.service
 
cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
 
swapoff -a && sed -i '/swap/s/^/#/' /etc/fstab
 
yum install -y kubelet kubeadm kubectl
systemctl enable kubelet && systemctl start kubelet
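A quick sanity check after the install; note that kubelet will crash-loop until kubeadm init (or join) hands it a configuration, so a failing unit status here is expected at this stage:

kubeadm version -o short
kubectl version --client
systemctl status kubelet --no-pager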


*Master node setup (load every saved image):

> docker load -i <image name>

[root@host01-2 ~]# docker images
REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
[root@host01-2 ~]# ls
anaconda-ks.cfg  dns-kube-dns  dns-sidecar  etcd-amd64  hk
[root@host01-2 ~]# cd hk
[root@host01-2 hk]# ls
dns-kube-dns  etcd-amd64  kubeadm.host08-1.root.log.INFO.20180525-060140.2620  kube-apiserver   kube-proxy      weave-kube
dns-sidecar   k8s-dns     kubeadm.INFO                                         kube-controller  kube-scheduler  weave-npc
[root@host01-2 hk]# ls -al
total 974632
drwxr-xr-x. 2 root root       277 May 25 10:27 .
dr-xr-x---. 5 root root       226 May 25 10:27 ..
-rw-------. 1 root root  50727424 May 25 10:27 dns-kube-dns
-rw-------. 1 root root  42481152 May 25 10:27 dns-sidecar
-rw-------. 1 root root 193461760 May 25 10:27 etcd-amd64
-rw-------. 1 root root  41239040 May 25 10:27 k8s-dns
-rw-r--r--. 1 root root       343 May 25 10:27 kubeadm.host08-1.root.log.INFO.20180525-060140.2620
-rw-r--r--. 1 root root       343 May 25 10:27 kubeadm.INFO
-rw-------. 1 root root 225319936 May 25 10:27 kube-apiserver
-rw-------. 1 root root 148110336 May 25 10:27 kube-controller
-rw-------. 1 root root  98924032 May 25 10:27 kube-proxy
-rw-------. 1 root root  50635776 May 25 10:27 kube-scheduler
-rw-------. 1 root root  99517952 May 25 10:27 weave-kube
-rw-------. 1 root root  47575552 May 25 10:27 weave-npc
[root@host01-2 hk]# docker load -i kube-proxy
582b548209e1: Loading layer [==================================================>]  44.2 MB/44.2 MB
e20569a478ed: Loading layer [==================================================>]  3.358 MB/3.358 MB
6b4e4941a965: Loading layer [==================================================>]  51.35 MB/51.35 MB
Loaded image: k8s.gcr.io/kube-proxy-amd64:v1.10.3
[root@host01-2 hk]# docker load -i weave-kube
5bef08742407: Loading layer [==================================================>]  4.221 MB/4.221 MB
c3355c8b5c3e: Loading layer [==================================================>]  19.03 MB/19.03 MB
a83fa3df4138: Loading layer [==================================================>]  29.55 MB/29.55 MB
020fdc01af85: Loading layer [==================================================>]  11.6 MB/11.6 MB
2ea881a632b7: Loading layer [==================================================>]  2.048 kB/2.048 kB
396aa46bcbea: Loading layer [==================================================>]  35.09 MB/35.09 MB
Loaded image: docker.io/weaveworks/weave-kube:2.3.0
[root@host01-2 hk]# docker load -i weave-npc
8dccfe2dec8c: Loading layer [==================================================>]  2.811 MB/2.811 MB
3249ff6df12f: Loading layer [==================================================>]  40.52 MB/40.52 MB
3dc458d34b22: Loading layer [==================================================>]  2.56 kB/2.56 kB
Loaded image: docker.io/weaveworks/weave-npc:2.3.0
[root@host01-2 hk]#
 
cs

*Worker node setup (load kube-proxy, weave-kube, and weave-npc):

[root@host01-2 ~]# docker images
REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
[root@host01-2 ~]# ls
anaconda-ks.cfg  dns-kube-dns  dns-sidecar  etcd-amd64  hk
[root@host01-2 ~]# cd hk
[root@host01-2 hk]# ls
dns-kube-dns  etcd-amd64  kubeadm.host08-1.root.log.INFO.20180525-060140.2620  kube-apiserver   kube-proxy      weave-kube
dns-sidecar   k8s-dns     kubeadm.INFO                                         kube-controller  kube-scheduler  weave-npc
[root@host01-2 hk]# ls -al
total 974632
drwxr-xr-x. 2 root root       277 May 25 10:27 .
dr-xr-x---. 5 root root       226 May 25 10:27 ..
-rw-------. 1 root root  50727424 May 25 10:27 dns-kube-dns
-rw-------. 1 root root  42481152 May 25 10:27 dns-sidecar
-rw-------. 1 root root 193461760 May 25 10:27 etcd-amd64
-rw-------. 1 root root  41239040 May 25 10:27 k8s-dns
-rw-r--r--. 1 root root       343 May 25 10:27 kubeadm.host08-1.root.log.INFO.20180525-060140.2620
-rw-r--r--. 1 root root       343 May 25 10:27 kubeadm.INFO
-rw-------. 1 root root 225319936 May 25 10:27 kube-apiserver
-rw-------. 1 root root 148110336 May 25 10:27 kube-controller
-rw-------. 1 root root  98924032 May 25 10:27 kube-proxy
-rw-------. 1 root root  50635776 May 25 10:27 kube-scheduler
-rw-------. 1 root root  99517952 May 25 10:27 weave-kube
-rw-------. 1 root root  47575552 May 25 10:27 weave-npc
[root@host01-2 hk]# docker load -i kube-proxy
582b548209e1: Loading layer [==================================================>]  44.2 MB/44.2 MB
e20569a478ed: Loading layer [==================================================>]  3.358 MB/3.358 MB
6b4e4941a965: Loading layer [==================================================>]  51.35 MB/51.35 MB
Loaded image: k8s.gcr.io/kube-proxy-amd64:v1.10.3
[root@host01-2 hk]# docker load -i weave-kube
5bef08742407: Loading layer [==================================================>]  4.221 MB/4.221 MB
c3355c8b5c3e: Loading layer [==================================================>]  19.03 MB/19.03 MB
a83fa3df4138: Loading layer [==================================================>]  29.55 MB/29.55 MB
020fdc01af85: Loading layer [==================================================>]  11.6 MB/11.6 MB
2ea881a632b7: Loading layer [==================================================>]  2.048 kB/2.048 kB
396aa46bcbea: Loading layer [==================================================>]  35.09 MB/35.09 MB
Loaded image: docker.io/weaveworks/weave-kube:2.3.0
[root@host01-2 hk]# docker load -i weave-npc
8dccfe2dec8c: Loading layer [==================================================>]  2.811 MB/2.811 MB
3249ff6df12f: Loading layer [==================================================>]  40.52 MB/40.52 MB
3dc458d34b22: Loading layer [==================================================>]  2.56 kB/2.56 kB
Loaded image: docker.io/weaveworks/weave-npc:2.3.0
[root@host01-2 hk]#
 
cs


*Run on the master node:

kubeadm init
 
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config


Example run:

[root@host01-4 ~]# docker images
REPOSITORY                                 TAG                 IMAGE ID            CREATED             SIZE
k8s.gcr.io/kube-proxy-amd64                v1.10.3             4261d315109d        3 days ago          97.1 MB
k8s.gcr.io/kube-apiserver-amd64            v1.10.3             e03746fe22c3        3 days ago          225 MB
k8s.gcr.io/kube-controller-manager-amd64   v1.10.3             40c8d10b2d11        3 days ago          148 MB
k8s.gcr.io/kube-scheduler-amd64            v1.10.3             353b8f1d102e        3 days ago          50.4 MB
docker.io/weaveworks/weave-npc             2.3.0               21545eb3d6f9        6 weeks ago         47.2 MB
docker.io/weaveworks/weave-kube            2.3.0               f15514acce73        6 weeks ago         96.8 MB
k8s.gcr.io/etcd-amd64                      3.1.12              52920ad46f5b        2 months ago        193 MB
k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64     1.14.8              c2ce1ffb51ed        4 months ago        41 MB
k8s.gcr.io/k8s-dns-sidecar-amd64           1.14.8              6f7f2dc7fab5        4 months ago        42.2 MB
k8s.gcr.io/k8s-dns-kube-dns-amd64          1.14.8              80cc5ea4b547        4 months ago        50.5 MB
[root@host01-4 ~]# clear
[root@host01-4 ~]# kubeadm init
[init] Using Kubernetes version: v1.10.3
[init] Using Authorization modes: [Node RBAC]
[preflight] Running pre-flight checks.
        [WARNING FileExisting-crictl]: crictl not found in system path
Suggestion: go get github.com/kubernetes-incubator/cri-tools/cmd/crictl
[certificates] Generated ca certificate and key.
[certificates] Generated apiserver certificate and key.
[certificates] apiserver serving cert is signed for DNS names [host01-4.cloud.com kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.10.12.14]
[certificates] Generated apiserver-kubelet-client certificate and key.
[certificates] Generated etcd/ca certificate and key.
[certificates] Generated etcd/server certificate and key.
[certificates] etcd/server serving cert is signed for DNS names [localhost] and IPs [127.0.0.1]
[certificates] Generated etcd/peer certificate and key.
[certificates] etcd/peer serving cert is signed for DNS names [host01-4.cloud.com] and IPs [10.10.12.14]
[certificates] Generated etcd/healthcheck-client certificate and key.
[certificates] Generated apiserver-etcd-client certificate and key.
[certificates] Generated sa key and public key.
[certificates] Generated front-proxy-ca certificate and key.
[certificates] Generated front-proxy-client certificate and key.
[certificates] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/admin.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/kubelet.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/controller-manager.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/scheduler.conf"
[controlplane] Wrote Static Pod manifest for component kube-apiserver to "/etc/kubernetes/manifests/kube-apiserver.yaml"
[controlplane] Wrote Static Pod manifest for component kube-controller-manager to "/etc/kubernetes/manifests/kube-controller-manager.yaml"
[controlplane] Wrote Static Pod manifest for component kube-scheduler to "/etc/kubernetes/manifests/kube-scheduler.yaml"
[etcd] Wrote Static Pod manifest for a local etcd instance to "/etc/kubernetes/manifests/etcd.yaml"
[init] Waiting for the kubelet to boot up the control plane as Static Pods from directory "/etc/kubernetes/manifests".
[init] This might take a minute or longer if the control plane images have to be pulled.
[apiclient] All control plane components are healthy after 25.003852 seconds
[uploadconfig] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[markmaster] Will mark node host01-4.cloud.com as master by adding a label and a taint
[markmaster] Master host01-4.cloud.com tainted and labelled with key/value: node-role.kubernetes.io/master=""
[bootstraptoken] Using token: hlz7wp.qjrgmsq2yn9f94wa
[bootstraptoken] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstraptoken] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstraptoken] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstraptoken] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: kube-dns
[addons] Applied essential addon: kube-proxy
 
Your Kubernetes master has initialized successfully!
 
To start using your cluster, you need to run the following as a regular user:
 
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
 
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/
 
You can now join any number of machines by running the following on each node
as root:
 
  kubeadm join 10.10.12.14:6443 --token hlz7wp.qjrgmsq2yn9f94wa --discovery-token-ca-cert-hash sha256:43e61417b20ede5ca530fe0638990bc1a805b5f2a9e25b5aa2f40023b392fb50
 
[root@host01-4 ~]#
[root@host01-4 ~]# mkdir -p $HOME/.kube
[root@host01-4 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@host01-4 ~]# chown $(id -u):$(id -g) $HOME/.kube/config
[root@host01-4 ~]#
 


Copy the join token from the output above:

  kubeadm join 10.10.12.14:6443 --token hlz7wp.qjrgmsq2yn9f94wa --discovery-token-ca-cert-hash sha256:43e61417b20ede5ca530fe0638990bc1a805b5f2a9e25b5aa2f40023b392fb50
 
 
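If the token scrolls away or expires (the default TTL is 24 hours), it can be reissued on the master instead of rerunning init. A sketch:

kubeadm token list
kubeadm token create --print-join-command    # prints a fresh kubeadm join line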


*Install the pod network (master):

sysctl net.bridge.bridge-nf-call-iptables=1
 
export kubever=$(kubectl version | base64 | tr -d '\n')
 
kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$kubever"
 
kubectl get pods --all-namespaces 


Result:

[root@host01-4 ~]# sysctl net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-iptables = 1
[root@host01-4 ~]#
[root@host01-4 ~]# export kubever=$(kubectl version | base64 | tr -d '\n')
[root@host01-4 ~]#
[root@host01-4 ~]# kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$kubever"
serviceaccount "weave-net" created
clusterrole.rbac.authorization.k8s.io "weave-net" created
clusterrolebinding.rbac.authorization.k8s.io "weave-net" created
role.rbac.authorization.k8s.io "weave-net" created
rolebinding.rbac.authorization.k8s.io "weave-net" created
daemonset.extensions "weave-net" created
[root@host01-4 ~]# kubectl get pods --all-namespaces
NAMESPACE     NAME                                         READY     STATUS    RESTARTS   AGE
kube-system   etcd-host01-4.cloud.com                      1/1       Running   0          4m
kube-system   kube-apiserver-host01-4.cloud.com            1/1       Running   0          4m
kube-system   kube-controller-manager-host01-4.cloud.com   1/1       Running   0          5m
kube-system   kube-dns-86f4d74b45-t9df2                    0/3       Pending   0          5m
kube-system   kube-proxy-fs9d8                             1/1       Running   0          5m
kube-system   kube-scheduler-host01-4.cloud.com            1/1       Running   0          4m
kube-system   weave-net-zr5qr                              2/2       Running   0          11s
[root@host01-4 ~]#
 
 


*Run the join command copied from the master earlier on each of the two worker nodes:

[root@host01-3 ~]# kubeadm join 10.10.12.14:6443 --token hlz7wp.qjrgmsq2yn9f94wa --discovery-token-ca-cert-hash sha256:43e61417b20ede5ca530fe0638990bc1a805b5f2a9e25b5aa2f40023b392fb50
[preflight] Running pre-flight checks.
        [WARNING FileExisting-crictl]: crictl not found in system path
Suggestion: go get github.com/kubernetes-incubator/cri-tools/cmd/crictl
[discovery] Trying to connect to API Server "10.10.12.14:6443"
[discovery] Created cluster-info discovery client, requesting info from "https://10.10.12.14:6443"
[discovery] Requesting info from "https://10.10.12.14:6443" again to validate TLS against the pinned public key
[discovery] Cluster info signature and contents are valid and TLS certificate validates against pinned roots, will use API Server "10.10.12.14:6443"
[discovery] Successfully established connection with API Server "10.10.12.14:6443"
 
This node has joined the cluster:
* Certificate signing request was sent to master and a response
  was received.
* The Kubelet was informed of the new secure connection details.
 
Run 'kubectl get nodes' on the master to see this node join the cluster.
[root@host01-3 ~]#
 


*On the master, verify the members have joined (their STATUS should become Ready):

 
[root@host01-4 ~]# kubectl get no
NAME                 STATUS     ROLES     AGE       VERSION
host01-2.cloud.com   NotReady   <none>    6s        v1.10.3
host01-3.cloud.com   NotReady   <none>    11s       v1.10.3
host01-4.cloud.com   Ready      master    9m        v1.10.3
[root@host01-4 ~]# ^C
 
[root@host01-4 ~]# kubectl get no
NAME                 STATUS    ROLES     AGE       VERSION
host01-2.cloud.com   Ready     <none>    2m        v1.10.3
host01-3.cloud.com   Ready     <none>    2m        v1.10.3
host01-4.cloud.com   Ready     master    11m       v1.10.3
[root@host01-4 ~]#
 






*Tab completion


On AWS and Azure, the kubectl command is just 'k' (registered as an alias).


*Registering the alias:

[root@host01-4 ~]# alias k=kubectl
[root@host01-4 ~]# source <(kubectl completion bash | sed  s/kubectl/k/g)
[root@host01-4 ~]# k get
You must specify the type of resource to get. Valid resource types include:
 
  * all
  * certificatesigningrequests (aka 'csr')
  * clusterrolebindings
  * clusterroles
  * componentstatuses (aka 'cs')
  * configmaps (aka 'cm')
  * controllerrevisions
  * cronjobs
  * customresourcedefinition (aka 'crd')
  * daemonsets (aka 'ds')
  * deployments (aka 'deploy')
  * endpoints (aka 'ep')
  * events (aka 'ev')
  * horizontalpodautoscalers (aka 'hpa')
  * ingresses (aka 'ing')
  * jobs
  * limitranges (aka 'limits')
  * namespaces (aka 'ns')
  * networkpolicies (aka 'netpol')
  * nodes (aka 'no')
  * persistentvolumeclaims (aka 'pvc')
  * persistentvolumes (aka 'pv')
  * poddisruptionbudgets (aka 'pdb')
  * podpreset
  * pods (aka 'po')
  * podsecuritypolicies (aka 'psp')
  * podtemplates
  * replicasets (aka 'rs')
  * replicationcontrollers (aka 'rc')
  * resourcequotas (aka 'quota')
  * rolebindings
  * roles
  * secrets
  * serviceaccounts (aka 'sa')
  * services (aka 'svc')
  * statefulsets (aka 'sts')
  * storageclasses (aka 'sc')
error: Required resource not specified.
Use "kubectl explain <resource>" for a detailed description of that resource (e.g. kubectl explain pods).
See 'kubectl get -h' for help and examples.
[root@host01-4 ~]# echo "alias k=kubectl" >> ~/.bashrc
[root@host01-4 ~]# echo "source <(kubectl completion bash | sed s/kubectl/k/g)" >> ~/.bashrc
 





*The Pod concept

The foundation of an MSA architecture: put only one application in each container!

- Network sharing: containers grouped into a pod share the network namespace, so they can talk to each other over localhost.

- Storage sharing: volumes are mounted into the pod, so storage is shared as well! (See the sketch right after this list.)


- Containers in a single pod always run on the same host, and they cannot expose the same service port twice.

(A badly designed pod: putting a DB container and a web container in the same pod causes problems when scaling out;

split them into a web pod, a WAS pod, and a DB pod instead.)

The pod is the smallest unit Kubernetes works with!
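A minimal sketch of those two sharing mechanisms: two containers in one pod, one writing into an emptyDir volume that the other serves over localhost. The image names reuse this lab's registry and are assumptions:

apiVersion: v1
kind: Pod
metadata:
  name: shared-pod
spec:
  volumes:
  - name: shared-data            # one volume, mounted by both containers
    emptyDir: {}
  containers:
  - name: web
    image: reg.cloud.com/nginx
    volumeMounts:
    - name: shared-data
      mountPath: /usr/share/nginx/html
  - name: sidecar
    image: reg.cloud.com/busybox
    command: ["sh", "-c", "while true; do date > /data/index.html; sleep 5; done"]
    volumeMounts:
    - name: shared-data
      mountPath: /data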


The pods on each worker node are controlled by the replication controller (RC) on the master node.


- Reference/management model

The referencing side is a selector (key/value pairs);

the referenced side carries labels (key/value pairs).

An RC selects pods by their labels, and a pod's nodeSelector references node labels in the same way.

- What a Kubernetes Namespace looks like:


[root@host01-4 hk]# k get po -n kube-system -o  wide
NAME                                         READY     STATUS    RESTARTS   AGE       IP            NODE
etcd-host01-4.cloud.com                      1/1       Running   0          58m       10.10.12.14   host01-4.cloud.com
kube-apiserver-host01-4.cloud.com            1/1       Running   0          58m       10.10.12.14   host01-4.cloud.com
kube-controller-manager-host01-4.cloud.com   1/1       Running   0          59m       10.10.12.14   host01-4.cloud.com
kube-dns-86f4d74b45-t9df2                    3/3       Running   0          1h        10.32.0.2     host01-4.cloud.com
kube-proxy-fs9d8                             1/1       Running   0          1h        10.10.12.14   host01-4.cloud.com
kube-proxy-r5bzj                             1/1       Running   0          51m       10.10.12.13   host01-3.cloud.com
kube-proxy-tvwnv                             1/1       Running   0          51m       10.10.12.12   host01-2.cloud.com
kube-scheduler-host01-4.cloud.com            1/1       Running   0          59m       10.10.12.14   host01-4.cloud.com
weave-net-hf9d5                              2/2       Running   1          51m       10.10.12.12   host01-2.cloud.com
weave-net-p5drv                              2/2       Running   1          51m       10.10.12.13   host01-3.cloud.com
weave-net-zr5qr                              2/2       Running   0          54m       10.10.12.14   host01-4.cloud.com
 

*Checking the cluster API endpoints:

[root@host01-4 hk]# k cluster-info
Kubernetes master is running at https://10.10.12.14:6443
KubeDNS is running at https://10.10.12.14:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
 
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.


*Pods are reachable no matter which node they run on:

[root@host01-4 hk]# ls
dns-kube-dns  etcd-amd64  kubeadm.host08-1.root.log.INFO.20180525-060140.2620  kube-apiserver   kube-proxy      weave-kube
dns-sidecar   k8s-dns     kubeadm.INFO                                         kube-controller  kube-scheduler  weave-npc
[root@host01-4 hk]# k run --image=reg.cloud.com/nginx --port=80 --generator=run/v1
error: NAME is required for run
See 'kubectl run -h' for help and examples.
[root@host01-4 hk]# k run --image=reg.cloud.com/nginx --port=80 --generator=run/v1
error: NAME is required for run
See 'kubectl run -h' for help and examples.
[root@host01-4 hk]# k run --image=reg.cloud.com/nginx nginx-app --port=80 --generator=run/v1
replicationcontroller "nginx-app" created
[root@host01-4 hk]# k get rc
NAME        DESIRED   CURRENT   READY     AGE
nginx-app   1         1         0         8s
[root@host01-4 hk]# k get po
NAME              READY     STATUS    RESTARTS   AGE
nginx-app-gb6ch   1/1       Running   0          15s
[root@host01-4 hk]# get po -o wide
-bash: get: command not found
[root@host01-4 hk]# k get po -o wide
NAME              READY     STATUS    RESTARTS   AGE       IP          NODE
nginx-app-gb6ch   1/1       Running   0          32s       10.36.0.1   host01-3.cloud.com
[root@host01-4 hk]# k logs
error: expected 'logs (POD | TYPE/NAME) [CONTAINER_NAME]'.
POD or TYPE/NAME is a required argument for the logs command
See 'kubectl logs -h' for help and examples.
[root@host01-4 hk]# k logs nginx-app-df618
Error from server (NotFound): pods "nginx-app-df618" not found
[root@host01-4 hk]# k logs nginx-app-gb6ch
[root@host01-4 hk]# k exec -it
error: expected 'exec POD_NAME COMMAND [ARG1] [ARG2] ... [ARGN]'.
POD_NAME and COMMAND are required arguments for the exec command
See 'kubectl exec -h' for help and examples.
[root@host01-4 hk]# k exec -it nginx-app-gb6ch bash
root@nginx-app-gb6ch:/#
 

- The pod name becomes the container's hostname.
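This is easy to verify against the pod created above; a sketch:

k exec nginx-app-gb6ch -- hostname    # prints nginx-app-gb6ch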



*Even if you delete a pod by name, it comes right back (the RC recreates it):

[root@host01-4 hk]# k get po -o wide
NAME              READY     STATUS    RESTARTS   AGE       IP          NODE
nginx-app-gb6ch   1/1       Running   0          5m        10.36.0.1   host01-3.cloud.com
[root@host01-4 hk]# delete po nginx-app-gb6ch
-bash: delete: command not found
[root@host01-4 hk]# k delete po nginx-app-gb6ch
pod "nginx-app-gb6ch" deleted
[root@host01-4 hk]# k get po -o wide
NAME              READY     STATUS              RESTARTS   AGE       IP        NODE
nginx-app-gnpsd   0/1       ContainerCreating   0          6s        <none>    host01-2.cloud.com
[root@host01-4 hk]# k get rc
NAME        DESIRED   CURRENT   READY     AGE
nginx-app   1         1         1         5m
[root@host01-4 hk]#
 


*Scale in / scale out


[root@host01-4 hk]# k describe rc nginx-app
dns-kube-dns                                         kubeadm.host08-1.root.log.INFO.20180525-060140.2620  kube-proxy
dns-sidecar                                          kubeadm.INFO                                         kube-scheduler
etcd-amd64                                           kube-apiserver                                       weave-kube
k8s-dns                                              kube-controller                                      weave-npc
[root@host01-4 hk]# k describe rc nginx-app
Name:         nginx-app
Namespace:    default
Selector:     run=nginx-app
Labels:       run=nginx-app
Annotations:  <none>
Replicas:     1 current / 1 desired
Pods Status:  1 Running / 0 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  run=nginx-app
  Containers:
   nginx-app:
    Image:        reg.cloud.com/nginx
    Port:         80/TCP
    Host Port:    0/TCP
    Environment:  <none>
    Mounts:       <none>
  Volumes:        <none>
Events:
  Type    Reason            Age   From                    Message
  ----    ------            ----  ----                    -------
  Normal  SuccessfulCreate  6m    replication-controller  Created pod: nginx-app-gb6ch
  Normal  SuccessfulCreate  1m    replication-controller  Created pod: nginx-app-gnpsd
[root@host01-4 hk]# k get po
NAME              READY     STATUS    RESTARTS   AGE
nginx-app-gnpsd   1/1       Running   0          1m
[root@host01-4 hk]# k describe po nginx-app-gnpsd
Name:           nginx-app-gnpsd
Namespace:      default
Node:           host01-2.cloud.com/10.10.12.12
Start Time:     Fri, 25 May 2018 11:58:20 +0900
Labels:         run=nginx-app
Annotations:    <none>
Status:         Running
IP:             10.44.0.1
Controlled By:  ReplicationController/nginx-app
Containers:
  nginx-app:
    Container ID:   docker://6d0e9cb190b31334dee5dba4877ace52d8afd5a9956d7c50eae35d3107722a58
    Image:          reg.cloud.com/nginx
    Image ID:       docker-pullable://reg.cloud.com/nginx@sha256:a4fb15454c43237dbc6592c4f8e0b50160ceb03e852a10c9895cf2a6d16c7fe2
    Port:           80/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Fri, 25 May 2018 11:58:29 +0900
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-85hdm (ro)
Conditions:
  Type           Status
  Initialized    True
  Ready          True
  PodScheduled   True
Volumes:
  default-token-85hdm:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-85hdm
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type    Reason                 Age   From                         Message
  ----    ------                 ----  ----                         -------
  Normal  SuccessfulMountVolume  1m    kubelet, host01-2.cloud.com  MountVolume.SetUp succeeded for volume "default-token-85hdm"
  Normal  Scheduled              1m    default-scheduler            Successfully assigned nginx-app-gnpsd to host01-2.cloud.com
  Normal  Pulling                1m    kubelet, host01-2.cloud.com  pulling image "reg.cloud.com/nginx"
  Normal  Pulled                 1m    kubelet, host01-2.cloud.com  Successfully pulled image "reg.cloud.com/nginx"
  Normal  Created                1m    kubelet, host01-2.cloud.com  Created container
  Normal  Started                1m    kubelet, host01-2.cloud.com  Started container
[root@host01-4 hk]# k get rc
NAME        DESIRED   CURRENT   READY     AGE
nginx-app   1         1         1         8m
[root@host01-4 hk]# k get po -o wide
NAME              READY     STATUS    RESTARTS   AGE       IP          NODE
nginx-app-gnpsd   1/1       Running   0          3m        10.44.0.1   host01-2.cloud.com
 
[root@host01-4 hk]# k scale rc nginx-app --replicas=3
replicationcontroller "nginx-app" scaled
[root@host01-4 hk]# k get po -o wide
NAME              READY     STATUS    RESTARTS   AGE       IP          NODE
nginx-app-gnpsd   1/1       Running   0          4m        10.44.0.1   host01-2.cloud.com
nginx-app-jfmkd   1/1       Running   0          10s       10.44.0.2   host01-2.cloud.com
nginx-app-ww6sn   1/1       Running   0          10s       10.36.0.1   host01-3.cloud.com
 
[root@host01-4 hk]# k scale rc nginx-app --replicas=0
replicationcontroller "nginx-app" scaled
[root@host01-4 hk]# k get po -o wide
NAME              READY     STATUS        RESTARTS   AGE       IP          NODE
nginx-app-gnpsd   0/1       Terminating   0          5m        10.44.0.1   host01-2.cloud.com
nginx-app-jfmkd   0/1       Terminating   0          34s       10.44.0.2   host01-2.cloud.com
[root@host01-4 hk]# k get po -o wide
No resources found.
[root@host01-4 hk]# rc 0
-bash: rc: command not found
[root@host01-4 hk]# k scale rc nginx-app --replicas=1
replicationcontroller "nginx-app" scaled
[root@host01-4 hk]# k get po -o wide
NAME              READY     STATUS    RESTARTS   AGE       IP          NODE
nginx-app-7qpbv   1/1       Running   0          4s        10.36.0.1   host01-3.cloud.com
[root@host01-4 hk]#
 



*Viewing a pod as YAML

[root@host01-4 hk]# k get po nginx-app-7qpbv -o yaml
 


*Dumping to a YAML file, then creating a pod from it

apiVersion: v1
kind: Pod
metadata:
  labels:
    type: web
  name: nginx-hk-app
spec:
  containers:
  - image: reg.cloud.com/nginx
    name: nginx-app
    ports:                            #same effect as kubectl expose
    - containerPort: 80
      protocol: TCP
 
 
 
[root@host01-4 hk]# k get po nginx-app-7qpbv -o yaml > temp.yaml
[root@host01-4 hk]# vi temp.yaml
[root@host01-4 hk]# k create -f temp.yaml
pod "nginx-hk-app" created
[root@host01-4 hk]# k get po -o wide
NAME              READY     STATUS    RESTARTS   AGE       IP          NODE
nginx-app-7qpbv   1/1       Running   0          6m        10.36.0.1   host01-3.cloud.com
nginx-hk-app      1/1       Running   0          13s       10.44.0.1   host01-2.cloud.com
[root@host01-4 hk]#
 



*Creating an RC from a YAML file and deploying it:


[root@host01-4 hk]# k get rc nginx-app -o yaml > hk.yaml
[root@host01-4 hk]# vi hk.yaml
apiVersion: v1
kind: ReplicationController
metadata:
  labels:
    run: nginx-app
  name: nginx-app2
spec:
  replicas: 1
  selector:
    type: test
  template:
    metadata:
      labels:
        type: test
    spec:
      containers:
      - image: reg.cloud.com/nginx
        name: nginx-app
        ports:
        - containerPort: 80
          protocol: TCP
 
 
[root@host01-4 hk]# k create -f hk.yaml
replicationcontroller "nginx-app2" created
[root@host01-4 hk]# k get rc
NAME         DESIRED   CURRENT   READY     AGE
nginx-app    1         1         1         24m
nginx-app2   1         1         1         9s
[root@host01-4 hk]# k get po -o wide
NAME               READY     STATUS    RESTARTS   AGE       IP          NODE
nginx-app-7qpbv    1/1       Running   0          14m       10.36.0.1   host01-3.cloud.com
nginx-app2-tgqqf   1/1       Running   0          16s       10.36.0.2   host01-3.cloud.com
nginx-hk-app       1/1       Running   0          7m        10.44.0.1   host01-2.cloud.com
[root@host01-4 hk]#
 


*Differences between Linux Containers (LXC) and Docker


Docker = LXC + (layers + registry)

=> Docker applies MSA and DevOps methodology to LXC!! (app containers)

=> LXC is an OS container.


*Registry components

Repository, Index


*The Docker stack

dockerd (net + volume + swarm ...)


containerd


runC


*When you run a Docker container...


[root@host01-2 tmp]# docker run -it reg.cloud.com/centos
[root@a63244e84c0b /]#
 


*Where Docker containers live

[root@host01-2 tmp]# ls /var/lib/docker/containers/


*Inside a container's directory

[root@host01-2 tmp]# cd /var/lib/docker/containers/
 
[root@host01-2 a63244e84c0b71b01311d52eda786b395aa77f6510c32e08046d3beb8b1da095]# ls -al
total 28
drwx------. 4 root root  237 May 23 09:36 .
drwx------. 5 root root  222 May 23 09:35 ..
-rw-r-----. 1 root root 1540 May 23 09:33 a63244e84c0b71b01311d52eda786b395aa77f6510c32e08046d3beb8b1da095-json.log
drwx------. 2 root root    6 May 23 09:32 checkpoints
-rw-------. 1 root root 2636 May 23 09:33 config.v2.json
-rw-r--r--. 1 root root 1153 May 23 09:33 hostconfig.json
-rw-r--r--. 1 root root   13 May 23 09:32 hostname
-rw-r--r--. 1 root root  174 May 23 09:32 hosts
drwx------. 3 root root   17 May 23 09:32 mounts
-rw-r--r--. 1 root root   69 May 23 09:32 resolv.conf
-rw-r--r--. 1 root root   71 May 23 09:32 resolv.conf.hash
 

a63244e84c0b71b01311d52eda786b395aa77f6510c32e08046d3beb8b1da095-json.log = the container's log




hostname = the hostname the container comes up with



*Listing Docker images (an image is identified as image-name@digest):

[root@host01-2 /]# docker image ls --digests
REPOSITORY             TAG                 DIGEST                                                                    IMAGE ID            CREATED             SIZE
registry               2.5                 sha256:a3a4155bb8a3b32679c10451a55f9754f33b8620c1a8f316dfd913bb91ac746d   36e3b1f8d3f1        4 months ago        37.8MB
reg.cloud.com/ubuntu   latest              <none>                                                                    20c44cd7596f        6 months ago        123MB
reg.cloud.com/centos   latest              sha256:224d7b12549c04bba833d4626a5c51113290e55bd754d39f72d1a437539b3c68   d123f4e55e12        6 months ago        197MB
 


When a Docker image is created, an ID is generated.

The repository name is optional!

Images without a DIGEST value get one from the registry (the digest is created the moment the image is registered = commit + push required!!).


*docker history (how to see how an image was built):

The steps execute from bottom to top.

 
[root@host01-2 /]# docker history reg.cloud.com/ubuntu
IMAGE               CREATED             CREATED BY                                      SIZE                COMMENT
20c44cd7596f        6 months ago        /bin/sh -c #(nop)  CMD ["/bin/bash"]            0B
<missing>           6 months ago        /bin/sh -c mkdir -p /run/systemd && echo 'do…   7B
<missing>           6 months ago        /bin/sh -c sed -i 's/^#\s*\(deb.*universe\)$…   2.76kB
<missing>           6 months ago        /bin/sh -c rm -rf /var/lib/apt/lists/*          0B
<missing>           6 months ago        /bin/sh -c set -xe   && echo '#!/bin/sh' > /…   745B
<missing>           6 months ago        /bin/sh -c #(nop) ADD file:280a445783f309c90…   123MB






*Docker images ship with only a minimal toolset:

[root@host01-2 /]# docker attach 07802d6d9bd9
[root@07802d6d9bd9 /]# ip addr
bash: ip: command not found
[root@07802d6d9bd9 /]# ifconfig
bash: ifconfig: command not found
[root@07802d6d9bd9 /]#
 


ifconfig comes from net-tools,

ip addr comes from iproute;

tools like these have to be installed separately (see the sketch below).
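If the image's package manager can reach a repository (an assumption in an offline lab like this one), the tools can be installed into the running container:

docker exec 07802d6d9bd9 yum install -y iproute net-tools    # provides ip and ifconfig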


*Inspecting a Docker container:

[root@host01-2 /]# docker ps
CONTAINER ID        IMAGE                  COMMAND             CREATED             STATUS              PORTS               NAMES
aa5929e423cc        reg.cloud.com/centos   "/bin/bash"         18 seconds ago      Up 17 seconds                           sad_kowalevski
[root@host01-2 /]# docker inspect aa5929e423cc
[
    {
        "Id""aa5929e423ccca7c4fe6f42559071f6a2b6b3ae4c8dfbac75c93eff06183384e",
        "Created""2018-05-23T00:53:41.018342623Z",
        "Path""/bin/bash",
        "Args": [],
        "State": {
            "Status""running",
            "Running"true,
            "Paused"false,
            "Restarting"false,
            "OOMKilled"false,
            "Dead"false,
            "Pid"8735,
            "ExitCode"0,
            "Error""",
            "StartedAt""2018-05-23T00:53:41.640998376Z",
            "FinishedAt""0001-01-01T00:00:00Z"
        },
        "Image""sha256:d123f4e55e1200156d9cbcf4421ff6d818576e4f1e29320a408c72f022cfd0b1",
        "ResolvConfPath""/var/lib/docker/containers/aa5929e423ccca7c4fe6f42559071f6a2b6b3ae4c8dfbac75c93eff06183384e/resolv.conf",
        "HostnamePath""/var/lib/docker/containers/aa5929e423ccca7c4fe6f42559071f6a2b6b3ae4c8dfbac75c93eff06183384e/hostname",
        "HostsPath""/var/lib/docker/containers/aa5929e423ccca7c4fe6f42559071f6a2b6b3ae4c8dfbac75c93eff06183384e/hosts",
        "LogPath""/var/lib/docker/containers/aa5929e423ccca7c4fe6f42559071f6a2b6b3ae4c8dfbac75c93eff06183384e/aa5929e423ccca7c4fe6f42559071f6a2b6b3ae4c8dfbac75c93eff06183384e-json.log",
        "Name""/sad_kowalevski",
        "RestartCount"0,
        "Driver""overlay2",
        "Platform""linux",
        "MountLabel""",
        "ProcessLabel""",
        "AppArmorProfile""",
        "ExecIDs": null,
        "HostConfig": {
            "Binds": null,
            "ContainerIDFile""",
            "LogConfig": {
                "Type""json-file",
                "Config": {}
            },
            "NetworkMode""default",
            "PortBindings": {},
            "RestartPolicy": {
                "Name""no",
                "MaximumRetryCount"0
            },
            "AutoRemove"false,
            "VolumeDriver""",
            "VolumesFrom": null,
            "CapAdd": null,
            "CapDrop": null,
            "Dns": [],
            "DnsOptions": [],
            "DnsSearch": [],
            "ExtraHosts": null,
            "GroupAdd": null,
            "IpcMode""shareable",
            "Cgroup""",
            "Links": null,
            "OomScoreAdj"0,
            "PidMode""",
            "Privileged"false,
            "PublishAllPorts"false,
            "ReadonlyRootfs"false,
            "SecurityOpt": null,
            "UTSMode""",
            "UsernsMode""",
            "ShmSize"67108864,
            "Runtime""runc",
            "ConsoleSize": [
                0,
                0
            ],
            "Isolation""",
            "CpuShares"0,
            "Memory"0,
            "NanoCpus"0,
            "CgroupParent""",
            "BlkioWeight"0,
            "BlkioWeightDevice": [],
            "BlkioDeviceReadBps": null,
            "BlkioDeviceWriteBps": null,
            "BlkioDeviceReadIOps": null,
            "BlkioDeviceWriteIOps": null,
            "CpuPeriod"0,
            "CpuQuota"0,
            "CpuRealtimePeriod"0,
            "CpuRealtimeRuntime"0,
            "CpusetCpus""",
            "CpusetMems""",
            "Devices": [],
            "DeviceCgroupRules": null,
            "DiskQuota"0,
            "KernelMemory"0,
            "MemoryReservation"0,
            "MemorySwap"0,
            "MemorySwappiness": null,
            "OomKillDisable"false,
            "PidsLimit"0,
            "Ulimits": null,
            "CpuCount"0,
            "CpuPercent"0,
            "IOMaximumIOps"0,
            "IOMaximumBandwidth"0
        },
        "GraphDriver": {
            "Data": {
                "LowerDir""/var/lib/docker/overlay2/c1f9c94d4aedf4d4f41347837982600e70c211e9956521186a7938397f166ad5-init/diff:/var/lib/docker/overlay2/5654bedcae9b25c616faff203d638b355157830d5431e7fee474feea8d461338/diff",
                "MergedDir""/var/lib/docker/overlay2/c1f9c94d4aedf4d4f41347837982600e70c211e9956521186a7938397f166ad5/merged",
                "UpperDir""/var/lib/docker/overlay2/c1f9c94d4aedf4d4f41347837982600e70c211e9956521186a7938397f166ad5/diff",
                "WorkDir""/var/lib/docker/overlay2/c1f9c94d4aedf4d4f41347837982600e70c211e9956521186a7938397f166ad5/work"
            },
            "Name""overlay2"
        },
        "Mounts": [],
        "Config": {
            "Hostname""aa5929e423cc",
            "Domainname""",
            "User""",
            "AttachStdin"true,
            "AttachStdout"true,
            "AttachStderr"true,
            "Tty"true,
            "OpenStdin"true,
            "StdinOnce"true,
            "Env": [
                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
            ],
            "Cmd": [
                "/bin/bash"
            ],
            "ArgsEscaped"true,
            "Image""reg.cloud.com/centos",
            "Volumes": null,
            "WorkingDir""",
            "Entrypoint": null,
            "OnBuild": null,
            "Labels": {
                "build-date""20170911",
                "license""GPLv2",
                "name""CentOS Base Image",
                "vendor""CentOS"
            }
        },
        "NetworkSettings": {
            "Bridge""",
            "SandboxID""678fe422029b37a8872fc582881d83b6d3f0c10fdfd28c071458de1622be668c",
            "HairpinMode"false,
            "LinkLocalIPv6Address""",
            "LinkLocalIPv6PrefixLen"0,
            "Ports": {},
            "SandboxKey""/var/run/docker/netns/678fe422029b",
            "SecondaryIPAddresses": null,
            "SecondaryIPv6Addresses": null,
            "EndpointID""71304bd92f81fc9cc2dc54b529533491949f89e0aeaa63a8f524ab7bb73454d4",
            "Gateway""172.17.0.1",
            "GlobalIPv6Address""",
            "GlobalIPv6PrefixLen"0,
            "IPAddress""172.17.0.2",
            "IPPrefixLen"16,
            "IPv6Gateway""",
            "MacAddress""02:42:ac:11:00:02",
            "Networks": {
                "bridge": {
                    "IPAMConfig": null,
                    "Links": null,
                    "Aliases": null,
                    "NetworkID""76d13a20d92c8d68860705f57e8989055660ac78cafea4eaa033cb1fd856e6fa",
                    "EndpointID""71304bd92f81fc9cc2dc54b529533491949f89e0aeaa63a8f524ab7bb73454d4",
                    "Gateway""172.17.0.1",
                    "IPAddress""172.17.0.2",
                    "IPPrefixLen"16,
                    "IPv6Gateway""",
                    "GlobalIPv6Address""",
                    "GlobalIPv6PrefixLen"0,
                    "MacAddress""02:42:ac:11:00:02",
                    "DriverOpts": null
                }
            }
        }
    }
]
 
 "NetworkID""76d13a20d92c8d68860705f57e8989055660ac78cafea4eaa033cb1fd856e6fa",
"EndpointID""71304bd92f81fc9cc2dc54b529533491949f89e0aeaa63a8f524ab7bb73454d4",

=> A container that has not exited keeps its network wired up like this.


*What a Docker image is:
- A read-only template; a file capturing a container instance

- Uses a layered filesystem called overlay

- Provides portability between hosts

- Lets you build your own image from a Dockerfile (see the sketch below)
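A minimal sketch of that last point, written as shell so it can be pasted directly; the tag and base image are assumptions reusing this lab's registry:

cat <<EOF > Dockerfile
FROM reg.cloud.com/centos
RUN yum install -y net-tools
CMD ["/bin/bash"]
EOF
docker build -t my-centos:1.0 .    # each instruction above becomes one layer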


1) Listing Docker images:

[root@host01-2 docker]# docker images
REPOSITORY             TAG                 IMAGE ID            CREATED             SIZE
registry               2.5                 36e3b1f8d3f1        4 months ago        37.8MB
reg.cloud.com/ubuntu   latest              20c44cd7596f        6 months ago        123MB
reg.cloud.com/centos   latest              d123f4e55e12        6 months ago        197MB
 


2) Checking the layered filesystem:

[root@host01-2 docker]# cd overlay2/
[root@host01-2 overlay2]# ls
14f7fd95b0bc618853bb4aba43a95e9d61e3ea1cbb45d0cf4b7752729995623c       923e521a724c0099b1a35f27383bb4ca484bb90b18a503b2ed468d084fa6dfe8
14f7fd95b0bc618853bb4aba43a95e9d61e3ea1cbb45d0cf4b7752729995623c-init  ac0c9b3f9d8664c6df53bf4e863a78d107a5c054e5a9a277d32e31a0a1b8977b
336d5fadb8f0ac24b795420979393e4e1d54de4ad75ce3255eac00ede7349012       ba37afeb0b3b6b67fb8e9fdd1aaa75f41caff6b23f3b6bc8e76568e307ec6c8b
34dd34b11d511e3309acd2c978aba79f4ea5d218ab08289cff5bee1ef2362977       backingFsBlockDev
34dd34b11d511e3309acd2c978aba79f4ea5d218ab08289cff5bee1ef2362977-init  c1f9c94d4aedf4d4f41347837982600e70c211e9956521186a7938397f166ad5
40b303daad22bc5c74373f2aab144a0109d680307364a4815c01d724be20eab5       c1f9c94d4aedf4d4f41347837982600e70c211e9956521186a7938397f166ad5-init
40b303daad22bc5c74373f2aab144a0109d680307364a4815c01d724be20eab5-init  c228ec2be618735dac211a9b503c2aaea4e4a82943bfd27e0fad1239e83f610b
4ed4d836bf9086e79708240589b09e2a6a44e4e155c3ef06e617e84df7bbe88a       ca77f99e41ba3db530bc57b1b66655a5a02e95120a27b6f609f4537cff456000
5654bedcae9b25c616faff203d638b355157830d5431e7fee474feea8d461338       fd2f482cf2042074824ab14c60d38a98dcaaf03acd41af9b3466f9d0dd9b87fa
7e51f10615f7bccd4c0a5bdfd9c5344d9cff32d78e72781fc325fef061fefeb6       l
90c35895a6516132fe8fcd791bfaa9fc42f9b131bb5ef1bf94cb45fab730e0e9
[root@host01-2 overlay2]# pwd
/var/lib/docker/overlay2
 

The layers under /var/lib/docker/overlay2 are merged into a single unified filesystem.
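The layer directories that back a given image can be read straight from its metadata; a sketch:

docker image inspect -f '{{json .GraphDriver.Data}}' reg.cloud.com/centos
docker image inspect -f '{{json .RootFS.Layers}}' reg.cloud.com/ubuntu    # content-addressed layer digests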


*Container lifecycle

There are two ways to run a container:


docker run

docker start


What they share: both put a container into the running state.

The difference: run launches a brand-new container,

while start can bring a stopped container back up!


run starts the container immediately:

run => start => stop

run => start => kill
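A sketch of the full cycle with a named container (busybox kept alive with sleep):

docker run -d --name demo reg.cloud.com/busybox sleep 3600   # run: create + start a new container
docker stop demo                                             # SIGTERM, then SIGKILL after a grace period
docker start demo                                            # the same container comes back up
docker kill demo                                             # immediate SIGKILL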


To list containers, use the ps command:

[root@host01-2 overlay2]# docker ps
CONTAINER ID        IMAGE                  COMMAND             CREATED             STATUS              PORTS               NAMES
aa5929e423cc        reg.cloud.com/centos   "/bin/bash"         35 minutes ago      Up 35 minutes                           sad_kowalevski
 


Pulling a specific image version by tag:

[root@host01-2 overlay2]# docker run ubuntu:14.04
Unable to find image 'ubuntu:14.04' locally
14.04: Pulling from library/ubuntu
324d088ce065: Pull complete
2ab951b6c615: Pull complete
9b01635313e2: Pull complete
04510b914a6c: Pull complete
83ab617df7b4: Pull complete
Digest: sha256:b8855dc848e2622653ab557d1ce2f4c34218a9380cceaa51ced85c5f3c8eb201
Status: Downloaded newer image for ubuntu:14.04
[root@host01-2 overlay2]# ls
 


*Running a shell in a container (-it: interactive option):

[root@host01-2 overlay2]# docker run -it reg.cloud.com/busybox sh
/ #
 


*Deleting every container at once:

[root@host01-2 overlay2]# docker ps -a
CONTAINER ID        IMAGE                   COMMAND                 CREATED              STATUS                         PORTS               NAMES
11259166b002        reg.cloud.com/busybox   "sh"                    About a minute ago   Exited (130) 3 seconds ago                         inspiring_sammet
bb3c126f13d0        reg.cloud.com/busybox   "bash"                  About a minute ago   Created                                            hardcore_lumiere
09c4e0da958a        ubuntu:14.04            "/bin/bash"             4 minutes ago        Exited (0) 4 minutes ago                           jolly_wright
18355d103192        ubuntu:latest           "/bin/bash"             11 minutes ago       Exited (127) 5 minutes ago                         determined_kalam
aa5929e423cc        reg.cloud.com/centos    "/bin/bash"             41 minutes ago       Up 41 minutes                                      sad_kowalevski
07802d6d9bd9        reg.cloud.com/centos    "/bin/bash"             About an hour ago    Exited (130) 42 minutes ago                        amazing_curran
a63244e84c0b        reg.cloud.com/centos    "/bin/bash"             About an hour ago    Exited (0) About an hour ago                       sharp_mclean
3e5c8d877885        reg.cloud.com/centos    "cat /etc/os-release"   About an hour ago    Exited (0) About an hour ago                       laughing_carson
[root@host01-2 overlay2]# docker rm -f $(docker ps -aq)
11259166b002
bb3c126f13d0
09c4e0da958a
18355d103192
aa5929e423cc
07802d6d9bd9
a63244e84c0b
3e5c8d877885
 
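If only the stopped containers should go, instead of force-removing everything, either of these standard variants works:

docker container prune -f                      # remove all stopped containers
docker rm $(docker ps -q -f status=exited)     # same idea, via an explicit status filter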


*Running a container with a name (docker run -it --name hong  reg.cloud.com/busybox sh):

[root@host01-2 overlay2]# docker run -it --name hong  reg.cloud.com/busybox sh
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
40: eth0@if41: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
    link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.2/16 brd 172.17.255.255 scope global eth0
       valid_lft forever preferred_lft forever
/ # route
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
default         172.17.0.1      0.0.0.0         UG    0      0        0 eth0
172.17.0.0      *               255.255.0.0     U     0      0        0 eth0
/ # cat /etc/resolv.conf
# Generated by NetworkManager
search cloud.com
nameserver 10.10.12.1
#the container picks up the host's DNS resolver settings
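Because the container now has a fixed name, later commands can refer to it directly; for example (assuming hong is still running):

docker exec -it hong sh     # open a second shell inside the running container
docker logs hong            # dump its stdout/stderr history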





*Committing a container to a new image

/ # exit
[root@host01-2 overlay2]# docker ps -a
CONTAINER ID        IMAGE                   COMMAND             CREATED              STATUS                       PORTS               NAMES
5bba4cbd910c        reg.cloud.com/busybox   "sh"                About a minute ago   Exited (130) 4 seconds ago                     hong
[root@host01-2 overlay2]# docker commit hong hongImages
invalid reference format: repository name must be lowercase
[root@host01-2 overlay2]# docker commit hong hongimages
sha256:f6d8cd4a7d7123996b50966b76c3aae5ad61e294fe0bae6bad451d52f241e27b
[root@host01-2 overlay2]# docker images
REPOSITORY              TAG                 IMAGE ID            CREATED             SIZE
hongimages              latest              f6d8cd4a7d71        6 seconds ago       1.13MB
ubuntu                  14.04               8cef1fa16c77        3 weeks ago         223MB
ubuntu                  latest              452a96d81c30        3 weeks ago         79.6MB
registry                2.5                 36e3b1f8d3f1        4 months ago        37.8MB
reg.cloud.com/ubuntu    latest              20c44cd7596f        6 months ago        123MB
reg.cloud.com/busybox   latest              6ad733544a63        6 months ago        1.13MB
reg.cloud.com/centos    latest              d123f4e55e12        6 months ago        197MB
[root@host01-2 overlay2]#
 
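docker commit can also record an author and a message and assign a tag; the values below are purely illustrative:

docker commit -a "hk" -m "busybox with my changes" hong hongimages:v1
docker history hongimages:v1     # the commit appears as a new top layer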


*Producing a single-layer image after a commit (Export -> Import)

[root@host01-2 ~]# docker run -it --name c3 reg.cloud.com/ubuntu bash
root@9274dde20cf2:/# mkdir /test
root@9274dde20cf2:/# ls
bin  boot  dev  etc  home  lib  lib64  media  mnt  opt  proc  root  run  sbin  srv  sys  test  tmp  usr  var
root@9274dde20cf2:/# cd /test
root@9274dde20cf2:/test# ls
root@9274dde20cf2:/test# exit
exit
[root@host01-2 ~]# docker ps -a
CONTAINER ID        IMAGE                   COMMAND             CREATED             STATUS                       PORTS               NAMES
9274dde20cf2        reg.cloud.com/ubuntu    "bash"              32 seconds ago      Exited (0) 3 seconds ago                       c3
e1d6ad627dcc        reg.cloud.com/busybox   "ls /home"          4 minutes ago       Exited (0) 4 minutes ago                       wizardly_engelbart
515222fedcc3        reg.cloud.com/busybox   "ls /home"          4 minutes ago       Exited (0) 4 minutes ago                       affectionate_archimedes
5bba4cbd910c        reg.cloud.com/busybox   "sh"                10 minutes ago      Exited (130) 8 minutes ago                     hong
[root@host01-2 ~]# docker export c3 > /tmp/ubuntu2.tar
[root@host01-2 ~]# cd /tmp/
[root@host01-2 tmp]# ls
20c44cd7596ff4807aef84273c99588d22749e2a7e15a7545ac96347baa65eda.json  791a7c2dac840f5430500629700bfe3ba0cdb38a824c4623a96b8f95c94178d0  manifest.json  ubuntu.tar
461feffa712154ef19f136c6eba8ede98241426c8cb3633f2c1da0d4e94770b3       b87aa680bd7b8e8cb13b55aa4024f1e0a08f8f81d1315188ac73685b5a295de4  repositories
4df5c9fd64c6dd1f92a0558541b73a813b77da868b19d58cb94cae42ba32c6a8       daee585a59213f889a01c1441466e0f0aeff76d6fc4d80e166145db3e779a3a5  ubuntu2.tar
[root@host01-2 tmp]# docker import /tmp/ubuntu2.tar hongtest
sha256:9a8341b96270a410180c6d176b63f9cf7f561b0f9bf1be5483355cbad66a9871
[root@host01-2 tmp]# docker images
REPOSITORY              TAG                 IMAGE ID            CREATED             SIZE
hongtest                latest              9a8341b96270        7 seconds ago       98.4MB
<none>                  <none>              ab0f85b7f2e6        4 minutes ago       1.13MB
<none>                  <none>              9579d5ccef21        5 minutes ago       1.13MB
hongimages              latest              f6d8cd4a7d71        9 minutes ago       1.13MB
ubuntu                  14.04               8cef1fa16c77        3 weeks ago         223MB
ubuntu                  latest              452a96d81c30        3 weeks ago         79.6MB
registry                2.5                 36e3b1f8d3f1        4 months ago        37.8MB
reg.cloud.com/ubuntu    latest              20c44cd7596f        6 months ago        123MB
reg.cloud.com/busybox   latest              6ad733544a63        6 months ago        1.13MB
reg.cloud.com/centos    latest              d123f4e55e12        6 months ago        197MB
[root@host01-2 tmp]# docker history
"docker history" requires exactly 1 argument.
See 'docker history --help'.
 
Usage:  docker history [OPTIONS] IMAGE [flags]
 
Show the history of an image
[root@host01-2 tmp]# docker history hongtest
IMAGE               CREATED             CREATED BY          SIZE                COMMENT
9a8341b96270        49 seconds ago                          98.4MB              Imported from -
[root@host01-2 tmp]# docker run -it hongtest bash
root@4cbd222f2ce3:/# ls /test/
root@4cbd222f2ce3:/#
 
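This is why docker history showed only a single "Imported from -" line for hongtest: export dumps just the container's flattened filesystem. Contrast that with save/load, which operate on an image and keep every layer and tag (a sketch reusing the names from this session):

docker save -o /tmp/ubuntu-full.tar reg.cloud.com/ubuntu   # image -> tar, layers and metadata kept
docker load -i /tmp/ubuntu-full.tar                        # restores the image exactly as it was

docker export c3 > /tmp/ubuntu2.tar        # container filesystem only, flattened
docker import /tmp/ubuntu2.tar hongtest    # produces a single-layer image, history lost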




*Sharing the PID and UTS namespaces



- Namespace sharing (namespaces can be shared between containers, or with the host)

Why? => Suppose a security-control application runs inside a container: it has to monitor/inspect the HOST's processes, which is exactly what namespace sharing makes possible!


1) Sharing the PID namespace with the HOST:

[root@host01-2 tmp]# docker run -it reg.cloud.com/ubuntu bash
root@5c47432901b6:/# ps -ef
  PID TTY          TIME CMD
 2681 pts/0    00:00:00 bash
10149 pts/0    00:00:00 bash
10182 pts/0    00:00:00 ps
 
#only a handful of processes are visible (the container has its own PID namespace)
 
root@a36e01acc480:/# exit
exit
 
[root@host01-2 tmp]# docker run -it --pid=host reg.cloud.com/ubuntu bash
root@5c47432901b6:/# ps -ef
UID        PID  PPID  C STIME TTY          TIME CMD
root         1     0  0 May20 ?        00:00:04 /usr/lib/systemd/systemd --system --deserialize 15
root         2     0  0 May20 ?        00:00:00 [kthreadd]
root         3     2  0 May20 ?        00:00:00 [ksoftirqd/0]
root         5     2  0 May20 ?        00:00:00 [kworker/0:0H]
root         7     2  0 May20 ?        00:00:00 [migration/0]
root         8     2  0 May20 ?        00:00:00 [rcu_bh]
root         9     2  0 May20 ?        00:00:01 [rcu_sched]
root        10     2  0 May20 ?        00:00:00 [watchdog/0]
root        11     2  0 May20 ?        00:00:00 [watchdog/1]
root        12     2  0 May20 ?        00:00:00 [migration/1]
root        13     2  0 May20 ?        00:00:00 [ksoftirqd/1]
root        15     2  0 May20 ?        00:00:00 [kworker/1:0H]
root        17     2  0 May20 ?        00:00:00 [kdevtmpfs]
root        18     2  0 May20 ?        00:00:00 [netns]
root        19     2  0 May20 ?        00:00:00 [khungtaskd]
root        20     2  0 May20 ?        00:00:00 [writeback]
root        21     2  0 May20 ?        00:00:00 [kintegrityd]
root        22     2  0 May20 ?        00:00:00 [bioset]
root        23     2  0 May20 ?        00:00:00 [kblockd]
root        24     2  0 May20 ?        00:00:00 [md]
root        31     2  0 May20 ?        00:00:00 [kswapd0]
root        32     2  0 May20 ?        00:00:00 [ksmd]
root        33     2  0 May20 ?        00:00:00 [khugepaged]
root        34     2  0 May20 ?        00:00:00 [crypto]
root        42     2  0 May20 ?        00:00:00 [kthrotld]
root        44     2  0 May20 ?        00:00:00 [kmpath_rdacd]
root        45     2  0 May20 ?        00:00:00 [kpsmoused]
root        47     2  0 May20 ?        00:00:00 [ipv6_addrconf]
root        66     2  0 May20 ?        00:00:00 [deferwq]
root        98     2  0 May20 ?        00:00:00 [kauditd]
root       281     2  0 May20 ?        00:00:00 [scsi_eh_0]
root       282     2  0 May20 ?        00:00:00 [scsi_tmf_0]
root       283     2  0 May20 ?        00:00:00 [ata_sff]
root       284     2  0 May20 ?        00:00:00 [vmw_pvscsi_wq_0]
root       286     2  0 May20 ?        00:00:00 [scsi_eh_1]
root       288     2  0 May20 ?        00:00:00 [scsi_tmf_1]
root       289     2  0 May20 ?        00:00:00 [scsi_eh_2]
root       290     2  0 May20 ?        00:00:00 [scsi_eh_3]
root       291     2  0 May20 ?        00:00:00 [scsi_tmf_2]
root       292     2  0 May20 ?        00:00:00 [scsi_tmf_3]
root       293     2  0 May20 ?        00:00:00 [scsi_eh_4]
root       294     2  0 May20 ?        00:00:00 [scsi_tmf_4]
root       295     2  0 May20 ?        00:00:00 [scsi_eh_5]
root       296     2  0 May20 ?        00:00:00 [scsi_tmf_5]
root       297     2  0 May20 ?        00:00:00 [scsi_eh_6]
root       298     2  0 May20 ?        00:00:00 [scsi_tmf_6]
root       299     2  0 May20 ?        00:00:00 [scsi_eh_7]
root       300     2  0 May20 ?        00:00:00 [scsi_tmf_7]
root       301     2  0 May20 ?        00:00:00 [scsi_eh_8]
root       302     2  0 May20 ?        00:00:00 [scsi_tmf_8]
root       303     2  0 May20 ?        00:00:00 [scsi_eh_9]
root       304     2  0 May20 ?        00:00:00 [scsi_tmf_9]
root       305     2  0 May20 ?        00:00:00 [scsi_eh_10]
root       306     2  0 May20 ?        00:00:00 [scsi_tmf_10]
root       307     2  0 May20 ?        00:00:00 [scsi_eh_11]
root       308     2  0 May20 ?        00:00:00 [scsi_tmf_11]
root       309     2  0 May20 ?        00:00:00 [scsi_eh_12]
root       310     2  0 May20 ?        00:00:00 [scsi_tmf_12]
root       311     2  0 May20 ?        00:00:00 [scsi_eh_13]
root       312     2  0 May20 ?        00:00:00 [scsi_tmf_13]
root       313     2  0 May20 ?        00:00:00 [scsi_eh_14]
root       314     2  0 May20 ?        00:00:00 [scsi_tmf_14]
root       315     2  0 May20 ?        00:00:00 [scsi_eh_15]
root       316     2  0 May20 ?        00:00:00 [scsi_tmf_15]
root       317     2  0 May20 ?        00:00:00 [scsi_eh_16]
root       318     2  0 May20 ?        00:00:00 [scsi_tmf_16]
root       319     2  0 May20 ?        00:00:00 [scsi_eh_17]
root       320     2  0 May20 ?        00:00:00 [scsi_tmf_17]
root       321     2  0 May20 ?        00:00:00 [scsi_eh_18]
root       322     2  0 May20 ?        00:00:00 [scsi_tmf_18]
root       323     2  0 May20 ?        00:00:00 [scsi_eh_19]
root       324     2  0 May20 ?        00:00:00 [scsi_tmf_19]
root       325     2  0 May20 ?        00:00:00 [scsi_eh_20]
root       326     2  0 May20 ?        00:00:00 [scsi_tmf_20]
root       327     2  0 May20 ?        00:00:00 [scsi_eh_21]
root       328     2  0 May20 ?        00:00:00 [scsi_tmf_21]
root       329     2  0 May20 ?        00:00:00 [scsi_eh_22]
root       330     2  0 May20 ?        00:00:00 [scsi_tmf_22]
root       331     2  0 May20 ?        00:00:00 [scsi_eh_23]
root       332     2  0 May20 ?        00:00:00 [scsi_tmf_23]
root       333     2  0 May20 ?        00:00:00 [scsi_eh_24]
root       335     2  0 May20 ?        00:00:00 [scsi_eh_25]
root       336     2  0 May20 ?        00:00:00 [scsi_tmf_24]
root       337     2  0 May20 ?        00:00:00 [scsi_eh_26]
root       338     2  0 May20 ?        00:00:00 [scsi_tmf_26]
root       339     2  0 May20 ?        00:00:00 [scsi_eh_27]
root       340     2  0 May20 ?        00:00:00 [scsi_tmf_27]
root       341     2  0 May20 ?        00:00:00 [scsi_eh_28]
root       342     2  0 May20 ?        00:00:00 [scsi_tmf_28]
root       343     2  0 May20 ?        00:00:00 [scsi_eh_29]
root       344     2  0 May20 ?        00:00:00 [scsi_tmf_29]
root       345     2  0 May20 ?        00:00:00 [scsi_eh_30]
root       346     2  0 May20 ?        00:00:00 [scsi_tmf_30]
root       347     2  0 May20 ?        00:00:00 [scsi_eh_31]
root       348     2  0 May20 ?        00:00:00 [scsi_tmf_31]
root       349     2  0 May20 ?        00:00:00 [scsi_eh_32]
root       350     2  0 May20 ?        00:00:00 [scsi_tmf_32]
root       378     2  0 May20 ?        00:00:00 [kworker/u4:30]
root       379     2  0 May20 ?        00:00:39 [kworker/u4:31]
root       380     2  0 May20 ?        00:00:00 [scsi_tmf_25]
root       396     2  0 May20 ?        00:00:00 [ttm_swap]
root       466     2  0 May20 ?        00:00:00 [kdmflush]
root       467     2  0 May20 ?        00:00:00 [bioset]
root       478     2  0 May20 ?        00:00:00 [kdmflush]
root       479     2  0 May20 ?        00:00:00 [bioset]
root       492     2  0 May20 ?        00:00:00 [bioset]
root       493     2  0 May20 ?        00:00:00 [xfsalloc]
root       494     2  0 May20 ?        00:00:00 [xfs_mru_cache]
root       495     2  0 May20 ?        00:00:00 [xfs-buf/dm-0]
root       496     2  0 May20 ?        00:00:00 [xfs-data/dm-0]
root       497     2  0 May20 ?        00:00:00 [xfs-conv/dm-0]
root       498     2  0 May20 ?        00:00:00 [xfs-cil/dm-0]
root       499     2  0 May20 ?        00:00:00 [xfs-reclaim/dm-]
root       500     2  0 May20 ?        00:00:00 [xfs-log/dm-0]
root       501     2  0 May20 ?        00:00:00 [xfs-eofblocks/d]
root       502     2  0 May20 ?        00:00:12 [xfsaild/dm-0]
root       503     2  0 May20 ?        00:00:00 [kworker/0:1H]
root       572     1  0 May20 ?        00:00:01 /usr/lib/systemd/systemd-journald
root       592     1  0 May20 ?        00:00:00 /usr/sbin/lvmetad -f
root       601     1  0 May20 ?        00:00:00 /usr/lib/systemd/systemd-udevd
root       680     2  0 May20 ?        00:00:00 [xfs-buf/sda1]
root       681     2  0 May20 ?        00:00:00 [xfs-data/sda1]
root       682     2  0 May20 ?        00:00:00 [xfs-conv/sda1]
root       683     2  0 May20 ?        00:00:00 [xfs-cil/sda1]
root       684     2  0 May20 ?        00:00:00 [xfs-reclaim/sda]
root       685     2  0 May20 ?        00:00:00 [xfs-log/sda1]
root       686     2  0 May20 ?        00:00:00 [xfs-eofblocks/s]
root       687     2  0 May20 ?        00:00:00 [xfsaild/sda1]
root       716     2  0 May20 ?        00:00:01 [kworker/1:1H]
81         736     1  0 May20 ?        00:00:00 /bin/dbus-daemon --system --address=systemd: --nofork --nopidfile --systemd-activation
999        738     1  0 May20 ?        00:00:00 /usr/lib/polkit-1/polkitd --no-debug
root       739     1  0 May20 ?        00:00:08 /usr/sbin/irqbalance --foreground
root       742     1  0 May20 ?        00:00:09 /usr/sbin/rsyslogd -n
root       743     1  0 May20 ?        00:00:00 /usr/lib/systemd/systemd-logind
root       761     1  0 May20 ?        00:00:00 /sbin/agetty --noclear tty1 linux
root       772     1  0 May20 ?        00:00:01 /usr/bin/python -Es /usr/sbin/firewalld --nofork --nopid
root       773     1  0 May20 ?        00:00:04 /usr/sbin/NetworkManager --no-daemon
root      1065     1  0 May20 ?        00:00:00 /usr/sbin/sshd -D
root      1066     1  0 May20 ?        00:00:25 /usr/bin/python -Es /usr/sbin/tuned --P
root      1153     1  0 May20 ?        00:00:01 /usr/libexec/postfix/master -w
89        1167  1153  0 May20 ?        00:00:00 qmgr --t unix -u
root      2675  1065  0 May21 ?        00:00:01 sshd: root@pts/0
root      2681  2675  0 May21 pts/0    00:00:00 -bash
root      2894     1  0 May21 ?        00:00:00 /usr/sbin/crond -n
root      3603     1  0 May21 ?        00:00:00 /sbin/auditd
root      3912     1  0 May21 ?        00:03:55 /usr/bin/dockerd
root      3917  3912  0 May21 ?        00:02:39 docker-containerd --config /var/run/docker/containerd/containerd.toml
89        8177  1153  0 00:27 ?        00:00:00 pickup --t unix -u
root      8946     2  0 01:28 ?        00:00:00 [kworker/0:2]
root      9186     2  0 01:33 ?        00:00:00 [kworker/0:3]
root      9421     2  0 01:38 ?        00:00:00 [kworker/1:0]
root      9668  1065  0 01:43 ?        00:00:00 sshd: root@pts/1
root      9672  9668  0 01:43 ?        00:00:00 -bash
root      9878     2  0 01:47 ?        00:00:00 [kworker/1:1]
root      9998     2  0 01:52 ?        00:00:00 [kworker/1:2]
root     10029     2  0 01:54 ?        00:00:00 [kworker/0:0]
root     10121  9672  2 01:56 ?        00:00:00 docker run -it --pid=host reg.cloud.com/ubuntu bash
root     10133  3917  0 01:56 ?        00:00:00 docker-containerd-shim -namespace moby -workdir /var/lib/docker/containerd/daemon/io.containerd.runtime.v1.linux/moby/5
root     10149 10133  0 01:56 pts/0    00:00:00 bash
root     10183 10149  0 01:56 pts/0    00:00:00 ps -ef
 
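The PID namespace can also be shared between two containers instead of with the host; a sketch with hypothetical names:

docker run -d --name app reg.cloud.com/ubuntu sleep 3600
docker run -it --pid=container:app reg.cloud.com/ubuntu ps -ef   # sees app's processes as well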



2) The hostname (UTS namespace) can be shared between a container and the HOST:

[root@host01-2 tmp]# docker ps -a
CONTAINER ID        IMAGE                   COMMAND             CREATED             STATUS                        PORTS               NAMES
ab0611fb1cc8        reg.cloud.com/ubuntu    "bash"              5 minutes ago       Exited (0) 23 seconds ago                       sharp_benz
5c47432901b6        reg.cloud.com/ubuntu    "bash"              10 minutes ago      Exited (130) 6 minutes ago                      zealous_beaver
a36e01acc480        reg.cloud.com/ubuntu    "bash"              13 minutes ago      Exited (0) 11 minutes ago                       wonderful_gates
4cbd222f2ce3        hongtest                "bash"              15 minutes ago      Exited (127) 13 minutes ago                     romantic_chandrasekhar
9274dde20cf2        reg.cloud.com/ubuntu    "bash"              21 minutes ago      Exited (0) 20 minutes ago                       c3
e1d6ad627dcc        reg.cloud.com/busybox   "ls /home"          25 minutes ago      Exited (0) 25 minutes ago                       wizardly_engelbart
515222fedcc3        reg.cloud.com/busybox   "ls /home"          25 minutes ago      Exited (0) 25 minutes ago                       affectionate_archimedes
5bba4cbd910c        reg.cloud.com/busybox   "sh"                30 minutes ago      Exited (130) 28 minutes ago                     hong
 
#the --uts=host option makes the container share the host's hostname
[root@host01-2 tmp]# docker run -it --uts=host reg.cloud.com/ubuntu
root@host01-2:/# hostname
host01-2.cloud.com
root@host01-2:/# ^C
root@host01-2:/# ^C
root@host01-2:/# exit
exit
[root@host01-2 tmp]# hostname
host01-2.cloud.com
[root@host01-2 tmp]#
 
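For contrast: without --uts each container gets its own UTS namespace, so its hostname defaults to the container ID (a quick sketch):

docker run --rm reg.cloud.com/busybox hostname              # prints a container ID
docker run --rm --uts=host reg.cloud.com/busybox hostname   # prints host01-2.cloud.com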


* Checking a container's IP (from the JSON-format object that inspect returns, put the field you need inside double curly braces):

[root@host01-2 tmp]# docker inspect -f '{{.NetworkSettings.IPAddress}}' ab0611fb1cc8
172.17.0.2
 
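.NetworkSettings.IPAddress only covers the default bridge network; for containers attached to user-defined networks the address lives under .NetworkSettings.Networks, so ranging over that map is the more general template:

docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}} {{end}}' ab0611fb1cc8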


* Checking a container's PID (as seen from the host):

[root@host01-2 tmp]# docker ps
CONTAINER ID        IMAGE                  COMMAND             CREATED              STATUS              PORTS               NAMES
ab0611fb1cc8        reg.cloud.com/ubuntu   "bash"              About a minute ago   Up 58 seconds                           sharp_benz
[root@host01-2 tmp]# docker inspect -f '{{.State.Pid}}' ab0611fb1cc8
10301
 
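With that PID, the container's namespaces can be entered from the host using nsenter (part of util-linux; run as root); the flags select the mount, UTS, IPC, network, and PID namespaces:

nsenter -t 10301 -m -u -i -n -p sh    # join the container's main namespaces and run a shell
nsenter -t 10301 -n ip addr           # or enter just the network namespace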

▶ Docker installation link:


https://docs.docker.com/toolbox/toolbox_install_windows/





*Container concepts:
There are two types:

1) Linux Container

2) Docker


We should also think about what it means to move from a VM environment to the container model.


Looking at how infrastructure has changed, it has been moving from

1. Physical machines

2. VMs

3. Containers

and the shift is still under way!!


*From a business perspective

(e.g., deploying an application onto each of these kinds of infrastructure)

The move from physical machines to VMs happened because the enterprise had real pain points... the change did not happen by accident...

- Someone turns a business pain point into a methodology.

- Then a solution appears that can implement the methodology.

It has always proceeded in this order.


*Server virtualization

When physical systems are replaced by VMware, a great deal of hardware disappears.

Far fewer people are needed.

For IT providers this is an unwelcome trend,

but service providers and the business side welcome it because it cuts COST.


*Business issues with the old physical-machine hardware

As DATA centers keep multiplying, electricity becomes by far the biggest cost.

For the business, VMs cut that cost dramatically...

Applications keep running... the infrastructure is shared with other lines of business... economies of scale

==> Hardware Consolidation

With a well-planned configuration, performance is not a significant issue compared to the old setup.


*VMs became the mainstream of the data center

More than 70% of systems moved onto a VM base... the CLOUD...





*Docker containers were first mentioned in the market in 2013...

and released in 2014 (the arrival of the container).


From the MICRO SERVICE ARCHITECTURE and DEVOPS point of view,

what does IT want most these days?


1) Large integrated services (the system grows heavy, and dependent libraries start to collide...)

2) Modular architecture (everything distributed across the network)

3) The MSA methodology (distributed computing... one application per system... Micro Service... carving the service down to the smallest possible unit)

RESTFUL... distributed at the network level


*From an OPERATIONS perspective:

- IT is COST.

Of the 7:3 split in IT cost, the 7 is maintenance...


Virtualization cuts the hardware maintenance cost... moving further into software minimizes maintenance cost too. The goal is to flip the ratio:

7: spend more on innovation

3: keep maintenance to a minimum....



The problem is programs built too big (to avoid losing to competitors you must release quickly... so the large system has to be modularized... minimized...)

=> Verify the source code and release right away.

The more often you release, the more reliable the software becomes.

=> Cut the application down to the smallest possible UNITs.

Drawback: the teams stop communicating with each other and expertise gets siloed (e.g., developers <=> testers).

So development teams are made small again... and the missing expertise is covered by automation?

"How many releases can you do from changing one line of code (keep dependencies low)?"




Now consider the intersection of PAAS, MSA, and DEVOPS...

Combining all three on top of VMs became too hard... a hard limit!!

=> the birth of the Linux container (2008)

=> Docker (2014)



Structure 1)


APP1, APP2


Kernel


H/W


But with this structure the apps share the network and process space... it gets heavy.



Then what about a structure like the one below (three kernels)?

Every operation passes through a kernel twice... so performance is worse than native,

and it uses a lot of resources.


Structure 2) VM


APP1                                APP2

Kernel(4Core /8G)                    Kernel(4Core /8G)


Host kernel/OS (no single application can occupy all 10 cores / 64 GB)


H/W




// A middle ground between the two => the namespace concept (isolation!!)

Network, PID, IPC, user/group, and mount are all as independent as in a VM,

yet relatively few resources are used (a single kernel),

with performance close to bare metal.


Structure 3) Container


App1            App2

Centos(OS)    Ubuntu(OS)    


Shared kernel (a single application CAN take the full 10 cores / 64 GB when it needs to)


H/W




*Limitations of the Linux container:

The Linux container had no concept of distributed deployment... it was simply a lightweight VM.

Docker supports distributed deployment of containers.

Shipping/deployment is the key point!!



- Linux Container : OS container (one OS running several apps)

- Docker Container : app container (each application runs on its own independent OS image; an OS-container setup is also possible - MSA)




* Herding containers together in large groups and managing them...

Multi Container Management Solutions:

- Swarm, Compose (Docker: multi-container management)

- Kubernetes (Google: multi-container management)


Let's build a lightweight VM!!
