LB > Node Port > Cluster IP


Creating a NodePort service automatically creates a ClusterIP as well.


*Deleting a service (SVC)

[root@host01-4 hk]# k get service
NAME           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
headless-svc   ClusterIP   10.96.102.169   <none>        80/TCP    27m
kubernetes     ClusterIP   10.96.0.1       <none>        443/TCP   48m
test-svc       ClusterIP   10.96.25.31     <none>        80/TCP    45m
[root@host01-4 hk]# k delete svc test-svc
service "test-svc" deleted
[root@host01-4 hk]#
 


*Creating a NodePort (after creation you can curl the node on that port):

vi nodePort.yaml
 
apiVersion: v1
kind: Service
metadata:
  name: test-svc
spec:
  type: NodePort
  selector:
    type: test
  ports:
  - port: 80
    targetPort: 8080
 
 
[root@host01-4 hk]# k create -f nodePort.yaml
 
service "test-svc" created
[root@host01-4 hk]#
[root@host01-4 hk]# k get svc
NAME           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
headless-svc   ClusterIP   10.96.102.169   <none>        80/TCP         31m
kubernetes     ClusterIP   10.96.0.1       <none>        443/TCP        52m
test-svc       NodePort    10.107.213.48   <none>        80:32045/TCP   6s
 
 
# In a NodePort service, port 80 is the cluster-IP (service) port and 32045 is the node port
 
 


*Creating a LoadBalancer

k edit svc test-svc
 
# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: 2018-05-25T07:44:57Z
  name: test-svc
  namespace: default
  resourceVersion: "29844"
  selfLink: /api/v1/namespaces/default/services/test-svc
  uid: 85b2c6ea-5fef-11e8-8e09-005056b28b62
spec:
  clusterIP: 10.107.213.48
  externalTrafficPolicy: Cluster
  ports:
  - nodePort: 32045
    port: 80
    protocol: TCP
    targetPort: 8080
  selector:
    type: test
  sessionAffinity: None
  type: LoadBalancer            # change the type to LoadBalancer...
status:
  loadBalancer: {}
 
 
[root@host01-4 hk]# k get svc
NAME           TYPE           CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
headless-svc   ClusterIP      10.96.102.169   <none>        80/TCP         38m
kubernetes     ClusterIP      10.96.0.1       <none>        443/TCP        59m
test-svc       LoadBalancer   10.107.213.48   <pending>     80:32045/TCP   7m  # NodePort and ClusterIP were configured automatically
[root@host01-4 hk]#
 


When you create the higher-level network object, the lower-level mappings are configured automatically.
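
For example, if you create the top-level Service type directly, the pieces under it are allocated for you. A minimal sketch (the service name here is hypothetical, not from the session above):

# Create a LoadBalancer service directly; Kubernetes allocates the ClusterIP
# and a NodePort underneath it automatically.
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: Service
metadata:
  name: lb-svc
spec:
  type: LoadBalancer
  selector:
    type: test
  ports:
  - port: 80          # service (cluster IP) port
    targetPort: 8080  # pod port
EOF

kubectl get svc lb-svc   # PORT(S) shows 80:<auto-assigned node port>/TCP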




*Why targetPort is set


 
  ports:
  - nodePort: 32045
    targetPort: 8080
 
 
 


=> Because the service (cluster) port and the pod port are different.



*From a business point of view, you must be able to check with a probe whether the application is actually working...


A client connects!!

Svc > Pod (X)  — the service sends traffic to a pod that is not actually working,

so the client concludes the service is broken.

Using a readinessProbe, the cluster can check whether a pod is actually ready to serve:

      containers:
      - image: reg.cloud.com/kubia
        name: kubia
        readinessProbe:
          exec:
            command:
            - ls
            - /var/ready
 


*ReadinessProbe creation example:


#vi rc-probe.yaml 
# If /var/ready exists inside the pod, the probe passes (the pod is marked Ready)
apiVersion: v1
kind: ReplicationController
metadata:
  name: rc-readiness
spec:
  replicas: 3
  template:
    metadata:
      labels:
        type: test
    spec:
      containers:
      - image: reg.cloud.com/kubia
        name: kubia
        readinessProbe:
          exec:
            command:
            - ls
            - /var/ready
        ports:
        - containerPort: 8080
          protocol: TCP
 
 
#vi service.yaml
 
apiVersion: v1
kind: Service
metadata:
  name: test-svc
spec:
  selector:
    type: test
  ports:
  - port: 80
    targetPort: 8080
 
 
 
[root@host01-4 hk]# k create -f rc-probe.yaml
 
replicationcontroller "rc-readiness" created
[root@host01-4 hk]# k get po -o wide
NAME                 READY     STATUS    RESTARTS   AGE       IP          NODE
rc-readiness-22hxs   0/1       Running   0          10s       10.36.0.2   host01-3.cloud.com
rc-readiness-2pt29   0/1       Running   0          10s       10.44.0.1   host01-2.cloud.com
rc-readiness-h74t2   0/1       Running   0          10s       10.36.0.1   host01-3.cloud.com
 
 
# The pods are Running, but none of them is Ready
 
 
 
[root@host01-4 hk]# k create -f service.yaml
service "test-svc" created
[root@host01-4 hk]# k get svc
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1     <none>        443/TCP   6m
test-svc     ClusterIP   10.98.99.22   <none>        80/TCP    4s
[root@host01-4 hk]# k get ep
NAME         ENDPOINTS          AGE
kubernetes   10.10.12.14:6443   6m
test-svc                        9s
 
 
# test-svc has no endpoints yet.
 
[root@host01-4 hk]# k exec rc-readiness-22hxs -- touch /var/ready
 
# Create /var/ready in the first pod (to make it Ready)
 
[root@host01-4 hk]# k get ep
NAME         ENDPOINTS          AGE
kubernetes   10.10.12.14:6443   8m
test-svc     10.36.0.2:8080     1m
 
# The test-svc endpoint is now populated
 
[root@host01-4 hk]# k get svc
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1     <none>        443/TCP   8m
test-svc     ClusterIP   10.98.99.22   <none>        80/TCP    1m
 
[root@host01-4 hk]# k get po -o wide
NAME                 READY     STATUS    RESTARTS   AGE       IP          NODE
rc-readiness-22hxs   1/1       Running   0          4m        10.36.0.2   host01-3.cloud.com
rc-readiness-2pt29   0/1       Running   0          4m        10.44.0.1   host01-2.cloud.com
rc-readiness-h74t2   0/1       Running   0          4m        10.36.0.1   host01-3.cloud.com
 
# The first pod has switched to Ready
 
 
[root@host01-4 hk]# k exec rc-readiness-2pt29 -- touch /var/ready
[root@host01-4 hk]# k get po -o wide
NAME                 READY     STATUS    RESTARTS   AGE       IP          NODE
rc-readiness-22hxs   1/1       Running   0          5m        10.36.0.2   host01-3.cloud.com
rc-readiness-2pt29   1/1       Running   0          5m        10.44.0.1   host01-2.cloud.com
rc-readiness-h74t2   0/1       Running   0          5m        10.36.0.1   host01-3.cloud.com
 
# The second pod switches to Ready
 
 
 
 






*Kubernetes provides a common API and a self-healing framework that automatically handles:

- machine failures

- simplified application deployment, logging, and monitoring


It is a tool for getting autonomy with a minimum of control!


The role of the operations team keeps shrinking...

Business is focusing more and more on services and applications... and infrastructure is getting a smaller share of the attention.


*Kubernetes architecture:


1) API node

The Kubernetes API server has to hold all the information about the nodes.

The first thing the scheduler does is decide where to deploy.

Within the API node, the controller manager (e.g. the replication controller) keeps track of node status.

-> Kubernetes uses etcd, a distributed key-value database.



2) Worker node

CoreOS released a container runtime called Rocket (rkt) because Docker felt too heavy!

The kubelet controls the container-related components on the node.

Pod-to-pod communication across nodes uses VXLAN (an overlay network).

kube-proxy has an application-level instance,

and the actual data-path rules live in iptables on the host (at the kernel layer).



※ The kubelet is the only component that runs as a system service! Everything else can be installed as containers.
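
A quick way to see this on a node (a sketch; the exact container names vary by setup):

# kubelet runs as a systemd service...
systemctl status kubelet
# ...while the control-plane and network components run as containers
docker ps --format '{{.Names}}' | grep -E 'kube|etcd|weave'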



- Why use Swarm? It is easy to use.

- Why use Kubernetes? It can control far more resources.





1. Install Docker (all nodes)


yum install -y docker
systemctl enable docker && systemctl start docker
 
touch /etc/docker/daemon.json
cat <<EOF > /etc/docker/daemon.json
{
"insecure-registries":["10.10.12.0/24"]
}
EOF


2. Install Kubernetes (all nodes)


cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
setenforce 0
sed -i --follow-symlinks 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
systemctl stop firewalld.service
systemctl disable firewalld.service
 
cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
 
swapoff -a && sed -i '/swap/s/^/#/' /etc/fstab
 
yum install -y kubelet kubeadm kubectl
systemctl enable kubelet && systemctl start kubelet


*Master node installation (load all of the images):

> docker load -i <image name>

[root@host01-2 ~]# docker images
REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
[root@host01-2 ~]# ls
anaconda-ks.cfg  dns-kube-dns  dns-sidecar  etcd-amd64  hk
[root@host01-2 ~]# cd hk
[root@host01-2 hk]# ls
dns-kube-dns  etcd-amd64  kubeadm.host08-1.root.log.INFO.20180525-060140.2620  kube-apiserver   kube-proxy      weave-kube
dns-sidecar   k8s-dns     kubeadm.INFO                                         kube-controller  kube-scheduler  weave-npc
[root@host01-2 hk]# ls -al
total 974632
drwxr-xr-x. 2 root root       277 May 25 10:27 .
dr-xr-x---5 root root       226 May 25 10:27 ..
-rw-------1 root root  50727424 May 25 10:27 dns-kube-dns
-rw-------1 root root  42481152 May 25 10:27 dns-sidecar
-rw-------1 root root 193461760 May 25 10:27 etcd-amd64
-rw-------1 root root  41239040 May 25 10:27 k8s-dns
-rw-r--r--1 root root       343 May 25 10:27 kubeadm.host08-1.root.log.INFO.20180525-060140.2620
-rw-r--r--1 root root       343 May 25 10:27 kubeadm.INFO
-rw-------1 root root 225319936 May 25 10:27 kube-apiserver
-rw-------1 root root 148110336 May 25 10:27 kube-controller
-rw-------1 root root  98924032 May 25 10:27 kube-proxy
-rw-------1 root root  50635776 May 25 10:27 kube-scheduler
-rw-------1 root root  99517952 May 25 10:27 weave-kube
-rw-------1 root root  47575552 May 25 10:27 weave-npc
[root@host01-2 hk]# docker load -i kube-proxy
582b548209e1: Loading layer [==================================================>]  44.2 MB/44.2 MB
e20569a478ed: Loading layer [==================================================>3.358 MB/3.358 MB
6b4e4941a965: Loading layer [==================================================>51.35 MB/51.35 MB
Loaded image: k8s.gcr.io/kube-proxy-amd64:v1.10.3
[root@host01-2 hk]# docker load -i weave-kube
5bef08742407: Loading layer [==================================================>4.221 MB/4.221 MB
c3355c8b5c3e: Loading layer [==================================================>19.03 MB/19.03 MB
a83fa3df4138: Loading layer [==================================================>29.55 MB/29.55 MB
020fdc01af85: Loading layer [==================================================>]  11.6 MB/11.6 MB
2ea881a632b7: Loading layer [==================================================>2.048 kB/2.048 kB
396aa46bcbea: Loading layer [==================================================>35.09 MB/35.09 MB
Loaded image: docker.io/weaveworks/weave-kube:2.3.0
[root@host01-2 hk]# docker load -i weave-npc
8dccfe2dec8c: Loading layer [==================================================>2.811 MB/2.811 MB
3249ff6df12f: Loading layer [==================================================>40.52 MB/40.52 MB
3dc458d34b22: Loading layer [==================================================>]  2.56 kB/2.56 kB
Loaded image: docker.io/weaveworks/weave-npc:2.3.0
[root@host01-2 hk]#
 

*Worker node installation (load kube-proxy, weave-kube, and weave-npc):

[root@host01-2 ~]# docker images
REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
[root@host01-2 ~]# ls
anaconda-ks.cfg  dns-kube-dns  dns-sidecar  etcd-amd64  hk
[root@host01-2 ~]# cd hk
[root@host01-2 hk]# ls
dns-kube-dns  etcd-amd64  kubeadm.host08-1.root.log.INFO.20180525-060140.2620  kube-apiserver   kube-proxy      weave-kube
dns-sidecar   k8s-dns     kubeadm.INFO                                         kube-controller  kube-scheduler  weave-npc
[root@host01-2 hk]# ls -al
total 974632
drwxr-xr-x. 2 root root       277 May 25 10:27 .
dr-xr-x---5 root root       226 May 25 10:27 ..
-rw-------1 root root  50727424 May 25 10:27 dns-kube-dns
-rw-------1 root root  42481152 May 25 10:27 dns-sidecar
-rw-------1 root root 193461760 May 25 10:27 etcd-amd64
-rw-------1 root root  41239040 May 25 10:27 k8s-dns
-rw-r--r--1 root root       343 May 25 10:27 kubeadm.host08-1.root.log.INFO.20180525-060140.2620
-rw-r--r--1 root root       343 May 25 10:27 kubeadm.INFO
-rw-------1 root root 225319936 May 25 10:27 kube-apiserver
-rw-------1 root root 148110336 May 25 10:27 kube-controller
-rw-------1 root root  98924032 May 25 10:27 kube-proxy
-rw-------1 root root  50635776 May 25 10:27 kube-scheduler
-rw-------1 root root  99517952 May 25 10:27 weave-kube
-rw-------1 root root  47575552 May 25 10:27 weave-npc
[root@host01-2 hk]# docker load -i kube-proxy
582b548209e1: Loading layer [==================================================>]  44.2 MB/44.2 MB
e20569a478ed: Loading layer [==================================================>3.358 MB/3.358 MB
6b4e4941a965: Loading layer [==================================================>51.35 MB/51.35 MB
Loaded image: k8s.gcr.io/kube-proxy-amd64:v1.10.3
[root@host01-2 hk]# docker load -i weave-kube
5bef08742407: Loading layer [==================================================>4.221 MB/4.221 MB
c3355c8b5c3e: Loading layer [==================================================>19.03 MB/19.03 MB
a83fa3df4138: Loading layer [==================================================>29.55 MB/29.55 MB
020fdc01af85: Loading layer [==================================================>]  11.6 MB/11.6 MB
2ea881a632b7: Loading layer [==================================================>2.048 kB/2.048 kB
396aa46bcbea: Loading layer [==================================================>35.09 MB/35.09 MB
Loaded image: docker.io/weaveworks/weave-kube:2.3.0
[root@host01-2 hk]# docker load -i weave-npc
8dccfe2dec8c: Loading layer [==================================================>2.811 MB/2.811 MB
3249ff6df12f: Loading layer [==================================================>40.52 MB/40.52 MB
3dc458d34b22: Loading layer [==================================================>]  2.56 kB/2.56 kB
Loaded image: docker.io/weaveworks/weave-npc:2.3.0
[root@host01-2 hk]#
 


*Run on the master node:

kubeadm init
 
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config


Running it:

[root@host01-4 ~]# docker images
REPOSITORY                                 TAG                 IMAGE ID            CREATED             SIZE
k8s.gcr.io/kube-proxy-amd64                v1.10.3             4261d315109d        3 days ago          97.1 MB
k8s.gcr.io/kube-apiserver-amd64            v1.10.3             e03746fe22c3        3 days ago          225 MB
k8s.gcr.io/kube-controller-manager-amd64   v1.10.3             40c8d10b2d11        3 days ago          148 MB
k8s.gcr.io/kube-scheduler-amd64            v1.10.3             353b8f1d102e        3 days ago          50.4 MB
docker.io/weaveworks/weave-npc             2.3.0               21545eb3d6f9        6 weeks ago         47.2 MB
docker.io/weaveworks/weave-kube            2.3.0               f15514acce73        6 weeks ago         96.8 MB
k8s.gcr.io/etcd-amd64                      3.1.12              52920ad46f5b        2 months ago        193 MB
k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64     1.14.8              c2ce1ffb51ed        4 months ago        41 MB
k8s.gcr.io/k8s-dns-sidecar-amd64           1.14.8              6f7f2dc7fab5        4 months ago        42.2 MB
k8s.gcr.io/k8s-dns-kube-dns-amd64          1.14.8              80cc5ea4b547        4 months ago        50.5 MB
[root@host01-4 ~]# clear
[root@host01-4 ~]# kubeadm init
[init] Using Kubernetes version: v1.10.3
[init] Using Authorization modes: [Node RBAC]
[preflight] Running pre-flight checks.
        [WARNING FileExisting-crictl]: crictl not found in system path
Suggestion: go get github.com/kubernetes-incubator/cri-tools/cmd/crictl
[certificates] Generated ca certificate and key.
[certificates] Generated apiserver certificate and key.
[certificates] apiserver serving cert is signed for DNS names [host01-4.cloud.com kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.10.12.14]
[certificates] Generated apiserver-kubelet-client certificate and key.
[certificates] Generated etcd/ca certificate and key.
[certificates] Generated etcd/server certificate and key.
[certificates] etcd/server serving cert is signed for DNS names [localhost] and IPs [127.0.0.1]
[certificates] Generated etcd/peer certificate and key.
[certificates] etcd/peer serving cert is signed for DNS names [host01-4.cloud.com] and IPs [10.10.12.14]
[certificates] Generated etcd/healthcheck-client certificate and key.
[certificates] Generated apiserver-etcd-client certificate and key.
[certificates] Generated sa key and public key.
[certificates] Generated front-proxy-ca certificate and key.
[certificates] Generated front-proxy-client certificate and key.
[certificates] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/admin.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/kubelet.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/controller-manager.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/scheduler.conf"
[controlplane] Wrote Static Pod manifest for component kube-apiserver to "/etc/kubernetes/manifests/kube-apiserver.yaml"
[controlplane] Wrote Static Pod manifest for component kube-controller-manager to "/etc/kubernetes/manifests/kube-controller-manager.yaml"
[controlplane] Wrote Static Pod manifest for component kube-scheduler to "/etc/kubernetes/manifests/kube-scheduler.yaml"
[etcd] Wrote Static Pod manifest for a local etcd instance to "/etc/kubernetes/manifests/etcd.yaml"
[init] Waiting for the kubelet to boot up the control plane as Static Pods from directory "/etc/kubernetes/manifests".
[init] This might take a minute or longer if the control plane images have to be pulled.
[apiclient] All control plane components are healthy after 25.003852 seconds
[uploadconfig] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[markmaster] Will mark node host01-4.cloud.com as master by adding a label and a taint
[markmaster] Master host01-4.cloud.com tainted and labelled with key/value: node-role.kubernetes.io/master=""
[bootstraptoken] Using token: hlz7wp.qjrgmsq2yn9f94wa
[bootstraptoken] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstraptoken] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstraptoken] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstraptoken] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: kube-dns
[addons] Applied essential addon: kube-proxy
 
Your Kubernetes master has initialized successfully!
 
To start using your cluster, you need to run the following as a regular user:
 
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
 
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/
 
You can now join any number of machines by running the following on each node
as root:
 
  kubeadm join 10.10.12.14:6443 --token hlz7wp.qjrgmsq2yn9f94wa --discovery-token-ca-cert-hash sha256:43e61417b20ede5ca530fe0638990bc1a805b5f2a9e25b5aa2f40023b392fb50
 
[root@host01-4 ~]#
[root@host01-4 ~]# mkdir -p $HOME/.kube
[root@host01-4 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@host01-4 ~]# chown $(id -u):$(id -g) $HOME/.kube/config
[root@host01-4 ~]#
 


Copy the join token from the output above:

  kubeadm join 10.10.12.14:6443 --token hlz7wp.qjrgmsq2yn9f94wa --discovery-token-ca-cert-hash sha256:43e61417b20ede5ca530fe0638990bc1a805b5f2a9e25b5aa2f40023b392fb50
 
 


*Install the pod network (master)

sysctl net.bridge.bridge-nf-call-iptables=1
 
export kubever=$(kubectl version | base64 | tr -d '\n')
 
kubectl apply -"https://cloud.weave.works/k8s/net?k8s-version=$kubever"
 
kubectl get pods --all-namespaces 


Execution result:

[root@host01-4 ~]# sysctl net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-iptables = 1
[root@host01-4 ~]#
[root@host01-4 ~]# export kubever=$(kubectl version | base64 | tr -d '\n')
[root@host01-4 ~]#
[root@host01-4 ~]# kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$kubever"
serviceaccount "weave-net" created
clusterrole.rbac.authorization.k8s.io "weave-net" created
clusterrolebinding.rbac.authorization.k8s.io "weave-net" created
role.rbac.authorization.k8s.io "weave-net" created
rolebinding.rbac.authorization.k8s.io "weave-net" created
daemonset.extensions "weave-net" created
[root@host01-4 ~]# kubectl get pods --all-namespaces
NAMESPACE     NAME                                         READY     STATUS    RESTARTS   AGE
kube-system   etcd-host01-4.cloud.com                      1/1       Running   0          4m
kube-system   kube-apiserver-host01-4.cloud.com            1/1       Running   0          4m
kube-system   kube-controller-manager-host01-4.cloud.com   1/1       Running   0          5m
kube-system   kube-dns-86f4d74b45-t9df2                    0/3       Pending   0          5m
kube-system   kube-proxy-fs9d8                             1/1       Running   0          5m
kube-system   kube-scheduler-host01-4.cloud.com            1/1       Running   0          4m
kube-system   weave-net-zr5qr                              2/2       Running   0          11s
[root@host01-4 ~]#
 
 


*Run the join command copied from the master node on each of the two worker nodes:

[root@host01-3 ~]# kubeadm join 10.10.12.14:6443 --token hlz7wp.qjrgmsq2yn9f94wa --discovery-token-ca-cert-hash sha256:43e61417b20ede5ca530fe0638990bc1a805b5f2a9e25b5aa2f40023b392fb50
[preflight] Running pre-flight checks.
        [WARNING FileExisting-crictl]: crictl not found in system path
Suggestion: go get github.com/kubernetes-incubator/cri-tools/cmd/crictl
[discovery] Trying to connect to API Server "10.10.12.14:6443"
[discovery] Created cluster-info discovery client, requesting info from "https://10.10.12.14:6443"
[discovery] Requesting info from "https://10.10.12.14:6443" again to validate TLS against the pinned public key
[discovery] Cluster info signature and contents are valid and TLS certificate validates against pinned roots, will use API Server "10.10.12.14:6443"
[discovery] Successfully established connection with API Server "10.10.12.14:6443"
 
This node has joined the cluster:
* Certificate signing request was sent to master and a response
  was received.
* The Kubelet was informed of the new secure connection details.
 
Run 'kubectl get nodes' on the master to see this node join the cluster.
[root@host01-3 ~]#
 


*On the master node, check that the nodes have joined (their STATUS should become Ready).

 
[root@host01-4 ~]# kubectl get no
NAME                 STATUS     ROLES     AGE       VERSION
host01-2.cloud.com   NotReady   <none>    6s        v1.10.3
host01-3.cloud.com   NotReady   <none>    11s       v1.10.3
host01-4.cloud.com   Ready      master    9m        v1.10.3
[root@host01-4 ~]# ^C
 
[root@host01-4 ~]# kubectl get no
NAME                 STATUS    ROLES     AGE       VERSION
host01-2.cloud.com   Ready     <none>    2m        v1.10.3
host01-3.cloud.com   Ready     <none>    2m        v1.10.3
host01-4.cloud.com   Ready     master    11m       v1.10.3
[root@host01-4 ~]#
 






*Tab auto-completion


On AWS and Azure the kubectl command is just 'k' (it is registered as an alias).


*Registering the alias:

[root@host01-4 ~]# alias k=kubectl
[root@host01-4 ~]# source <(kubectl completion bash | sed  s/kubectl/k/g)
[root@host01-4 ~]# k get
You must specify the type of resource to get. Valid resource types include:
 
  * all
  * certificatesigningrequests (aka 'csr')
  * clusterrolebindings
  * clusterroles
  * componentstatuses (aka 'cs')
  * configmaps (aka 'cm')
  * controllerrevisions
  * cronjobs
  * customresourcedefinition (aka 'crd')
  * daemonsets (aka 'ds')
  * deployments (aka 'deploy')
  * endpoints (aka 'ep')
  * events (aka 'ev')
  * horizontalpodautoscalers (aka 'hpa')
  * ingresses (aka 'ing')
  * jobs
  * limitranges (aka 'limits')
  * namespaces (aka 'ns')
  * networkpolicies (aka 'netpol')
  * nodes (aka 'no')
  * persistentvolumeclaims (aka 'pvc')
  * persistentvolumes (aka 'pv')
  * poddisruptionbudgets (aka 'pdb')
  * podpreset
  * pods (aka 'po')
  * podsecuritypolicies (aka 'psp')
  * podtemplates
  * replicasets (aka 'rs')
  * replicationcontrollers (aka 'rc')
  * resourcequotas (aka 'quota')
  * rolebindings
  * roles
  * secrets
  * serviceaccounts (aka 'sa')
  * services (aka 'svc')
  * statefulsets (aka 'sts')
  * storageclasses (aka 'sc')error: Required resource not specified.
Use "kubectl explain <resource>" for a detailed description of that resource (e.g. kubectl explain pods).
See 'kubectl get -h' for help and examples.
[root@host01-4 ~]# echo "alias k=kubectl" >> ~/.bashrc
[root@host01-4 ~]# echo "source <(kubectl completion bash | sed s/kubectl/k/g)" >> ~/.bashrc
 





*Pod concepts

The basis of a microservices architecture: put only one application in each container!

- Network sharing: containers in the same pod share the network (they share the network namespace, so they talk to each other over localhost).

- Storage sharing: mount a volume into the pod and the containers share storage as well!


- Containers inside a single pod always run on the same host, and they cannot both expose a service on the same port.

(An example of a badly composed pod: putting a DB container and a web container in the same pod causes problems when scaling out,

so split them into a web pod, a WAS pod, and a DB pod.)

The pod is the smallest unit that Kubernetes works with!
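
A minimal sketch of those two sharing properties (the pod name and paths are illustrative assumptions; the images are the lab registry ones used elsewhere in these notes). The sidecar writes a file into the shared emptyDir volume and nginx serves it; the two containers could equally reach each other over localhost.

apiVersion: v1
kind: Pod
metadata:
  name: shared-pod
spec:
  volumes:
  - name: shared-data
    emptyDir: {}          # volume shared by both containers
  containers:
  - name: web
    image: reg.cloud.com/nginx
    volumeMounts:
    - name: shared-data
      mountPath: /usr/share/nginx/html
  - name: sidecar
    image: reg.cloud.com/busybox
    command: ["sh", "-c", "while true; do date > /data/index.html; sleep 5; done"]
    volumeMounts:
    - name: shared-data
      mountPath: /data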


The pods on each worker node are controlled by the replication controller (RC) on the master node.


- Reference / management model

The side doing the referencing holds a selector (key/value pairs).

The side being referenced carries labels (key/value pairs) on the pod (a node selector picks nodes by their labels in the same way).

In other words, a controller or service selects the pods whose labels match its selector, for example:
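
A sketch, reusing the type=test key/value from this lab:

# List only the pods whose labels match the selector's key/value
kubectl get po -l type=test
# Show the labels on every pod to see what a selector would match
kubectl get po --show-labels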

- What a Kubernetes namespace is (e.g. kube-system):


[root@host01-4 hk]# k get po -n kube-system -o  wide
NAME                                         READY     STATUS    RESTARTS   AGE       IP            NODE
etcd-host01-4.cloud.com                      1/1       Running   0          58m       10.10.12.14   host01-4.cloud.com
kube-apiserver-host01-4.cloud.com            1/1       Running   0          58m       10.10.12.14   host01-4.cloud.com
kube-controller-manager-host01-4.cloud.com   1/1       Running   0          59m       10.10.12.14   host01-4.cloud.com
kube-dns-86f4d74b45-t9df2                    3/3       Running   0          1h        10.32.0.2     host01-4.cloud.com
kube-proxy-fs9d8                             1/1       Running   0          1h        10.10.12.14   host01-4.cloud.com
kube-proxy-r5bzj                             1/1       Running   0          51m       10.10.12.13   host01-3.cloud.com
kube-proxy-tvwnv                             1/1       Running   0          51m       10.10.12.12   host01-2.cloud.com
kube-scheduler-host01-4.cloud.com            1/1       Running   0          59m       10.10.12.14   host01-4.cloud.com
weave-net-hf9d5                              2/2       Running   1          51m       10.10.12.12   host01-2.cloud.com
weave-net-p5drv                              2/2       Running   1          51m       10.10.12.13   host01-3.cloud.com
weave-net-zr5qr                              2/2       Running   0          54m       10.10.12.14   host01-4.cloud.com
 

*Checking the cluster API:

[root@host01-4 hk]# k cluster-info
Kubernetes master is running at https://10.10.12.14:6443
KubeDNS is running at https://10.10.12.14:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
 
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.


*You can connect to a pod regardless of where it is running

[root@host01-4 hk]# ls
dns-kube-dns  etcd-amd64  kubeadm.host08-1.root.log.INFO.20180525-060140.2620  kube-apiserver   kube-proxy      weave-kube
dns-sidecar   k8s-dns     kubeadm.INFO                                         kube-controller  kube-scheduler  weave-npc
[root@host01-4 hk]# k run --image=reg.cloud.com/nginx --port=80 --generator=run/v1
error: NAME is required for run
See 'kubectl run -h' for help and examples.
[root@host01-4 hk]# k run --image=reg.cloud.com/nginx --port=80 --generator=run/v1
error: NAME is required for run
See 'kubectl run -h' for help and examples.
[root@host01-4 hk]# k run --image=reg.cloud.com/nginx nginx-app --port=80 --generator=run/v1
replicationcontroller "nginx-app" created
[root@host01-4 hk]# k get rc
NAME        DESIRED   CURRENT   READY     AGE
nginx-app   1         1         0         8s
[root@host01-4 hk]# k get po
NAME              READY     STATUS    RESTARTS   AGE
nginx-app-gb6ch   1/1       Running   0          15s
[root@host01-4 hk]# get po -o wide
-bash: get: command not found
[root@host01-4 hk]# k get po -o wide
NAME              READY     STATUS    RESTARTS   AGE       IP          NODE
nginx-app-gb6ch   1/1       Running   0          32s       10.36.0.1   host01-3.cloud.com
[root@host01-4 hk]# k logs
error: expected 'logs (POD | TYPE/NAME) [CONTAINER_NAME]'.
POD or TYPE/NAME is a required argument for the logs command
See 'kubectl logs -h' for help and examples.
[root@host01-4 hk]# k logs nginx-app-df618
Error from server (NotFound): pods "nginx-app-df618" not found
[root@host01-4 hk]# k logs nginx-app-gb6ch
[root@host01-4 hk]# k exec -it
error: expected 'exec POD_NAME COMMAND [ARG1] [ARG2] ... [ARGN]'.
POD_NAME and COMMAND are required arguments for the exec command
See 'kubectl exec -h' for help and examples.
[root@host01-4 hk]# k exec -it nginx-app-gb6ch bash
root@nginx-app-gb6ch:/#
 

- The pod name becomes the container's hostname.
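
A quick check (sketch, using the pod name from the session above):

kubectl exec nginx-app-gb6ch -- hostname
# -> nginx-app-gb6ch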



*Even if you delete a pod by name, it comes back (the RC recreates it).

[root@host01-4 hk]# k get po -o wide
NAME              READY     STATUS    RESTARTS   AGE       IP          NODE
nginx-app-gb6ch   1/1       Running   0          5m        10.36.0.1   host01-3.cloud.com
[root@host01-4 hk]# delete po nginx-app-gb6ch
-bash: delete: command not found
[root@host01-4 hk]# k delete po nginx-app-gb6ch
pod "nginx-app-gb6ch" deleted
[root@host01-4 hk]# k get po -o wide
NAME              READY     STATUS              RESTARTS   AGE       IP        NODE
nginx-app-gnpsd   0/1       ContainerCreating   0          6s        <none>    host01-2.cloud.com
[root@host01-4 hk]# k get rc
NAME        DESIRED   CURRENT   READY     AGE
nginx-app   1         1         1         5m
[root@host01-4 hk]#
 


*Scale in / scale out


[root@host01-4 hk]# k describe rc nginx-app
dns-kube-dns                                         kubeadm.host08-1.root.log.INFO.20180525-060140.2620  kube-proxy
dns-sidecar                                          kubeadm.INFO                                         kube-scheduler
etcd-amd64                                           kube-apiserver                                       weave-kube
k8s-dns                                              kube-controller                                      weave-npc
[root@host01-4 hk]# k describe rc nginx-app
Name:         nginx-app
Namespace:    default
Selector:     run=nginx-app
Labels:       run=nginx-app
Annotations:  <none>
Replicas:     1 current / 1 desired
Pods Status:  1 Running / 0 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  run=nginx-app
  Containers:
   nginx-app:
    Image:        reg.cloud.com/nginx
    Port:         80/TCP
    Host Port:    0/TCP
    Environment:  <none>
    Mounts:       <none>
  Volumes:        <none>
Events:
  Type    Reason            Age   From                    Message
  ----    ------            ----  ----                    -------
  Normal  SuccessfulCreate  6m    replication-controller  Created pod: nginx-app-gb6ch
  Normal  SuccessfulCreate  1m    replication-controller  Created pod: nginx-app-gnpsd
[root@host01-4 hk]# k get po
NAME              READY     STATUS    RESTARTS   AGE
nginx-app-gnpsd   1/1       Running   0          1m
[root@host01-4 hk]# k describe po nginx-app-gnpsd
Name:           nginx-app-gnpsd
Namespace:      default
Node:           host01-2.cloud.com/10.10.12.12
Start Time:     Fri, 25 May 2018 11:58:20 +0900
Labels:         run=nginx-app
Annotations:    <none>
Status:         Running
IP:             10.44.0.1
Controlled By:  ReplicationController/nginx-app
Containers:
  nginx-app:
    Container ID:   docker://6d0e9cb190b31334dee5dba4877ace52d8afd5a9956d7c50eae35d3107722a58
    Image:          reg.cloud.com/nginx
    Image ID:       docker-pullable://reg.cloud.com/nginx@sha256:a4fb15454c43237dbc6592c4f8e0b50160ceb03e852a10c9895cf2a6d16c7fe2
    Port:           80/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Fri, 25 May 2018 11:58:29 +0900
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-85hdm (ro)
Conditions:
  Type           Status
  Initialized    True
  Ready          True
  PodScheduled   True
Volumes:
  default-token-85hdm:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-85hdm
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type    Reason                 Age   From                         Message
  ----    ------                 ----  ----                         -------
  Normal  SuccessfulMountVolume  1m    kubelet, host01-2.cloud.com  MountVolume.SetUp succeeded for volume "default-token-85hdm"
  Normal  Scheduled              1m    default-scheduler            Successfully assigned nginx-app-gnpsd to host01-2.cloud.com
  Normal  Pulling                1m    kubelet, host01-2.cloud.com  pulling image "reg.cloud.com/nginx"
  Normal  Pulled                 1m    kubelet, host01-2.cloud.com  Successfully pulled image "reg.cloud.com/nginx"
  Normal  Created                1m    kubelet, host01-2.cloud.com  Created container
  Normal  Started                1m    kubelet, host01-2.cloud.com  Started container
[root@host01-4 hk]# k get rc
NAME        DESIRED   CURRENT   READY     AGE
nginx-app   1         1         1         8m
[root@host01-4 hk]# k get po -o wide
NAME              READY     STATUS    RESTARTS   AGE       IP          NODE
nginx-app-gnpsd   1/1       Running   0          3m        10.44.0.1   host01-2.cloud.com
 
[root@host01-4 hk]# k scale rc nginx-app --replicas=3
replicationcontroller "nginx-app" scaled
[root@host01-4 hk]# k get po -o wide
NAME              READY     STATUS    RESTARTS   AGE       IP          NODE
nginx-app-gnpsd   1/1       Running   0          4m        10.44.0.1   host01-2.cloud.com
nginx-app-jfmkd   1/1       Running   0          10s       10.44.0.2   host01-2.cloud.com
nginx-app-ww6sn   1/1       Running   0          10s       10.36.0.1   host01-3.cloud.com
 
[root@host01-4 hk]# k scale rc nginx-app --replicas=0
replicationcontroller "nginx-app" scaled
[root@host01-4 hk]# k get po -o wide
NAME              READY     STATUS        RESTARTS   AGE       IP          NODE
nginx-app-gnpsd   0/1       Terminating   0          5m        10.44.0.1   host01-2.cloud.com
nginx-app-jfmkd   0/1       Terminating   0          34s       10.44.0.2   host01-2.cloud.com
[root@host01-4 hk]# k get po -o wide
No resources found.
[root@host01-4 hk]# rc 0
-bash: rc: command not found
[root@host01-4 hk]# k scale rc nginx-app --replicas=1
replicationcontroller "nginx-app" scaled
[root@host01-4 hk]# k get po -o wide
NAME              READY     STATUS    RESTARTS   AGE       IP          NODE
nginx-app-7qpbv   1/1       Running   0          4s        10.36.0.1   host01-3.cloud.com
[root@host01-4 hk]#
 



*Viewing a pod as YAML

[root@host01-4 hk]# k get po nginx-app-7qpbv -o yaml
 


*Extract a pod to a YAML file, then create a new pod from it

apiVersion: v1
kind: Pod
metadata:
  labels:
    type: web
  name: nginx-hk-app
spec:
  containers:
  - image: reg.cloud.com/nginx
    name: nginx-app
    ports:                            # same as EXPOSE
    - containerPort: 80
      protocol: TCP
 
 
 
[root@host01-4 hk]# k get po nginx-app-7qpbv -o yaml > temp.yaml
[root@host01-4 hk]# vi temp.yaml
[root@host01-4 hk]# k create -f temp.yaml
pod "nginx-hk-app" created
[root@host01-4 hk]# k get po -o wide
NAME              READY     STATUS    RESTARTS   AGE       IP          NODE
nginx-app-7qpbv   1/1       Running   0          6m        10.36.0.1   host01-3.cloud.com
nginx-hk-app      1/1       Running   0          13s       10.44.0.1   host01-2.cloud.com
[root@host01-4 hk]#
 



*Create an RC from a YAML file and deploy it:


[root@host01-4 hk]# k get rc nginx-app -o yaml > hk.yaml
[root@host01-4 hk]# vi hk.yaml
apiVersion: v1
kind: ReplicationController
metadata:
  labels:
    run: nginx-app
  name: nginx-app2
spec:
  replicas: 1
  selector:
    type: test
  template:
    metadata:
      labels:
        type: test
    spec:
      containers:
      - image: reg.cloud.com/nginx
        name: nginx-app
        ports:
        - containerPort: 80
          protocol: TCP
 
 
[root@host01-4 hk]# k create -f hk.yaml
replicationcontroller "nginx-app2" created
[root@host01-4 hk]# k get rc
NAME         DESIRED   CURRENT   READY     AGE
nginx-app    1         1         1         24m
nginx-app2   1         1         1         9s
[root@host01-4 hk]# k get po -o wide
NAME               READY     STATUS    RESTARTS   AGE       IP          NODE
nginx-app-7qpbv    1/1       Running   0          14m       10.36.0.1   host01-3.cloud.com
nginx-app2-tgqqf   1/1       Running   0          16s       10.36.0.2   host01-3.cloud.com
nginx-hk-app       1/1       Running   0          7m        10.44.0.1   host01-2.cloud.com
[root@host01-4 hk]#
 


yum -y install bash-completion
 
curl -sSL https://get.docker.com/ | sh
 
 
systemctl start docker
 
===cAdvisor=================================
docker run \
   --detach=true \
   --volume=/:/rootfs:ro \
   --volume=/var/run:/var/run:rw \
   --volume=/sys:/sys:ro \
   --volume=/var/lib/docker/:/var/lib/docker:ro \
   --publish=8080:8080 \
   --privileged=true \
   --name=cadvisor \
google/cadvisor:latest
 
docker service create \
   --mode global \
   --mount type=bind,source=/,destination=/rootfs,ro=1 \
   --mount type=bind,source=/var/run,destination=/var/run \
   --mount type=bind,source=/sys,destination=/sys,ro=1 \
   --mount type=bind,source=/var/lib/docker,destination=/var/lib/docker,ro=1 \
   --publish mode=host,target=8080,published=8080 \
   --name=cadvisor \
   google/cadvisor:latest
 
===Compose=========
curl -L https://github.com/docker/compose/releases/download/1.21.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
 
chmod +x /usr/local/bin/docker-compose
 



{
"insecure-registries":["10.10.12.0/24"]
}
 
 
/etc/docker/daemon.json



version: '3.3'
 
services:
   db:
     image: reg.cloud.com/mysql:5.7
     volumes:
       - dbdata:/var/lib/mysql
     restart: always
     environment:
       MYSQL_ROOT_PASSWORD: somewordpress
       MYSQL_DATABASE: wordpress
       MYSQL_USER: wordpress
       MYSQL_PASSWORD: wordpress
 
   wordpress:
     depends_on:
       - db
     image: reg.cloud.com/wordpress:latest
     ports:
       - "8000:80"
     restart: always
     environment:
       WORDPRESS_DB_HOST: db:3306
       WORDPRESS_DB_USER: wordpress
       WORDPRESS_DB_PASSWORD: wordpress
volumes:
    dbdata:





- Cluster = built for availability and load balancing (the application does not care which node a request lands on; if something dies or fails over, it is handled automatically).


There has to be a database to store the configuration (a key/value NoSQL-style store; here the discovery service, etcd, is used).

Node redundancy needs at least 3 nodes.

- Recovery is possible as long as a majority of the nodes hold the same data.

etcd is installed on each node with a local volume.

The master node holds no data of its own and only worries about the application.


Communication between nodes needs an overlay network (VXLAN),

and communication with the outside world goes through a bridge!


Swarm's advantage is that setup is simple;

Kubernetes is harder to install.


*Swarm scale in / scale out example:

[root@host01-2 ~]#  docker swarm join \
>     --token SWMTKN-1-0qw1ki2xppg9rh6fhw310wi4dhczl54hl3uweydrfo2ld3aw2z-06n8xco8k8gpzwstwl5u2s0pr \
>     10.10.12.13:2377
Error response from daemon: This node is already part of a swarm. Use "docker swarm leave" to leave this swarm and join another one.
[root@host01-2 ~]# docker network ls
NETWORK ID          NAME                DRIVER              SCOPE
76d13a20d92c        bridge              bridge              local
2626920fe992        docker_gwbridge     bridge              local
a4bddc4df10b        host                host                local
y39kmqh237vy        ingress             overlay             swarm
1e75b30aa8ac        isolated_nw         bridge              local
e9ef483dde2d        none                null                local
5c585fb9c7f2        temp_default        bridge              local
efe418b38219        test                bridge              local
[root@host01-2 ~]# docker service ls
ID                  NAME                MODE                REPLICAS            IMAGE               PORTS
[root@host01-2 ~]# docker service create --replicas 2 --name hello reg.cloud.com/alpine ping docker.com
q86onfawwioyu50p9pfoh78om
overall progress: 2 out of 2 tasks
1/2: running   [==================================================>]
2/2: running   [==================================================>]
verify: Service converged
[root@host01-2 ~]# docker service ps hello
ID                  NAME                IMAGE                         NODE                 DESIRED STATE       CURRENT STATE           ERROR               PORTS
c5q5vvadwnj7        hello.1             reg.cloud.com/alpine:latest   host01-2.cloud.com   Running             Running 2 minutes ago
wiqej3wj0xfy        hello.2             reg.cloud.com/alpine:latest   host01-2.cloud.com   Running             Running 2 minutes ago
[root@host01-2 ~]# docker service scale hello=4
hello scaled to 4
overall progress: 4 out of 4 tasks
1/4: running   [==================================================>]
2/4: running   [==================================================>]
3/4: running   [==================================================>]
4/4: running   [==================================================>]
verify: Service converged
[root@host01-2 ~]# docker service ps hello
ID                  NAME                IMAGE                         NODE                 DESIRED STATE       CURRENT STATE            ERROR               PORTS
c5q5vvadwnj7        hello.1             reg.cloud.com/alpine:latest   host01-2.cloud.com   Running             Running 3 minutes ago
wiqej3wj0xfy        hello.2             reg.cloud.com/alpine:latest   host01-2.cloud.com   Running             Running 3 minutes ago
n02v6wp29911        hello.3             reg.cloud.com/alpine:latest   host01-2.cloud.com   Running             Running 27 seconds ago
yse5jakppzod        hello.4             reg.cloud.com/alpine:latest   host01-2.cloud.com   Running             Running 27 seconds ago
[root@host01-2 ~]# docker service scale hello=1
hello scaled to 1
overall progress: 1 out of 1 tasks
1/1: running   [==================================================>]
verify: Service converged
[root@host01-2 ~]#
 


 






*etcd (the discovery service) knows and records all of the host IP information.


Normally, talking to another host means crossing the physical network segment, which is hard. The physical network can be made effectively transparent:

=> Below the physical adapter is the physical network (underlay); above it is the overlay.

=> Containers can also be attached to the bridge network and receive requests directly.

=> VXLAN is the standard for this kind of tunneling(?)

=> You can specify the source and destination!
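
In Swarm this is what an overlay network provides. A sketch (the network name and subnet are arbitrary; assumes the swarm above is already initialized):

# Create a VXLAN-backed overlay network and run a service on it,
# so containers on different hosts talk as if they were on the same L2 segment.
docker network create --driver overlay --subnet 10.0.9.0/24 my-overlay
docker service create --name web --replicas 2 --network my-overlay reg.cloud.com/nginx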


*Port configuration:

- Through the EXPOSE information, the host can find the container's port on its own.

- External clients come in through the LB.

- The LB connects to multiple hosts (the ports are discovered and configured automatically).

There are three ways to publish a port (see the sketch after this list):
1) -p <host port>:<container port>

2) -p <container port>

3) -P : maps every port exposed with EXPOSE or --expose to a dynamically allocated host port
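
A sketch of the three forms with docker run (the image is the registry nginx used elsewhere in these notes):

# 1) fixed host port -> container port
docker run -d -p 8080:80 reg.cloud.com/nginx
# 2) container port only; the host port is picked dynamically
docker run -d -p 80 reg.cloud.com/nginx
# 3) -P publishes every EXPOSEd port to dynamically allocated host ports
docker run -d -P reg.cloud.com/nginx
# check which host ports were assigned
docker ps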


Kubernetes = manages things as a group (an orchestration tool)

Docker = thinks only at the level of a single application

 




*The docker cp command


[root@host01-2 ~]# docker cp keen_newton:/etc/docker/registry/config.yml .
 



[root@host01-2 ~]# docker run -dp 80:80 -v $(pwd)/config.yml:/config.yml registry:2.5 config.yml
d60aad9dcefbd9f75511d176c51e65b9fdc4740f665aa98afed3e2e370de79cc
[root@host01-2 ~]# docker ps
CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS                          NAMES
d60aad9dcefb        registry:2.5        "/entrypoint.sh conf…"   10 seconds ago      Up 9 seconds        0.0.0.0:80->80/tcp, 5000/tcp   flamboyant_joliot
[root@host01-2 ~]# docker exec -it d60aad9dcefb sh
/ # ls
bin            etc            media          run            tmp
config.yml     home           mnt            sbin           usr
dev            lib            proc           srv            var
entrypoint.sh  linuxrc        root           sys
/ # vi config.yml
/ # exit
[root@host01-2 ~]#
 


*Docker Compose : 

docker compose = a single host

docker swarm (a cluster management tool) = multiple hosts


Compose and Swarm can now be used together.


* The standard: deploy to hosts through YAML; the CLI is just a thin wrapper around it.
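
One place this integration shows up (a sketch; the stack name is arbitrary): the same Compose-format YAML can be deployed onto a swarm as a stack.

# On a swarm manager, deploy a Compose v3 file as a stack
docker stack deploy -c docker-compose.yml mystack
docker stack services mystack   # the services created from the YAML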


*Installing Docker Compose:

[root@host01-2 docker]# curl -L https://github.com/docker/compose/releases/download/1.21.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100   617    0   617    0     0     93      0 --:--:--  0:00:06 --:--:--   150
100 10.3M  100 10.3M    0     0   374k      0  0:00:28  0:00:28 --:--:-- 1122k
[root@host01-2 docker]#
[root@host01-2 docker]# chmod +x /usr/local/bin/docker-compose
[root@host01-2 docker]# docker-compose version
docker-compose version 1.21.0, build 5920eb0
docker-py version: 3.2.1
CPython version: 3.6.5
OpenSSL version: OpenSSL 1.0.1t  3 May 2016
[root@host01-2 docker]#
 


*YAML file example:

- Plays a similar role to XML; indentation uses spaces, tabs are not allowed!!


version: '3.3'
 
services:
   db:
     image: reg.cloud.com/mysql:5.7
     volumes:  # volumes mounted when the container comes up
       - dbdata:/var/lib/mysql
     restart: always
     environment:
       MYSQL_ROOT_PASSWORD: somewordpress
       MYSQL_DATABASE: wordpress
       MYSQL_USER: wordpress
       MYSQL_PASSWORD: wordpress
 
   wordpress:
     depends_on: # start ordering (the db service is started first)
       - db
     image: reg.cloud.com/wordpress:latest
     ports:
       - "8000:80" #포트 매핑 
     restart: always
     environment:
       WORDPRESS_DB_HOST: db:3306
       WORDPRESS_DB_USER: wordpress
       WORDPRESS_DB_PASSWORD: wordpress
volumes:
    dbdata: # volume name


*Install WordPress by running the yml file (in the end, writing a good yml file is what matters)

[root@host01-2 temp]# docker volume ls
DRIVER              VOLUME NAME
local               2042e79725f9ebcc220a00ec036065afe47f6a313c7fbc37424dc06707506024
local               3731b5e0372bf0b19e572f7022041069ef60631b68035fb044241a38f6de04de
local               4a35da8fe0f8f7855c92230679c18089d8734a66754c8e8523ac486a55cc4d05
local               5aaa3c1d276e0a539b0928437df457197be510c035cbfa8d34a7b7c345060335
local               6a37dd2b5eda64b880db3e122f3985e45bd249c94030d2db0d158691a45901eb
local               7177f41f8225923f8cde394fcd529b72ab9719a35fc1bd9723e2a6f2ab707681
local               7267f43dfed496a69277236002ab423079b871400ff68596856517e57291e7b3
local               9a8f8faa0acf42207fe6d8e6b24598edb2a3561616bf4f9abcc27295773ffa76
local               data
local               f4f24f0b97c0e0e3d739141827867affd43edd21b3667de33e7527a888b43f12
[root@host01-2 temp]# docker rm -rf volume
unknown shorthand flag: 'r' in -rf
See 'docker rm --help'.
[root@host01-2 temp]# docker rm -f volume
Error: No such container: volume
[root@host01-2 temp]# docker rm -f volume 2042e79725f9ebcc220a00ec036065afe47f6a313c7fbc37424dc06707506024 3731b5e0372bf0b19e572f7022041069ef60631b68035fb044241a38f6de04de
Error: No such container: volume
Error: No such container: 2042e79725f9ebcc220a00ec036065afe47f6a313c7fbc37424dc06707506024
Error: No such container: 3731b5e0372bf0b19e572f7022041069ef60631b68035fb044241a38f6de04de
[root@host01-2 temp]#
[root@host01-2 temp]#
[root@host01-2 temp]#
[root@host01-2 temp]#
[root@host01-2 temp]# docker-compose -f docker-compose.yml ^C
[root@host01-2 temp]# clear
[root@host01-2 temp]# ls
docker-compose.yml
[root@host01-2 temp]# docker-compose up -d
Creating network "temp_default" with the default driver
Creating volume "temp_dbdata" with default driver
Pulling db (reg.cloud.com/mysql:5.7)...
5.7: Pulling from mysql
Digest: sha256:a0423a7d021b7a7775f1d2db1014bd15fde029f538c1f8d97c9832aa4a25209f
Status: Downloaded newer image for reg.cloud.com/mysql:5.7
Pulling wordpress (reg.cloud.com/wordpress:latest)...
latest: Pulling from wordpress
85b1f47fba49: Already exists
d8204bc92725: Pull complete
92fc16bb18e4: Pull complete
31098e61b2ae: Pull complete
f6ae64bfd33d: Pull complete
003c1818b354: Pull complete
a6fd4aeb32ad: Pull complete
a094df7cedc1: Pull complete
e3bf6fc1a51d: Pull complete
ad235c260360: Pull complete
edbf48bcbd7e: Pull complete
fd6ae81d5745: Pull complete
69838fd876d6: Pull complete
3186ebffd72d: Pull complete
b24a415ea2c0: Pull complete
225bda14ea90: Pull complete
fc0ad3550a92: Pull complete
0e4600933a8c: Pull complete
Digest: sha256:5b3b36db3c19d5b8c6ded6facec4daac57fe2ea1879351a2e65ac8919cea37ce
Status: Downloaded newer image for reg.cloud.com/wordpress:latest
Creating temp_db_1 ... done
Creating temp_wordpress_1 ... done
[root@host01-2 temp]# docker-comose ps
-bash: docker-comose: command not found
[root@host01-2 temp]# docker-compose ps
      Name                    Command               State          Ports
--------------------------------------------------------------------------------
temp_db_1          docker-entrypoint.sh mysqld      Up      3306/tcp
temp_wordpress_1   docker-entrypoint.sh apach ...   Up      0.0.0.0:8000->80/tcp
[root@host01-2 temp]# docker network ls
NETWORK ID          NAME                DRIVER              SCOPE
76d13a20d92c        bridge              bridge              local
a4bddc4df10b        host                host                local
1e75b30aa8ac        isolated_nw         bridge              local
e9ef483dde2d        none                null                local
02e52b7344b7        temp_default        bridge              local
efe418b38219        test                bridge              local
[root@host01-2 temp]# docker volum ls
docker: 'volum' is not a docker command.
See 'docker --help'
[root@host01-2 temp]# docker volume ls
DRIVER              VOLUME NAME
local               2042e79725f9ebcc220a00ec036065afe47f6a313c7fbc37424dc06707506024
local               3731b5e0372bf0b19e572f7022041069ef60631b68035fb044241a38f6de04de
local               4a35da8fe0f8f7855c92230679c18089d8734a66754c8e8523ac486a55cc4d05
local               5aaa3c1d276e0a539b0928437df457197be510c035cbfa8d34a7b7c345060335
local               67e959ee45f5221450510cbd82e314be628275fdfc34729002ed667c026e94b8
local               6a37dd2b5eda64b880db3e122f3985e45bd249c94030d2db0d158691a45901eb
local               7177f41f8225923f8cde394fcd529b72ab9719a35fc1bd9723e2a6f2ab707681
local               7267f43dfed496a69277236002ab423079b871400ff68596856517e57291e7b3
local               9a8f8faa0acf42207fe6d8e6b24598edb2a3561616bf4f9abcc27295773ffa76
local               data
local               f4f24f0b97c0e0e3d739141827867affd43edd21b3667de33e7527a888b43f12
local               temp_dbdata
[root@host01-2 temp]# ls /var/lib/docker/volumse/lab_dbdata/_data/-l
ls: cannot access /var/lib/docker/volumse/lab_dbdata/_data/-l: No such file or directory
[root@host01-2 temp]# ls /var/lib/docker/volumse/lab_dbdata/_data/ -l
ls: cannot access /var/lib/docker/volumse/lab_dbdata/_data/: No such file or directory
[root@host01-2 temp]# ls /var/lib/docker/volumes/temp_dbdata/_data/ -l
total 188488
-rw-r-----1 polkitd ssh_keys       56 May 24 17:19 auto.cnf
-rw-------1 polkitd ssh_keys     1675 May 24 17:19 ca-key.pem
-rw-r--r--1 polkitd ssh_keys     1107 May 24 17:19 ca.pem
-rw-r--r--1 polkitd ssh_keys     1107 May 24 17:19 client-cert.pem
-rw-------1 polkitd ssh_keys     1679 May 24 17:19 client-key.pem
-rw-r-----1 polkitd ssh_keys     1321 May 24 17:19 ib_buffer_pool
-rw-r-----1 polkitd ssh_keys 79691776 May 24 17:21 ibdata1
-rw-r-----1 polkitd ssh_keys 50331648 May 24 17:21 ib_logfile0
-rw-r-----1 polkitd ssh_keys 50331648 May 24 17:19 ib_logfile1
-rw-r-----1 polkitd ssh_keys 12582912 May 24 17:21 ibtmp1
drwxr-x---2 polkitd ssh_keys     4096 May 24 17:19 mysql
drwxr-x---2 polkitd ssh_keys     8192 May 24 17:19 performance_schema
-rw-------1 polkitd ssh_keys     1679 May 24 17:19 private_key.pem
-rw-r--r--1 polkitd ssh_keys      451 May 24 17:19 public_key.pem
-rw-r--r--1 polkitd ssh_keys     1107 May 24 17:19 server-cert.pem
-rw-------1 polkitd ssh_keys     1675 May 24 17:19 server-key.pem
drwxr-x---2 polkitd ssh_keys     8192 May 24 17:19 sys
drwxr-x---2 polkitd ssh_keys     4096 May 24 17:21 wordpress
[root@host01-2 temp]# docker-compose down
Stopping temp_wordpress_1 ... done
Stopping temp_db_1        ... done
Removing temp_wordpress_1 ... done
Removing temp_db_1        ... done
Removing network temp_default
[root@host01-2 temp]# docker-compose ps
Name   Command   State   Ports
------------------------------
[root@host01-2 temp]# docker volume ls
DRIVER              VOLUME NAME
local               2042e79725f9ebcc220a00ec036065afe47f6a313c7fbc37424dc06707506024
local               3731b5e0372bf0b19e572f7022041069ef60631b68035fb044241a38f6de04de
local               4a35da8fe0f8f7855c92230679c18089d8734a66754c8e8523ac486a55cc4d05
local               5aaa3c1d276e0a539b0928437df457197be510c035cbfa8d34a7b7c345060335
local               67e959ee45f5221450510cbd82e314be628275fdfc34729002ed667c026e94b8
local               6a37dd2b5eda64b880db3e122f3985e45bd249c94030d2db0d158691a45901eb
local               7177f41f8225923f8cde394fcd529b72ab9719a35fc1bd9723e2a6f2ab707681
local               7267f43dfed496a69277236002ab423079b871400ff68596856517e57291e7b3
local               9a8f8faa0acf42207fe6d8e6b24598edb2a3561616bf4f9abcc27295773ffa76
local               data
local               f4f24f0b97c0e0e3d739141827867affd43edd21b3667de33e7527a888b43f12
local               temp_dbdata
[root@host01-2 temp]# docker-compose up -d
Creating network "temp_default" with the default driver
Creating temp_db_1 ... done
Creating temp_wordpress_1 ... done
[root@host01-2 temp]#
 






*Docker's network model:

Docker uses the CNM: the Container Network Model.


SandboxID...

A sandbox is an isolated environment that holds a container's networking configuration.


       "NetworkSettings": {
            "Bridge""",
            "SandboxID""91896e3968b1aedee2ba4275531b7ff1435f2cd3233a703c553990f65246cdeb",
            "HairpinMode"false,
            "LinkLocalIPv6Address""",
            "LinkLocalIPv6PrefixLen"0,
            "Ports": {},
            "SandboxKey""/var/run/docker/netns/91896e3968b1",
            "SecondaryIPAddresses": null,
            "SecondaryIPv6Addresses": null,
            "EndpointID""8bd83561b900d05dab2ed804b28ded759cf1e174858dad6e6c6d47f549e33d51",
            "Gateway""172.17.0.1",
            "GlobalIPv6Address""",
            "GlobalIPv6PrefixLen"0,
            "IPAddress""172.17.0.2",
            "IPPrefixLen"16,
            "IPv6Gateway""",
            "MacAddress""02:42:ac:11:00:02",
            "Networks": {
                "bridge": {
                    "IPAMConfig": null,
                    "Links": null,
                    "Aliases": null,
                    "NetworkID""76d13a20d92c8d68860705f57e8989055660ac78cafea4eaa033cb1fd856e6fa",
                    "EndpointID""8bd83561b900d05dab2ed804b28ded759cf1e174858dad6e6c6d47f549e33d51",
                    "Gateway""172.17.0.1",
                    "IPAddress""172.17.0.2",
                    "IPPrefixLen"16,
                    "IPv6Gateway""",
                    "GlobalIPv6Address""",
                    "GlobalIPv6PrefixLen"0,
                    "MacAddress""02:42:ac:11:00:02",
                    "DriverOpts": null
                }
            }
        }
    }
]
[root@host01-2 _data]# ^C
[root@host01-2 _data]# docker network ls
NETWORK ID          NAME                DRIVER              SCOPE
76d13a20d92c        bridge              bridge              local
a4bddc4df10b        host                host                local
e9ef483dde2d        none                null                local
[root@host01-2 _data]# docker attach 76d13a20d92c
Error: No such container: 76d13a20d92c
[root@host01-2 _data]# docker ps
CONTAINER ID        IMAGE                   COMMAND             CREATED             STATUS              PORTS               NAMES
e3343a6dd1be        reg.cloud.com/busybox   "sh"                2 minutes ago       Up 2 minutes                            focused_albattani
[root@host01-2 _data]# docker attach e3343a6dd1be
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
150: eth0@if151: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
    link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.2/16 brd 172.17.255.255 scope global eth0
       valid_lft forever preferred_lft forever
/ #
 
cs
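
The SandboxKey in the inspect output above is the network-namespace file that backs the sandbox. A hedged sketch of pulling just the sandbox fields with a Go template and entering that namespace from the host (assumes util-linux nsenter is available; the container name is the one from the session above):

docker inspect -f '{{ .NetworkSettings.SandboxID }}'  focused_albattani
docker inspect -f '{{ .NetworkSettings.SandboxKey }}' focused_albattani

# enter the sandbox's network namespace from the host and list its interfaces
nsenter --net=/var/run/docker/netns/91896e3968b1 ip addr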


For instances on different L2 networks to talk to each other...

a separate component holding eth0 acts as a bridge between the two containers.


*Docker's default network


Communication goes through docker0. Below is the bridge configuration:


[root@host01-2 _data]# docker network ls
NETWORK ID          NAME                DRIVER              SCOPE
76d13a20d92c        bridge              bridge              local
a4bddc4df10b        host                host                local
e9ef483dde2d        none                null                local
 
[root@host01-2 _data]# docker inspect bridge
[
    {
        "Name": "bridge",
        "Id": "76d13a20d92c8d68860705f57e8989055660ac78cafea4eaa033cb1fd856e6fa",
        "Created": "2018-05-21T15:11:08.970105946+09:00",
        "Scope": "local",
        "Driver": "bridge",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": null,
            "Config": [
                {
                    "Subnet": "172.17.0.0/16",   #the bridge network's subnet
                    "Gateway": "172.17.0.1"
                }
            ]
        },
        "Internal": false,
        "Attachable": false,
        "Ingress": false,
        "ConfigFrom": {
            "Network": ""
        },
        "ConfigOnly": false,
        "Containers": {},
        "Options": {
            "com.docker.network.bridge.default_bridge": "true",
            "com.docker.network.bridge.enable_icc": "true",   #allow communication within the same bridge? if this is false, containers cannot talk to each other
            "com.docker.network.bridge.enable_ip_masquerade": "true",   #masquerade acts as a kind of NAT
            "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
            "com.docker.network.bridge.name": "docker0",   #docker0 plays the bridge role
            "com.docker.network.driver.mtu": "1500"
        },
        "Labels": {}
    }
]
[root@host01-2 _data]#
 
cs
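
To see docker0 and the veth pairs that plug each container into it, something like the following can be run on the host (ip comes from iproute2; brctl needs the bridge-utils package):

ip addr show docker0        # 172.17.0.1/16, the gateway of the bridge subnet above
ip link show type veth      # one veth* interface per running container
brctl show docker0          # lists the veth ports attached to the bridge, if bridge-utils is installed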


- By default, Docker traffic leaves the HOST through iptables rules (a check is sketched below).

=> Network latency is negligible... practically on par with bare metal.

=> On the other hand, there are some security-sensitive points to watch.
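
The masquerade behaviour can be verified directly in the host's NAT table; a sketch of what to look for:

# NAT rules Docker installs on the host
iptables -t nat -L POSTROUTING -n -v
# look for a MASQUERADE rule covering the bridge subnet (172.17.0.0/16 here);
# it rewrites container source IPs when their traffic leaves the host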


- Inter-container communication (C1: 80, C2: 81, C3: 82 -- how do they reach each other?):
=> They communicate via localhost, each on its own port.


- In the end Docker's default networking comes down to a single driver (the bridge driver):

[root@host01-2 _data]# docker network ls
NETWORK ID          NAME                DRIVER              SCOPE
76d13a20d92c        bridge              bridge              local  #bridge
a4bddc4df10b        host                host                local  #shares the host's network
e9ef483dde2d        none                null                local  #no network
 
 
cs
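
A quick way to see the difference between the three built-in drivers, reusing the busybox image from above:

# bridge (default): own eth0 on 172.17.0.0/16 behind docker0
docker run --rm reg.cloud.com/busybox ip a

# host: no separate network namespace, the container sees the host's interfaces
docker run --rm --network host reg.cloud.com/busybox ip a

# none: loopback only, no connectivity
docker run --rm --network none reg.cloud.com/busybox ip a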






* Multi-host networking (uses VxLAN) / single-host networking


- VXLAN

- SDN

- NFV


- Service Chaining

When designing a distributed network with security in mind:

if the network is a flat L2 but there is only one IPS, you end up building an architecture in which traffic from instances without an IPS is steered through the one instance that has it.

IaaS > PaaS
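
For the multi-host case, Docker's built-in answer is the overlay driver, which carries container traffic between hosts over VxLAN. A minimal sketch (requires a swarm to be initialised first; the network name my_overlay is made up for illustration):

docker swarm init

# VxLAN-backed network that standalone containers can also attach to
docker network create --driver overlay --attachable my_overlay

docker run -itd --network my_overlay --name c-remote reg.cloud.com/busybox
# containers on other swarm nodes attached to my_overlay can reach c-remote by name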



[root@host01-2 _data]# docker inspect bridge
[
    {
        "Name": "bridge",
        "Id": "76d13a20d92c8d68860705f57e8989055660ac78cafea4eaa033cb1fd856e6fa",
        "Created": "2018-05-21T15:11:08.970105946+09:00",
        "Scope": "local",
        "Driver": "bridge",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": null,
            "Config": [
                {
                    "Subnet": "172.17.0.0/16",
                    "Gateway": "172.17.0.1"
                }
            ]
        },
        "Internal": false,
        "Attachable": false,
        "Ingress": false,
        "ConfigFrom": {
            "Network": ""
        },
        "ConfigOnly": false,
        "Containers": {},  #no containers attached yet
        "Options": {
            "com.docker.network.bridge.default_bridge": "true",
            "com.docker.network.bridge.enable_icc": "true",
            "com.docker.network.bridge.enable_ip_masquerade": "true",
            "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
            "com.docker.network.bridge.name": "docker0",
            "com.docker.network.driver.mtu": "1500"
        },
        "Labels": {}
    }
]
[root@host01-2 _data]# docker inspect bridge
[
    {
        "Name": "bridge",
        "Id": "76d13a20d92c8d68860705f57e8989055660ac78cafea4eaa033cb1fd856e6fa",
        "Created": "2018-05-21T15:11:08.970105946+09:00",
        "Scope": "local",
        "Driver": "bridge",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": null,
            "Config": [
                {
                    "Subnet": "172.17.0.0/16",
                    "Gateway": "172.17.0.1"
                }
            ]
        },
        "Internal": false,
        "Attachable": false,
        "Ingress": false,
        "ConfigFrom": {
            "Network": ""
        },
        "ConfigOnly": false,
        "Containers": {
            "8c4add8d8d16d6b7d9eb247cee125e012a803c61f5b6592c8819dd30b926fbab": {
                "Name": "c1",
                "EndpointID": "dfda49ba957d32e371c012c3952f01aaff83e2c3c9668fbae07cb53595ef02d5",
                "MacAddress": "02:42:ac:11:00:02",
                "IPv4Address": "172.17.0.2/16",
                "IPv6Address": ""
            }
        },
        "Options": {
            "com.docker.network.bridge.default_bridge": "true",
            "com.docker.network.bridge.enable_icc": "true",
            "com.docker.network.bridge.enable_ip_masquerade": "true",
            "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
            "com.docker.network.bridge.name": "docker0",
            "com.docker.network.driver.mtu": "1500"
        },
        "Labels": {}
    }
]
[root@host01-2 _data]# docker inspect bridge
[
    {
        "Name": "bridge",
        "Id": "76d13a20d92c8d68860705f57e8989055660ac78cafea4eaa033cb1fd856e6fa",
        "Created": "2018-05-21T15:11:08.970105946+09:00",
        "Scope": "local",
        "Driver": "bridge",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": null,
            "Config": [
                {
                    "Subnet": "172.17.0.0/16",
                    "Gateway": "172.17.0.1"
                }
            ]
        },
        "Internal": false,
        "Attachable": false,
        "Ingress": false,
        "ConfigFrom": {
            "Network": ""
        },
        "ConfigOnly": false,
        "Containers": {
            "703292574cb0d7730b1fe601acf826a99ac2530b056624ae2ad808e1e90db2f1": {
                "Name": "c2",
                "EndpointID": "43da37e1fef531cf525de9dae8801889e819108f27113bed9b09bda6e82cb95e",
                "MacAddress": "02:42:ac:11:00:03",
                "IPv4Address": "172.17.0.3/16",
                "IPv6Address": ""
            },
            "8c4add8d8d16d6b7d9eb247cee125e012a803c61f5b6592c8819dd30b926fbab": {
                "Name": "c1",
                "EndpointID": "dfda49ba957d32e371c012c3952f01aaff83e2c3c9668fbae07cb53595ef02d5",
                "MacAddress": "02:42:ac:11:00:02",
                "IPv4Address": "172.17.0.2/16",
                "IPv6Address": ""
            }
        },
        "Options": {
            "com.docker.network.bridge.default_bridge": "true",
            "com.docker.network.bridge.enable_icc": "true",
            "com.docker.network.bridge.enable_ip_masquerade": "true",
            "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
            "com.docker.network.bridge.name": "docker0",
            "com.docker.network.driver.mtu": "1500"
        },
        "Labels": {}
    }
]
[root@host01-2 _data]# docker attach c1
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
152: eth0@if153: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
    link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.2/16 brd 172.17.255.255 scope global eth0
       valid_lft forever preferred_lft forever
/ # exit
d[root@host01-2 _data]# docker ps a
"docker ps" accepts no arguments.
See 'docker ps --help'.
 
Usage:  docker ps [OPTIONS] [flags]
 
List containers
[root@host01-2 _data]# docker ps -aa
CONTAINER ID        IMAGE                   COMMAND             CREATED             STATUS                     PORTS               NAMES
703292574cb0        reg.cloud.com/busybox   "sh"                29 seconds ago      Up 27 seconds                                  c2
8c4add8d8d16        reg.cloud.com/busybox   "sh"                50 seconds ago      Exited (0) 5 seconds ago                       c1
[root@host01-2 _data]# docker ps -a
CONTAINER ID        IMAGE                   COMMAND             CREATED             STATUS                     PORTS               NAMES
703292574cb0        reg.cloud.com/busybox   "sh"                31 seconds ago      Up 29 seconds                                  c2
8c4add8d8d16        reg.cloud.com/busybox   "sh"                52 seconds ago      Exited (0) 7 seconds ago                       c1
[root@host01-2 _data]# network inspect
-bash: network: command not found
[root@host01-2 _data]# docker network inspect
"docker network inspect" requires at least 1 argument.
See 'docker network inspect --help'.
 
Usage:  docker network inspect [OPTIONS] NETWORK [NETWORK...] [flags]
 
Display detailed information on one or more networks
[root@host01-2 _data]# docker inspect bridge
[
    {
        "Name": "bridge",
        "Id": "76d13a20d92c8d68860705f57e8989055660ac78cafea4eaa033cb1fd856e6fa",
        "Created": "2018-05-21T15:11:08.970105946+09:00",
        "Scope": "local",
        "Driver": "bridge",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": null,
            "Config": [
                {
                    "Subnet": "172.17.0.0/16",
                    "Gateway": "172.17.0.1"
                }
            ]
        },
        "Internal": false,
        "Attachable": false,
        "Ingress": false,
        "ConfigFrom": {
            "Network": ""
        },
        "ConfigOnly": false,
        "Containers": {
            "703292574cb0d7730b1fe601acf826a99ac2530b056624ae2ad808e1e90db2f1": {
                "Name": "c2",  #c1 is gone (it exited); only c2 remains
                "EndpointID": "43da37e1fef531cf525de9dae8801889e819108f27113bed9b09bda6e82cb95e",
                "MacAddress": "02:42:ac:11:00:03",
                "IPv4Address": "172.17.0.3/16",
                "IPv6Address": ""
            }
        },
        "Options": {
            "com.docker.network.bridge.default_bridge": "true",
            "com.docker.network.bridge.enable_icc": "true",
            "com.docker.network.bridge.enable_ip_masquerade": "true",
            "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
            "com.docker.network.bridge.name": "docker0",
            "com.docker.network.driver.mtu": "1500"
        },
        "Labels": {}
    }
]
[root@host01-2 _data]#
[root@host01-2 _data]# docker ps -a
CONTAINER ID        IMAGE                   COMMAND             CREATED             STATUS                     PORTS               NAMES
703292574cb0        reg.cloud.com/busybox   "sh"                3 minutes ago       Up 3 minutes                                   c2
8c4add8d8d16        reg.cloud.com/busybox   "sh"                3 minutes ago       Exited (0) 2 minutes ago                       c1
[root@host01-2 _data]# docker attach c2
/ # cat /etc/resolv.conf
# Generated by NetworkManager
search cloud.com
nameserver 10.10.12.1
 
 
cs


*By default, containers share the host's network settings and communicate through docker0 (note the host's /etc/resolv.conf above). To resolve containers by name you need a User Defined Network... it carries its own DNS... you can create a user-defined network and run containers on it.


* Creating and running a user-defined network

The container name is registered in the embedded DNS.


*Let's run containers C3 and C4 on a user-defined network (a network separate from the existing host/default bridge network):

(when C3 pings C4, DNS resolves the name to its IP.)

[root@host01-2 _data]# docker network ls
NETWORK ID          NAME                DRIVER              SCOPE
76d13a20d92c        bridge              bridge              local
a4bddc4df10b        host                host                local
e9ef483dde2d        none                null                local
[root@host01-2 _data]# docker network create test
efe418b38219518e5a7b4d09902dd5c4772c9a4ce9a715a842b60d6848165825
 
[root@host01-2 _data]# docker network ls
NETWORK ID          NAME                DRIVER              SCOPE
76d13a20d92c        bridge              bridge              local
a4bddc4df10b        host                host                local
e9ef483dde2d        none                null                local
efe418b38219        test                bridge              local
[root@host01-2 _data]# docker run --name c3 --network=test --itd reg.cloud.com/busybox
unknown flag: --itd
See 'docker run --help'.
[root@host01-2 _data]# docker run --name c3 --network=test -itd reg.cloud.com/busybox
28674f6d5ac97c95694b4342a76e34120c872c9914881d7fb7a8d9860f587fe8
[root@host01-2 _data]# docker run --name c4 --network=test -itd reg.cloud.com/busybox
48d67f53f81a5c61a1358e02576a1000d8a8d1395df4dfb13e0675408d90d714
[root@host01-2 _data]# docker attach c3
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
157: eth0@if158: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
    link/ether 02:42:ac:12:00:02 brd ff:ff:ff:ff:ff:ff
    inet 172.18.0.2/16 brd 172.18.255.255 scope global eth0
       valid_lft forever preferred_lft forever
/ # hostname
28674f6d5ac9
/ # ping c4
PING c4 (172.18.0.3): 56 data bytes
64 bytes from 172.18.0.3: seq=0 ttl=64 time=0.257 ms
64 bytes from 172.18.0.3: seq=1 ttl=64 time=0.178 ms
64 bytes from 172.18.0.3: seq=2 ttl=64 time=0.177 ms
64 bytes from 172.18.0.3: seq=3 ttl=64 time=0.181 ms
64 bytes from 172.18.0.3: seq=4 ttl=64 time=0.180 ms
64 bytes from 172.18.0.3: seq=5 ttl=64 time=0.178 ms
64 bytes from 172.18.0.3: seq=6 ttl=64 time=0.178 ms
64 bytes from 172.18.0.3: seq=7 ttl=64 time=0.206 ms
 
cs
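
The test network above was created with defaults and landed on 172.18.0.0/16. The subnet and gateway can also be chosen explicitly; the values and names below (test2, c9) are only examples:

docker network create --driver bridge --subnet 172.30.0.0/16 --gateway 172.30.0.1 test2
docker run -itd --name c9 --network test2 reg.cloud.com/busybox
docker network inspect test2    # shows the IPAM config and the attached containers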







*Configuring a user-defined network separate from docker0
Create a new user-defined network called isolated_nw
and run containers C3, C4, and C5 on it (the network creation step is sketched right below).


C4 can be looked up under the name web

C5 can be looked up under the name db
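
The creation of isolated_nw itself is not captured in the transcript below, so here is a hedged sketch of that setup step; the --link runs that follow are the original session:

docker network create isolated_nw
docker run -itd --name c3 --network isolated_nw reg.cloud.com/busybox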


*How to assign aliases (using --link) - the containers can also be pinged by the names db and web

[root@host01-2 ~]# docker run --network=isolated_nw -itd --name c5 --link c4:web  reg.cloud.com/b^Cybox
[root@host01-2 ~]# docker rm -f $(docker ps -aq)
9891fb7e3af2
ec900985c74c
1df32b36382b
33c48eda73cc
079bb7c7c709
[root@host01-2 ~]# docker run --network=isolated_nw -itd --name c5 --link c4:web  reg.cloud.com/busybox
0a0c051a84839085601e938e3843015263b1867cf87db9985051cb20d2aca433
[root@host01-2 ~]# docker run --network=isolated_nw -itd --name c4 --link c5:db  reg.cloud.com/busybox
6c07e34435a360d3142f0955a06a64197775902095ea54ff9009c6107593c98d
[root@host01-2 ~]# docker attach c4
/ # ping c5
PING c5 (172.25.0.2): 56 data bytes
64 bytes from 172.25.0.2: seq=0 ttl=64 time=0.287 ms
64 bytes from 172.25.0.2: seq=1 ttl=64 time=0.180 ms
^C
--- c5 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 0.180/0.233/0.287 ms
/ # ping db
PING db (172.25.0.2): 56 data bytes
64 bytes from 172.25.0.2: seq=0 ttl=64 time=0.159 ms
64 bytes from 172.25.0.2: seq=1 ttl=64 time=0.183 ms
^C
--- db ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 0.159/0.171/0.183 ms
/ # ^C
/ #
 
cs



*How to assign aliases (using --network-alias) - calling the alias returns c6 and c7 in round-robin fashion


[root@host01-2 ~]# docker run -itd --network=isolated_nw --name c6 --network-alias app reg.cloud.com/busybox
117113d2b352a4a7253914d1b1178cf92e6ef6aa49bf194f25f81fe8ccf5452a
[root@host01-2 ~]# docker run -itd --network=isolated_nw --name c7 --network-alias app reg.cloud.com/busybox
23b79ec545f71d9a448829fbdd1c30ee1a1110d07742a067c7475544d6987abe
[root@host01-2 ~]# docker run --network=isolated_nw -it --name c8 reg.cloud.com/busybox
/ # ping c6
PING c6 (172.25.0.4): 56 data bytes
64 bytes from 172.25.0.4: seq=0 ttl=64 time=0.261 ms
64 bytes from 172.25.0.4: seq=1 ttl=64 time=0.176 ms
^C
--- c6 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 0.176/0.218/0.261 ms
/ # ping c7
PING c7 (172.25.0.5): 56 data bytes
64 bytes from 172.25.0.5: seq=0 ttl=64 time=0.265 ms
64 bytes from 172.25.0.5: seq=1 ttl=64 time=0.179 ms
^C
--- c7 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 0.179/0.222/0.265 ms
/ # ping app
PING app (172.25.0.4): 56 data bytes
64 bytes from 172.25.0.4: seq=0 ttl=64 time=0.189 ms
64 bytes from 172.25.0.4: seq=1 ttl=64 time=0.177 ms
^C
--- app ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 0.177/0.183/0.189 ms
/ #
 
 
cs
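
In the ping output above, app always resolved to 172.25.0.4; the round-robin behaviour is easier to observe by asking the embedded DNS for all records of the alias. A sketch from inside c8 (busybox ships a small nslookup; the embedded DNS of a user-defined network listens on 127.0.0.11):

/ # nslookup app
# should list both 172.25.0.4 (c6) and 172.25.0.5 (c7) for the alias,
# and the order rotates between queries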






* The LINK concept (old) - no longer used today (Why? it only supports a single host) => use a User Defined Network instead


C1(DB) <------ C2(web)


Before querying any DNS server, the first file C2 consults is /etc/hosts!!

The linked container's exposed ports and environment are all handed over to C2 (as the env output below shows).


* How to use link (env data, including MYSQL_ROOT_PASSWORD, is passed across)


[root@host01-2 ~]# docker run --name db -e MYSQL_ROOT_PASSWORD=1234 -d reg.cloud.com/mysql
c718db3a5bed23a0c8f4bba81b2500e9350b3813364cf2780c86b27ac1754732
[root@host01-2 ~]# docker logs db
Initializing database
2018-05-24T05:50:32.189359Z 0 [Warning] TIMESTAMP with implicit DEFAULT value is deprecated. Please use --explicit_defaults_for_timestamp server option (see documentation for more details).
2018-05-24T05:50:34.743553Z 0 [Warning] InnoDB: New log files created, LSN=45790
2018-05-24T05:50:35.111689Z 0 [Warning] InnoDB: Creating foreign key constraint system tables.
2018-05-24T05:50:35.216022Z 0 [Warning] No existing UUID has been found, so we assume that this is the first time that this server has been started. Generating a new UUID: 60c3bc8a-5f16-11e8-a17f-0242ac110002.
2018-05-24T05:50:35.234417Z 0 [Warning] Gtid table is not ready to be used. Table 'mysql.gtid_executed' cannot be opened.
2018-05-24T05:50:35.235689Z 1 [Warning] root@localhost is created with an empty password ! Please consider switching off the --initialize-insecure option.
2018-05-24T05:50:39.434990Z 1 [Warning] 'user' entry 'root@localhost' ignored in --skip-name-resolve mode.
2018-05-24T05:50:39.435049Z 1 [Warning] 'user' entry 'mysql.session@localhost' ignored in --skip-name-resolve mode.
2018-05-24T05:50:39.435078Z 1 [Warning] 'user' entry 'mysql.sys@localhost' ignored in --skip-name-resolve mode.
2018-05-24T05:50:39.435139Z 1 [Warning] 'db' entry 'performance_schema mysql.session@localhost' ignored in --skip-name-resolve mode.
2018-05-24T05:50:39.435158Z 1 [Warning] 'db' entry 'sys mysql.sys@localhost' ignored in --skip-name-resolve mode.
2018-05-24T05:50:39.435198Z 1 [Warning] 'proxies_priv' entry '@ root@localhost' ignored in --skip-name-resolve mode.
2018-05-24T05:50:39.435294Z 1 [Warning] 'tables_priv' entry 'user mysql.session@localhost' ignored in --skip-name-resolve mode.
2018-05-24T05:50:39.435326Z 1 [Warning] 'tables_priv' entry 'sys_config mysql.sys@localhost' ignored in --skip-name-resolve mode.
[root@host01-2 ~]# docker ps
CONTAINER ID        IMAGE                   COMMAND                  CREATED             STATUS              PORTS               NAMES
c718db3a5bed        reg.cloud.com/mysql     "docker-entrypoint.s…"   22 seconds ago      Up 21 seconds       3306/tcp            db
23b79ec545f7        reg.cloud.com/busybox   "sh"                     22 minutes ago      Up 22 minutes                           c7
117113d2b352        reg.cloud.com/busybox   "sh"                     22 minutes ago      Up 22 minutes                           c6
6c07e34435a3        reg.cloud.com/busybox   "sh"                     25 minutes ago      Up 25 minutes                           c4
0a0c051a8483        reg.cloud.com/busybox   "sh"                     25 minutes ago      Up 25 minutes                           c5
[root@host01-2 ~]# docker exec db env
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
HOSTNAME=c718db3a5bed
MYSQL_ROOT_PASSWORD=1234
GOSU_VERSION=1.7
MYSQL_MAJOR=5.7
MYSQL_VERSION=5.7.20-1debian8
HOME=/root
[root@host01-2 ~]# docker run -it --link db:sql reg.cloud.com/mysql bash
root@e0de0246a2a3:/# cat /etc/resolv.conf
# Generated by NetworkManager
search cloud.com
nameserver 10.10.12.1
root@e0de0246a2a3:/# cat /etc/hosts
127.0.0.1       localhost
::1     localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
172.17.0.2      sql c718db3a5bed db
172.17.0.3      e0de0246a2a3
root@e0de0246a2a3:/# ping sql
PING sql (172.17.0.2): 56 data bytes
64 bytes from 172.17.0.2: icmp_seq=0 ttl=64 time=0.328 ms
64 bytes from 172.17.0.2: icmp_seq=1 ttl=64 time=0.202 ms
^C--- sql ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max/stddev = 0.202/0.265/0.328/0.063 ms
root@e0de0246a2a3:/# env
HOSTNAME=e0de0246a2a3
TERM=xterm
MYSQL_VERSION=5.7.20-1debian8
SQL_ENV_MYSQL_VERSION=5.7.20-1debian8
SQL_PORT_3306_TCP=tcp://172.17.0.2:3306
SQL_NAME=/brave_panini/sql
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
SQL_PORT_3306_TCP_ADDR=172.17.0.2
SQL_ENV_MYSQL_MAJOR=5.7
PWD=/
SQL_PORT_3306_TCP_PORT=3306
SQL_ENV_MYSQL_ROOT_PASSWORD=1234
HOME=/root
SHLVL=1
SQL_PORT_3306_TCP_PROTO=tcp
MYSQL_MAJOR=5.7
GOSU_VERSION=1.7
SQL_ENV_GOSU_VERSION=1.7
SQL_PORT=tcp://172.17.0.2:3306
_=/usr/bin/env
root@e0de0246a2a3:/#
 
cs
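
The same db/client wiring without the legacy --link flag, using a user-defined network as recommended above; the network name app_net is made up for illustration:

docker network create app_net
docker run -d  --name db  --network app_net -e MYSQL_ROOT_PASSWORD=1234 reg.cloud.com/mysql
docker run -it --name web --network app_net reg.cloud.com/mysql bash
# inside web, "ping db" and "mysql -h db -p" resolve via the embedded DNS,
# and none of db's environment variables leak into web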





