vagrant@k-control:~$ kubectl create -f ldh-cj.yml
cronjob.batch/ldh-cj created
vagrant@k-control:~$ kubectl get cronjobs.batch
NAME     SCHEDULE      SUSPEND   ACTIVE   LAST SCHEDULE   AGE
ldh-cj   */2 * * * *   False     1        60s             5m41s
vagrant@k-control:~$ kubectl get pods --watch
NAME                      READY   STATUS             RESTARTS   AGE
h                         0/1     ImagePullBackOff   0          17m
ldh-cj-1625807880-9jmkk   0/1     Completed          0          5m24s
ldh-cj-1625808000-rpmhh   0/1     Completed          0          3m23s
ldh-cj-1625808120-swvdl   0/1     Completed          0          93s
ldh-rs-2dkt8              1/1     Running            0          73m
ldh-rs-52kqm              1/1     Running            0          73m
ldh-rs-9m5k4              1/1     Running            0          66m
ldh-rs-f4sqr              1/1     Running            0          73m
ldh-rs-vjp5n              1/1     Running            0          66m
^Cvagrant@k-control:~$ kubectl get cronjobs.batch
NAME     SCHEDULE      SUSPEND   ACTIVE   LAST SCHEDULE   AGE
ldh-cj   */2 * * * *   False     0        110s            6m31s
vagrant@k-control:~$ kubectl delete -f ldh-cj.yml
cronjob.batch "ldh-cj" deleted
vagrant@k-control:~$ cat ldh.cj.yml
cat: ldh.cj.yml: No such file or directory
vagrant@k-control:~$ cat ldh-cj.yml
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: ldh-cj
spec:
  schedule: "*/2 * * * *"
  jobTemplate:
    spec:
      template:
        metadata:
          labels:
            app: ldh-cj
        spec:
          restartPolicy: OnFailure
          containers:
          - name: ldh
            image: busybox
            command: ["sleep", "60"]
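
The schedule field uses standard five-field cron syntax: */2 * * * * fires every two minutes, which matches the roughly two-minute spacing of the Completed pods above. The batch/v1beta1 CronJob spec also accepts a few optional knobs; a minimal sketch (the field names are from the CronJob API, the values here are only illustrative):

spec:
  schedule: "*/2 * * * *"
  concurrencyPolicy: Forbid        # do not start a new run while the previous one is still active
  startingDeadlineSeconds: 120     # skip a run that cannot start within 2 minutes of its schedule
  successfulJobsHistoryLimit: 3    # keep only the last 3 completed Jobs
  failedJobsHistoryLimit: 1        # keep only the last failed Job
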
vagrant@k-control:~$ kubectl delete teplicasets.apps,service --all
error: the server doesn't have a resource type "teplicasets"
vagrant@k-control:~$ kubectl delete replicasets.apps,service --all
replicaset.apps "ldh-rs" deleted
service "kubernetes" deleted
service "ldh-svc" deleted
vagrant@k-control:~$ vi ldh-svc-np.yml
vagrant@k-control:~$ cat ldh-svc-np.yml
apiVersion: v1
kind: Service
metadata:
  name: ldh-svc-np
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 8080
  selector:
    app: ldh-rs
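
With type: NodePort, the Service still gets a ClusterIP, and in addition the same port is opened on every node. Because no nodePort is specified here, the API server picks one at random from the service-node-port-range (default 30000-32767); it lands on 31521 below. To pin a fixed port instead, the port entry can name it explicitly; a minimal sketch, assuming 31521 is free:

ports:
- port: 80
  targetPort: 8080
  nodePort: 31521    # must lie inside the service-node-port-range
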
vagrant@k-control:~$ kubectl create -f ldh-svc-np.yml
service/ldh-svc-np created
vagrant@k-control:~$ kubectl get np
error: the server doesn't have a resource type "np"
vagrant@k-control:~$ kubectl get ep
NAME         ENDPOINTS             AGE
kubernetes   192.168.200.50:6443   48m
ldh-svc-np   <none>                30s
vagrant@k-control:~$ kubectl get svc
NAME         TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1      <none>        443/TCP        48m
ldh-svc-np   NodePort    10.99.90.246   <none>        80:31521/TCP   56s
vagrant@k-control:~$ sudo ss -tnlp
State    Recv-Q   Send-Q   Local Address:Port     Peer Address:Port   Process
LISTEN   0        4096     127.0.0.53%lo:53       0.0.0.0:*           users:(("systemd-resolve",pid=577,fd=13))
LISTEN   0        128      0.0.0.0:22             0.0.0.0:*           users:(("sshd",pid=682,fd=3))
LISTEN   0        4096     127.0.0.1:45529        0.0.0.0:*           users:(("kubelet",pid=626,fd=14))
LISTEN   0        4096     0.0.0.0:31521          0.0.0.0:*           users:(("kube-proxy",pid=3110,fd=10))
LISTEN   0        4096     127.0.0.1:10248        0.0.0.0:*           users:(("kubelet",pid=626,fd=28))
LISTEN   0        4096     127.0.0.1:10249        0.0.0.0:*           users:(("kube-proxy",pid=3110,fd=19))
LISTEN   0        4096     127.0.0.1:9099         0.0.0.0:*           users:(("calico-node",pid=3826,fd=8))
LISTEN   0        4096     192.168.200.50:2379    0.0.0.0:*           users:(("etcd",pid=2706,fd=6))
LISTEN   0        4096     127.0.0.1:2379         0.0.0.0:*           users:(("etcd",pid=2706,fd=5))
LISTEN   0        4096     192.168.200.50:2380    0.0.0.0:*           users:(("etcd",pid=2706,fd=3))
LISTEN   0        4096     127.0.0.1:2381         0.0.0.0:*           users:(("etcd",pid=2706,fd=11))
LISTEN   0        4096     127.0.0.1:10257        0.0.0.0:*           users:(("kube-controller",pid=2686,fd=7))
LISTEN   0        8        0.0.0.0:179            0.0.0.0:*           users:(("bird",pid=3978,fd=7))
LISTEN   0        4096     127.0.0.1:10259        0.0.0.0:*           users:(("kube-scheduler",pid=2661,fd=7))
LISTEN   0        128      [::]:22                [::]:*              users:(("sshd",pid=682,fd=4))
LISTEN   0        4096     *:10250                *:*                 users:(("kubelet",pid=626,fd=26))
LISTEN   0        4096     *:6443                 *:*                 users:(("kube-apiserver",pid=2690,fd=7))
LISTEN   0        4096     *:10256                *:*                 users:(("kube-proxy",pid=3110,fd=20))
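
The line to notice is kube-proxy listening on 0.0.0.0:31521: the allocated NodePort is opened on every node, not only on nodes that run a matching pod. At this moment, though, the Service has no endpoints to forward to (kubectl get ep showed <none>), so connections will be refused until backing pods exist. A quick way to see the selector, NodePort, and current endpoints in one place:

kubectl describe service ldh-svc-np
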
vagrant@k-control:~$ kubectl run ldh -it --image=ghcr.io/c1t1d0s7/network-multitool --rm bash
If you don't see a command prompt, try pressing enter.
Error attaching, falling back to logs: unable to upgrade connection: pod does not exist
pod "ldh" deleted
Error from server (NotFound): the server could not find the requested resource ( pods/log ldh)
vagrant@k-control:~$ kubectl run dh -it --image=ghcr.io/c1t1d0s7/network-multitool --rm bash
If you don't see a command prompt, try pressing enter.
bash-5.1# curl 10.99.90.246
curl: (7) Failed to connect to 10.99.90.246 port 80: Connection refused
bash-5.1# curl 10.99.90.246
curl: (7) Failed to connect to 10.99.90.246 port 80: Connection refused
bash-5.1# curl 10.99.90.246:8080
^C
bash-5.1# exit
exit
Session ended, resume using 'kubectl attach dh -c dh -i -t' command when the pod is running
pod "dh" deleted
vagrant@k-control:~$ kubectl get nodes -o wide
NAME        STATUS   ROLES    AGE     VERSION    INTERNAL-IP      EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION     CONTAINER-RUNTIME
k-control   Ready    master   2d22h   v1.19.12   192.168.200.50   <none>        Ubuntu 20.04.2 LTS   5.4.0-77-generic   docker://20.10.7
k-node1     Ready    <none>   2d22h   v1.19.12   192.168.200.51   <none>        Ubuntu 20.04.2 LTS   5.4.0-77-generic   docker://20.10.7
k-node2     Ready    <none>   2d22h   v1.19.12   192.168.200.52   <none>        Ubuntu 20.04.2 LTS   5.4.0-77-generic   docker://20.10.7
k-node3     Ready    <none>   29h     v1.19.12   10.0.2.15        <none>        Ubuntu 20.04.2 LTS   5.4.0-77-generic   docker://20.10.7
vagrant@k-control:~$ vi ldh-svc-np.yml
vagrant@k-control:~$ kubectl create -f ldh-rs.yml
replicaset.apps/ldh-rs created
vagrant@k-control:~$ kubectl get nodes -o wide
NAME        STATUS   ROLES    AGE     VERSION    INTERNAL-IP      EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION     CONTAINER-RUNTIME
k-control   Ready    master   2d22h   v1.19.12   192.168.200.50   <none>        Ubuntu 20.04.2 LTS   5.4.0-77-generic   docker://20.10.7
k-node1     Ready    <none>   2d22h   v1.19.12   192.168.200.51   <none>        Ubuntu 20.04.2 LTS   5.4.0-77-generic   docker://20.10.7
k-node2     Ready    <none>   2d22h   v1.19.12   192.168.200.52   <none>        Ubuntu 20.04.2 LTS   5.4.0-77-generic   docker://20.10.7
k-node3     Ready    <none>   29h     v1.19.12   10.0.2.15        <none>        Ubuntu 20.04.2 LTS   5.4.0-77-generic   docker://20.10.7
vagrant@k-control:~$ curl http://192.168.200.50
curl: (7) Failed to connect to 192.168.200.50 port 80: Connection refused
vagrant@k-control:~$ curl http://192.168.200.50:31521
Hello World!
ldh-rs-wvwx5
vagrant@k-control:~$ kubectl get replicasets.apps
NAME     DESIRED   CURRENT   READY   AGE
ldh-rs   3         3         3       3m33s
vagrant@k-control:~$ kubectl get pods
NAME           READY   STATUS             RESTARTS   AGE
h              0/1     ImagePullBackOff   0          90m
ldh-rs-5pbt4   1/1     Running            0          3m43s
ldh-rs-gxjs6   1/1     Running            0          3m44s
ldh-rs-wvwx5   1/1     Running            0          3m43s
vagrant@k-control:~$ kubectl run dh -it --image=ghcr.io/c1t1d0s7/network-multitool --rm bash
If you don't see a command prompt, try pressing enter.
bash-5.1# curl 10.99.90.246
Hello World!
ldh-rs-wvwx5
bash-5.1# exit
exit
Session ended, resume using 'kubectl attach dh -c dh -i -t' command when the pod is running
pod "dh" deleted
vagrant@k-control:~$ kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.10.2/manifests/namespace.yaml
namespace/metallb-system created
vagrant@k-control:~$ kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.10.2/manifests/metallb.yaml
podsecuritypolicy.policy/controller created
podsecuritypolicy.policy/speaker created
serviceaccount/controller created
serviceaccount/speaker created
clusterrole.rbac.authorization.k8s.io/metallb-system:controller created
clusterrole.rbac.authorization.k8s.io/metallb-system:speaker created
role.rbac.authorization.k8s.io/config-watcher created
role.rbac.authorization.k8s.io/pod-lister created
role.rbac.authorization.k8s.io/controller created
clusterrolebinding.rbac.authorization.k8s.io/metallb-system:controller created
clusterrolebinding.rbac.authorization.k8s.io/metallb-system:speaker created
rolebinding.rbac.authorization.k8s.io/config-watcher created
rolebinding.rbac.authorization.k8s.io/pod-lister created
rolebinding.rbac.authorization.k8s.io/controller created
daemonset.apps/speaker created
deployment.apps/controller created
vagrant@k-control:~$ kubectl get ns
NAME              STATUS   AGE
default           Active   2d22h
kube-node-lease   Active   2d22h
kube-public       Active   2d22h
kube-system       Active   2d22h
ldh               Active   26h
metallb-system    Active   32s
vagrant@k-control:~$ kubectl get all -n metallb-system
NAME                              READY   STATUS    RESTARTS   AGE
pod/controller-6b78bff7d9-p5k97   1/1     Running   0          2m2s
pod/speaker-bfb8q                 1/1     Running   0          2m2s
pod/speaker-p2xcz                 1/1     Running   0          2m2s
pod/speaker-qtdww                 1/1     Running   0          2m2s
pod/speaker-t84rj                 1/1     Running   0          2m1s

NAME                     DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE
daemonset.apps/speaker   4         4         4       4            4           kubernetes.io/os=linux   2m2s

NAME                         READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/controller   1/1     1            1           2m2s

NAME                                    DESIRED   CURRENT   READY   AGE
replicaset.apps/controller-6b78bff7d9   1         1         1       2m2s
vagrant@k-control:~$ mkdir addon
vagrant@k-control:~$ cd addon/
vagrant@k-control:~/addon$ mkdir metallb
vagrant@k-control:~/addon$ cd metallb/
vagrant@k-control:~/addon/metallb$ vi configmap.yaml
vagrant@k-control:~/addon/metallb$ cat configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - 192.168.200.200-192.168.200.210
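
In layer2 mode MetalLB simply answers ARP for whichever pool address it assigns, so the range must be routable on the node network; here it is carved out of the nodes' own 192.168.200.0/24 subnet. The same ConfigMap format also supports BGP instead of layer2; a hedged sketch of that shape (the peer values are entirely illustrative, not from this lab):

    address-pools:
    - name: default
      protocol: bgp
      addresses:
      - 192.168.200.200/29
    peers:
    - peer-address: 192.168.200.1    # hypothetical upstream router
      peer-asn: 64512
      my-asn: 64513
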
vagrant@k-control:~/addon/metallb$ cd
vagrant@k-control:~$ kubectl create -f ldh-svc-lb.yml
error: the path "ldh-svc-lb.yml" does not exist
vagrant@k-control:~$ kubectl create -f ldh-svc-lb.yaml
error: the path "ldh-svc-lb.yaml" does not exist
vagrant@k-control:~$ history
vagrant@k-control:~$ vi ldh-svc-lb.yml
vagrant@k-control:~$ kubectl create -f ldh-svc-lb.yaml
error: the path "ldh-svc-lb.yaml" does not exist
vagrant@k-control:~$ kubectl create -f ldh-svc-lb.yml
service/ldh-svc-lb created
vagrant@k-control:~$ kubectl get svc,ep
NAME                 TYPE           CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
service/kubernetes   ClusterIP      10.96.0.1      <none>        443/TCP        108m
service/ldh-svc-lb   LoadBalancer   10.99.132.5    <pending>     80:32268/TCP   24s
service/ldh-svc-np   NodePort       10.99.90.246   <none>        80:31521/TCP   60m

NAME                   ENDPOINTS                                                      AGE
endpoints/kubernetes   192.168.200.50:6443                                            108m
endpoints/ldh-svc-lb   192.168.108.22:8080,192.168.169.96:8080,192.168.82.154:8080   24s
endpoints/ldh-svc-np   192.168.108.22:8080,192.168.169.96:8080,192.168.82.154:8080   60m
vagrant@k-control:~$ kubectl get svc,ep
NAME                 TYPE           CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
service/kubernetes   ClusterIP      10.96.0.1      <none>        443/TCP        108m
service/ldh-svc-lb   LoadBalancer   10.99.132.5    <pending>     80:32268/TCP   47s
service/ldh-svc-np   NodePort       10.99.90.246   <none>        80:31521/TCP   60m

NAME                   ENDPOINTS                                                      AGE
endpoints/kubernetes   192.168.200.50:6443                                            108m
endpoints/ldh-svc-lb   192.168.108.22:8080,192.168.169.96:8080,192.168.82.154:8080   47s
endpoints/ldh-svc-np   192.168.108.22:8080,192.168.169.96:8080,192.168.82.154:8080   60m
vagrant@k-control:~$ cd ~/addon/metallb/
vagrant@k-control:~/addon/metallb$ kubectl create -f configmap.yaml
configmap/config created
vagrant@k-control:~/addon/metallb$ cd~
Command 'cd~' not found, did you mean:
command 'cdw' from deb cdw (0.8.1-1build4)
command 'cdp' from deb irpas (0.10-7)
command 'cde' from deb cde (0.1+git9-g551e54d-1.1build1)
command 'cdi' from deb cdo (1.9.9~rc1-1)
command 'cd5' from deb cd5 (0.1-4)
command 'cdo' from deb cdo (1.9.9~rc1-1)
command 'cdb' from deb tinycdb (0.78build1)
Try: apt install <deb name>
vagrant@k-control:~/addon/metallb$ cd ~
vagrant@k-control:~$ kubectl get svc
NAME         TYPE           CLUSTER-IP     EXTERNAL-IP       PORT(S)        AGE
kubernetes   ClusterIP      10.96.0.1      <none>            443/TCP        109m
ldh-svc-lb   LoadBalancer   10.99.132.5    192.168.200.200   80:32268/TCP   98s
ldh-svc-np   NodePort       10.99.90.246   <none>            80:31521/TCP   61m
vagrant@k-control:~$ cat ldh-svc-lb.yml
apiVersion: v1
kind: Service
metadata:
  name: ldh-svc-lb
spec:
  type: LoadBalancer
  ports:
  - port: 80
    targetPort: 8080
  selector:
    app: ldh-rs
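
With spec.loadBalancerIP left unset, MetalLB hands out the first free address in the pool, 192.168.200.200 here. A specific pool address can be requested instead; a minimal sketch:

spec:
  type: LoadBalancer
  loadBalancerIP: 192.168.200.205   # must belong to a configured MetalLB address pool
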
vagrant@k-control:~$
vagrant@k-node1:~$ exit
logout
Connection to 127.0.0.1 closed.
(base) student@cccr:~/vagrant/k8s$ curl 192.168.200.50:31521
Hello World!
ldh-rs-gxjs6
(base) student@cccr:~/vagrant/k8s$ curl 192.168.200.51:31521
Hello World!
ldh-rs-5pbt4
(base) student@cccr:~/vagrant/k8s$ curl 192.168.200.52:31521
Hello World!
ldh-rs-5pbt4
(base) student@cccr:~/vagrant/k8s$ curl 192.168.200.200
Hello World!
ldh-rs-gxjs6