zone "host.com" IN { type master; file "host.com.zone"; #正向解析(名称自定义) allow-update { 192.168.108.128; }; }; zone "od.com" IN { type master; file "od.com.zone"; #正向解析(名称自定义) allow-update { 192.168.108.128; }; };
@ IN SOA dns.host.com. dnsadmin.host.com. (
        2021070601 ; serial, zone version number
        10800      ; refresh interval
        900        ; retry interval
        604800     ; expire time
        86400      ; minimum, negative-answer caching TTL
)
        NS dns.host.com.
$TTL 60
dns        A 192.168.108.128
ceshi-128  A 192.168.108.128
ceshi-129  A 192.168.108.129
ceshi-130  A 192.168.108.130
ceshi-131  A 192.168.108.131
ceshi-132  A 192.168.108.132
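Before pointing clients at the server, it is worth validating the config and the zone, then confirming a record resolves. A quick check, assuming BIND's standard paths and a systemd-managed named on the 128 node:

[root@ceshi-128 ~]# named-checkconf /etc/named.conf
[root@ceshi-128 ~]# named-checkzone host.com /var/named/host.com.zone
[root@ceshi-128 ~]# systemctl restart named
[root@ceshi-128 ~]# dig -t A ceshi-130.host.com @192.168.108.128 +short   # expect 192.168.108.130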
Zone file for od.com: /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600
@ IN SOA dns.od.com. dnsadmin.od.com. (
        2021070601 ; serial
        10800      ; refresh
        900        ; retry
        604800     ; expire
        86400      ; minimum
)
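The NS and A records of od.com.zone are truncated in the source; they follow the same shape as host.com.zone. A sketch of what typically follows the SOA (the harbor record pointing at the 132 node is an assumption), plus a reload-and-test:

        NS dns.od.com.
$TTL 60
dns     A 192.168.108.128
harbor  A 192.168.108.132

[root@ceshi-128 ~]# rndc reload
[root@ceshi-128 ~]# dig -t A harbor.od.com @192.168.108.128 +short   # expect 192.168.108.132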
Installation error:

prepare base dir is set to /usr/local/harbor-v2.2.1
Error happened in config validation...
ERROR:root:Error: The protocol is https but attribute ssl_cert is not set
Fix: comment out the https/ssl section in harbor.yml, then rerun install.sh.
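The https block in harbor.yml is what trips the validation. Commented out, it looks roughly like this (the placeholder paths are Harbor's own defaults from the template):

# https related config
# https:
#   port: 443
#   certificate: /your/certificate/path
#   private_key: /your/private/key/path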
Start Harbor with docker-compose
[root@ceshi-132 harbor]# pwd
/usr/local/harbor
[root@ceshi-132 harbor]# docker-compose up -d
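A quick status check confirms the containers came up (the exact service list varies by Harbor version):

[root@ceshi-132 harbor]# docker-compose ps
# each service (harbor-core, harbor-db, registry, redis, proxy, ...) should report "Up (healthy)"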
Install nginx
Download and install
Installation node: 132
Install dependencies:
[root@ceshi-132 harbor]# yum install gcc-c++ pcre pcre-devel zlib zlib-devel openssl openssl-devel -y
[root@ceshi-132 harbor]# wget http://tengine.taobao.org/download/tengine-2.2.0.tar.gz
[root@ceshi-132 harbor]# tar xf tengine-2.2.0.tar.gz -C /usr/local
[root@ceshi-132 harbor]# cd /usr/local/tengine-2.2.0/
[root@ceshi-132 tengine-2.2.0]# ./configure
[root@ceshi-132 tengine-2.2.0]# make && make install
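With no ./configure options, tengine installs under nginx's default prefix. A smoke test under that assumption (/usr/local/nginx):

[root@ceshi-132 tengine-2.2.0]# /usr/local/nginx/sbin/nginx -V    # confirm version and build options
[root@ceshi-132 tengine-2.2.0]# /usr/local/nginx/sbin/nginx       # start it
[root@ceshi-132 tengine-2.2.0]# curl -sI 127.0.0.1 | head -1      # expect HTTP/1.1 200 OK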
[root@ceshi-130 bin]# mkdir certs
[root@ceshi-130 bin]# cd certs/
[root@ceshi-130 certs]# scp root@192.168.108.132:/opt/certs/apiserver-key.pem .
[root@ceshi-130 certs]# scp root@192.168.108.132:/opt/certs/apiserver.pem .
[root@ceshi-130 certs]# scp root@192.168.108.132:/opt/certs/ca.pem .
[root@ceshi-130 certs]# scp root@192.168.108.132:/opt/certs/ca-key.pem .
[root@ceshi-130 certs]# scp root@192.168.108.132:/opt/certs/client.pem .
[root@ceshi-130 certs]# scp root@192.168.108.132:/opt/certs/client-key.pem .
[root@ceshi-130 certs]# ll
-rw------- 1 root root 1679 Jul 19 10:34 apiserver-key.pem
-rw-r--r-- 1 root root 1631 Jul 19 10:34 apiserver.pem
-rw------- 1 root root 1675 Jul 19 10:34 ca-key.pem
-rw-r--r-- 1 root root 1342 Jul 19 10:34 ca.pem
-rw------- 1 root root 1679 Jul 19 10:35 client-key.pem
-rw-r--r-- 1 root root 1363 Jul 19 10:35 client.pem
[root@ceshi-130 bin]# mkdir conf

Configure kube-apiserver audit logging and collection:

[root@ceshi-130 conf]# vi audit.yaml
apiVersion: audit.k8s.io/v1beta1 # This is required.
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
  - "RequestReceived"
rules:
  # Log pod changes at RequestResponse level
  - level: RequestResponse
    resources:
    - group: ""
      # Resource "pods" doesn't match requests to any subresource of pods,
      # which is consistent with the RBAC policy.
      resources: ["pods"]
  # Log "pods/log", "pods/status" at Metadata level
  - level: Metadata
    resources:
    - group: ""
      resources: ["pods/log", "pods/status"]
  # Don't log requests to a configmap called "controller-leader"
  - level: None
    resources:
    - group: ""
      resources: ["configmaps"]
      resourceNames: ["controller-leader"]
  # Don't log watch requests by the "system:kube-proxy" on endpoints or services
  - level: None
    users: ["system:kube-proxy"]
    verbs: ["watch"]
    resources:
    - group: "" # core API group
      resources: ["endpoints", "services"]
  # Log the request body of configmap changes in kube-system.
  - level: Request
    resources:
    - group: "" # core API group
      resources: ["configmaps"]
    # This rule only applies to resources in the "kube-system" namespace.
    # The empty string "" can be used to select non-namespaced resources.
    namespaces: ["kube-system"]
  # Log configmap and secret changes in all other namespaces at the Metadata level.
  - level: Metadata
    resources:
    - group: "" # core API group
      resources: ["secrets", "configmaps"]
  # Log all other resources in core and extensions at the Request level.
  - level: Request
    resources:
    - group: "" # core API group
    - group: "extensions" # Version of group should NOT be included.
  # A catch-all rule to log all other requests at the Metadata level.
  - level: Metadata
    # Long-running requests like watches that fall under this rule will not
    # generate an audit event in RequestReceived.
    omitStages:
      - "RequestReceived"
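The policy only takes effect if kube-apiserver is started with the audit flags pointing at it. A sketch of the audit-related fragment of kube-apiserver.sh (the log path mirrors the supervisord config below; the maxage/maxbackup/maxsize values are arbitrary assumptions, and the full script carries the usual cert/etcd/CIDR flags as well):

#!/bin/sh
# audit flags only -- values here are assumptions; relative paths resolve
# against /opt/kubernetes/server/bin, the supervisord working directory
./kube-apiserver \
  --audit-policy-file ./conf/audit.yaml \
  --audit-log-path /data/logs/kubernetes/kube-apiserver/audit.log \
  --audit-log-maxage 7 \
  --audit-log-maxbackup 5 \
  --audit-log-maxsize 100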
[program:kube-apiserver-7-22]
command=/opt/kubernetes/server/bin/kube-apiserver.sh ; the program (relative uses PATH, can take args)
numprocs=1                    ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true                ; start at supervisord start (default: true)
autorestart=true              ; restart at unexpected quit (default: true)
startsecs=30                  ; number of secs prog must stay running (def. 1)
startretries=3                ; max # of serial start failures (default 3)
exitcodes=0,2                 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT               ; signal used to kill process (default TERM)
stopwaitsecs=10               ; max num secs to wait b4 SIGKILL (default 10)
user=root                     ; setuid to this UNIX account to run the program
redirect_stderr=true          ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB  ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4      ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB   ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false   ; emit events on stdout writes (default false)
[root@ceshi-131 supervisord.d]# supervisorctl update
kube-apiserver-7-22: added process group
[root@ceshi-131 bin]# supervisorctl status
etcd-server           RUNNING   pid 2405, uptime 2 days, 19:47:56
kube-apiserver-7-22   RUNNING   pid 3545, uptime 0:03:32
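To confirm the apiserver is actually serving rather than merely marked RUNNING, check the listen port (6443 is the default secure port):

[root@ceshi-131 bin]# netstat -lntp | grep 6443   # expect kube-apiserver listening on 6443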
[root@ceshi-128 ~]# yum install gcc-c++ pcre pcre-devel zlib zlib-devel openssl openssl-devel -y
[root@ceshi-128 ~]# tar -xf tengine-2.3.3.tar.gz -C /usr/local/
[root@ceshi-128 ~]# cd /usr/local/tengine-2.3.3/
[root@ceshi-128 tengine-2.3.3]# ./configure --with-stream
[root@ceshi-128 tengine-2.3.3]# make && make install
[root@ceshi-128 conf]# cat /usr/local/nginx/conf/nginx.conf

Append the Layer 4 (stream) block at the end of the file. It must not be placed inside the http block, because http works at Layer 7:

stream {                        # L4 reverse proxy
    upstream kube-apiserver {   # backend apiserver addresses
        server 192.168.108.130:6443 max_fails=3 fail_timeout=30s;
        server 192.168.108.131:6443 max_fails=3 fail_timeout=30s;
    }
    server {
        listen 7443;            # local listen port
        proxy_connect_timeout 2s;
        proxy_timeout 900s;
        proxy_pass kube-apiserver;
    }
}

[root@ceshi-128 conf]# ../sbin/nginx -t
nginx: the configuration file /usr/local/nginx/conf/nginx.conf syntax is ok
nginx: configuration file /usr/local/nginx/conf/nginx.conf test is successful
[root@ceshi-128 logs]# systemctl enable nginx
[root@ceshi-128 logs]# systemctl status nginx
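systemctl enable/status only work if a unit file exists, and a source build does not install one. A minimal sketch of /usr/lib/systemd/system/nginx.service (this unit is an assumption, not something make install creates):

[Unit]
Description=nginx (tengine) L4 proxy for kube-apiserver
After=network.target

[Service]
Type=forking
ExecStartPre=/usr/local/nginx/sbin/nginx -t
ExecStart=/usr/local/nginx/sbin/nginx
ExecReload=/usr/local/nginx/sbin/nginx -s reload
ExecStop=/usr/local/nginx/sbin/nginx -s stop

[Install]
WantedBy=multi-user.target

After writing the unit, run systemctl daemon-reload so systemd picks it up.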
[root@ceshi-130 ~]# vi /etc/supervisord.d/kube-controller-manager.ini
# on other nodes, replace kube-controller-manager-7-21 with that node's name
[program:kube-controller-manager-7-21]
command=/opt/kubernetes/server/bin/kube-controller-manager.sh ; the program (relative uses PATH, can take args)
numprocs=1                    ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true                ; start at supervisord start (default: true)
autorestart=true              ; restart at unexpected quit (default: true)
startsecs=30                  ; number of secs prog must stay running (def. 1)
startretries=3                ; max # of serial start failures (default 3)
exitcodes=0,2                 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT               ; signal used to kill process (default TERM)
stopwaitsecs=10               ; max num secs to wait b4 SIGKILL (default 10)
user=root                     ; setuid to this UNIX account to run the program
redirect_stderr=true          ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-controller-manager/controller.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB  ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4      ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB   ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false   ; emit events on stdout writes (default false)
[root@ceshi-130 ~]# supervisorctl update
kube-controller-manager-7-21: added process group
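The kube-controller-manager.sh script that supervisord launches is not shown in this section. A minimal sketch, assuming the local insecure apiserver port 8080 is enabled and the cert paths match the scp step above (the pod CIDR 172.7.0.0/16 is inferred from the pod IPs seen later; adjust all values to the cluster):

#!/bin/sh
# minimal sketch -- flag values are assumptions
./kube-controller-manager \
  --cluster-cidr 172.7.0.0/16 \
  --leader-elect true \
  --master http://127.0.0.1:8080 \
  --service-account-private-key-file ./certs/ca-key.pem \
  --root-ca-file ./certs/ca.pem \
  --v 2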
[root@ceshi-130 ~]# vi /etc/supervisord.d/kube-scheduler.ini
# on other nodes, replace kube-scheduler-7-21 with that node's name
[program:kube-scheduler-7-21]
command=/opt/kubernetes/server/bin/kube-scheduler.sh ; the program (relative uses PATH, can take args)
numprocs=1                    ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true                ; start at supervisord start (default: true)
autorestart=true              ; restart at unexpected quit (default: true)
startsecs=30                  ; number of secs prog must stay running (def. 1)
startretries=3                ; max # of serial start failures (default 3)
exitcodes=0,2                 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT               ; signal used to kill process (default TERM)
stopwaitsecs=10               ; max num secs to wait b4 SIGKILL (default 10)
user=root                     ; setuid to this UNIX account to run the program
redirect_stderr=true          ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-scheduler/scheduler.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB  ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4      ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB   ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false   ; emit events on stdout writes (default false)
[root@ceshi-130 ~]# supervisorctl update
kube-scheduler-7-21: added process group
[root@ceshi-130 ~]# supervisorctl status
etcd-server                    RUNNING   pid 1640, uptime 0:02:55
kube-apiserver-7-21            RUNNING   pid 1644, uptime 0:02:55
kube-controller-manager-7-21   RUNNING   pid 1631, uptime 0:02:55
kube-scheduler-7-21            RUNNING   pid 1641, uptime 0:02:55
[root@ceshi-130 ~]# ln -s /opt/kubernetes/server/bin/kubectl /usr/bin/kubectl

Check component status:
[root@ceshi-130 ~]# kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
scheduler            Healthy   ok
etcd-0               Healthy   {"health": "true"}
etcd-1               Healthy   {"health": "true"}
controller-manager   Healthy   ok
etcd-2               Healthy   {"health": "true"}
[root@ceshi-130 ~]# kubectl create -f nginx-ceshi.yaml
daemonset.extensions/nginx-ds created
[root@ceshi-130 ~]# kubectl get pods
NAME             READY   STATUS    RESTARTS   AGE
nginx-ds-25fff   1/1     Running   0          2m7s
nginx-ds-fcj25   1/1     Running   0          2m7s
[root@ceshi-130 ~]# kubectl get pods -o wide
NAME                READY   STATUS    RESTARTS   AGE   IP            NODE                 NOMINATED NODE   READINESS GATES
nginx-ceshi-g6f7f   1/1     Running   1          14h   172.7.200.3   ceshi-131.host.com   <none>           <none>
nginx-ceshi-pdp5q   1/1     Running   1          14h   172.7.200.2   ceshi-130.host.com   <none>           <none>
nginx-ds-25fff      1/1     Running   1          15h   172.7.200.1   ceshi-131.host.com   <none>           <none>
nginx-ds-fcj25      1/1     Running   1          15h   172.7.200.3   ceshi-130.host.com   <none>           <none>
[root@ceshi-130 ~]# curl 172.7.200.3
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
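For reference, nginx-ceshi.yaml is used above but never shown. A minimal DaemonSet matching the created object daemonset.extensions/nginx-ds (the image tag is an assumption) could look like:

apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: nginx-ds
spec:
  template:
    metadata:
      labels:
        app: nginx-ds
    spec:
      containers:
      - name: my-nginx
        image: nginx:1.7.9    # image tag is an assumption
        ports:
        - containerPort: 80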