티스토리 뷰

  ms ( master ) wk1 ( worker 1 ) wk2 ( worker 2 )
IP 192.168.98.10 192.168.98.20 192.168.98.30
CPU 2 1 1
RAM 8G 4G 4G
OS generic/centos8 generic/centos8 generic/centos8
Provider virtualbox virtualbox virtualbox

 

 

▶ Vagrantfile

>> 반복문으로 처리했다

# -*- mode: ruby -*-
# vi: set ft=ruby :
# vagrant file for k8s cluster (one master node, two worker nodes)
# edited by lee

VAGRANTFILE_API_VERSION = "2"

# Node definitions. The master ("ms") is listed last on purpose: its
# provisioners (run_in_master.sh / send_pub_key.sh) push the join token to
# the workers over ssh, so both workers must already be up when it runs.
k8s_cluster = {
  "wk1.example.com" => { :ip => "192.168.98.20", :cpus => 1, :memory => 4096 },
  "wk2.example.com" => { :ip => "192.168.98.30", :cpus => 1, :memory => 4096 },
  "ms.example.com"  => { :ip => "192.168.98.10", :cpus => 2, :memory => 8192 },
}

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|

  k8s_cluster.each do |hostname, info|

    config.vm.define hostname do |cfg|
      # BUG FIX: the box was assigned to the outer `config` object from
      # inside the provider block on every loop iteration; set it on this
      # machine's own config instead.
      cfg.vm.box = "generic/centos8"
      # BUG FIX: `host_name` is the obsolete Vagrant v1 setting; the v2
      # config API uses `hostname`.
      cfg.vm.hostname = hostname
      cfg.vm.network "private_network", ip: info[:ip]

      # VirtualBox-specific machine settings only.
      cfg.vm.provider "virtualbox" do |vb|
        vb.name = hostname
        vb.gui  = false
        vb.customize ["modifyvm", :id,
                      "--memory", info[:memory],
                      "--cpus",   info[:cpus]]
      end

      # Common provisioning for every node.
      cfg.vm.provision "shell", path: "ssh_conf.sh", privileged: true
      cfg.vm.provision "shell", path: "install_cluster.sh", privileged: true

      # Master-only provisioning: init the control plane, set up the
      # vagrant account, then distribute the ssh key and join token.
      if hostname == "ms.example.com"
        cfg.vm.provision "shell", path: "run_in_master.sh", privileged: true
        cfg.vm.provision "shell", path: "account.sh", privileged: false
        cfg.vm.provision "shell", path: "send_pub_key.sh", privileged: false
      end
    end
  end
end

 

 

▶install_cluster.sh

#!/bin/bash
### bash script for installing the kubernetes cluster packages
### runs privileged on every node (master and workers)

# Set the time zone to local time and set the exact time
timedatectl set-timezone Asia/Seoul

yum install -y yum-utils
# Kernel modules required by containerd and kube-proxy.
modprobe overlay
modprobe br_netfilter
yum install -y iproute-tc

# Load the same modules automatically on every boot.
cat <<EOF | tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF

# Networking sysctls kubernetes requires: bridged traffic must traverse
# iptables, and the node must forward IPv4.
cat <<EOF | tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
# apply the 99-kubernetes-cri.conf file immediately
sysctl --system

# disable firewall
systemctl disable firewalld
systemctl stop firewalld

# containerd is shipped in the docker-ce yum repository.
yum-config-manager \
    --add-repo \
    https://download.docker.com/linux/centos/docker-ce.repo

yum install -y containerd.io

mkdir -p /etc/containerd
containerd config default | tee /etc/containerd/config.toml
# kubelet uses the systemd cgroup driver; make containerd match.
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/g' /etc/containerd/config.toml

systemctl restart containerd
systemctl enable containerd

# disable selinux
setenforce 0
sed -i 's/^SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config

cat <<EOF | tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\$basearch
enabled=1
gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kubelet kubeadm kubectl
EOF
yum install -y kubelet kubeadm kubectl \
    --disableexcludes=kubernetes

systemctl enable --now kubelet

# disable swap space (kubelet refuses to run with swap enabled)
swapoff -a
sed -e '/swap/s/^/#/' -i /etc/fstab

# Rewrite /etc/hosts with only the loopback entries, then append the
# cluster nodes. BUG FIX: the IPv6 loopback address was written as ":1";
# it must be "::1".
tee /etc/hosts <<EOF
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
EOF

echo "192.168.98.10   ms.example.com      ms" >> /etc/hosts
echo "192.168.98.20   wk1.example.com     wk1" >> /etc/hosts
echo "192.168.98.30   wk2.example.com     wk2" >> /etc/hosts

 

 

▶ account.sh

#!/bin/bash
# Per-user setup on the master node; runs unprivileged as the vagrant user.

# configuration for authorization to use the kubectl command
mkdir -p "$HOME/.kube"
sudo cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
sudo chown vagrant:vagrant /home/vagrant/.kube/config
echo "source <(kubectl completion bash)" >> ~/.bashrc

# .vimrc - vim editor's config file for yaml syntax
echo "autocmd FileType yaml setlocal ts=2 sts=2 sw=2 expandtab autoindent" > /home/vagrant/.vimrc

# .nanorc - nano editor's configuration file for yaml syntax.
# BUG FIX: the delimiter is now quoted ('EOF') so the shell performs no
# expansion on the regexes below; the previous unquoted heredoc needed
# fragile \$ escapes to get literal $ characters into the file. The bytes
# written to /home/vagrant/.nanorc are unchanged.
tee /home/vagrant/.nanorc <<'EOF'
# nano editor config for yaml syntax highliting
syntax "default"
color white,black ".*"
## Keys
color magenta "^\s*[$A-Za-z0-9_-]+\:"
color brightmagenta "^\s*@[$A-Za-z0-9_-]+\:"

# Values
color white ":\s.+$"
# Booleans
icolor brightcyan " (y|yes|n|no|true|false|on|off)$"
## Numbers
color brightred " [[:digit:]]+(\.[[:digit:]]+)?"
## Arrays
color red "\[" "\]" ":\s+[|>]" "^\s*- "
## Reserved
color green "(^| )!!(binary|bool|float|int|map|null|omap|seq|set|str) "

## Comments
color brightwhite "#.*$"

## Errors
color ,red ":\w.+$"
color ,red ":'.+$"
color ,red ":".+$"
color ,red "\s+$"

## Non closed quote
color ,red "['\"][^['\"]]*$"

## Closed quotes
color yellow "['\"].*['\"]"

## Equal sign
color brightgreen ":( |$)"
# tab size
set tabsize 2
set tabstospaces
EOF

 

 

▶ run_in_master.sh

#!/bin/bash
# This script is only needed on the master node: it initializes the control
# plane, saves the 'kubeadm join' command for the workers, and installs the
# flannel CNI.

# BUG FIX: without pipefail, `kubeadm init | tee` always reports tee's
# (successful) exit status, and the original error check actually tested
# the grep on the next line, not kubeadm itself.
set -o pipefail

pod_network="10.244.0.0/16"
apiserver_network=$(hostname -i)

# configure pod network and save the join token for the workers
if ! kubeadm init --pod-network-cidr="$pod_network" \
		--apiserver-advertise-address="$apiserver_network" \
		| tee /home/vagrant/kubeadm_init_output
then
	echo "kubeadm init failed" >&2
	echo "fix the errors and retry" >&2
	exit 1
fi
# the join command spans three lines of kubeadm's output
grep -A 2 'kubeadm join' /home/vagrant/kubeadm_init_output > /home/vagrant/token

# environment variable for using the kubectl command as root
export KUBECONFIG=/etc/kubernetes/admin.conf

# download the CNI flannel file if it is not in the current directory
[ -f kube-flannel.yml ] || \
	wget https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml

# add the eth1 interface card to the CNI flannel yaml file
# (this setting is necessary because the first nic of the host is a nat type)
sed -e "/kube-subnet-mgr/a\        - --iface=eth1" kube-flannel.yml > modified-kube-flannel.yml

if ! kubectl apply -f ./modified-kube-flannel.yml
then
	echo "CNI flannel installation failed" >&2
	echo "fix the errors and retry" >&2
	exit 1
fi

 

 

 

▶ send_pub_key.sh

#!/bin/bash
# transfer the ssh public key and the join token to the worker nodes,
# then run the token (the saved 'kubeadm join' command) on each of them
user=vagrant
password=vagrant
######

# Worker addresses = every non-localhost /etc/hosts entry except our own IP.
host_ip=$(hostname -i)
declare -a worker_nodes
worker_nodes=($(grep -v "${host_ip}" /etc/hosts | awk '!/localhost/{print $1}'))

# generate a key pair non-interactively (answer 'y' to any overwrite prompt)
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa <<<y > /dev/null
# never prompt about unknown host keys
sudo sed -i '/StrictHostKeyChecking/c StrictHostKeyChecking no' /etc/ssh/ssh_config

# if the sshpass package is not installed then install it from vault
echo "install sshpass"
rpm -qi sshpass > /dev/null 2>&1 || \
sudo rpm -ivh http://vault.centos.org/8.5.2111/cloud/x86_64/openstack-ussuri/Packages/s/sshpass-1.06-8.el8.x86_64.rpm

for node in "${worker_nodes[@]}"
do
	# -f (force) before -i (use default identity); the original '-i -f'
	# relied on ssh-copy-id tolerating an option after -i
	sshpass -p "$password" ssh-copy-id -f -i "${user}@${node}" > /dev/null
	scp /home/vagrant/token "${user}@${node}:/home/vagrant/"
	ssh "${user}@${node}" 'chmod u+x /home/vagrant/token'
	# BUG FIX: reset the flag on every iteration; previously one successful
	# node made all later failed nodes report success too.
	join=""
	ssh "${user}@${node}" 'sudo /home/vagrant/token > /dev/null 2>&1' && join="success"
	if [ "$join" == "success" ]
	then
		echo "$node cluster join successful"
	else
		echo "$node cluster join failed"
	fi
done

 

 

▶ ssh_conf.sh

#!/bin/bash
# allow ssh login with password (the box defaults to key-only auth)
# BUG FIX: the shebang was "#/bin/bash" (missing '!'), i.e. just a comment.
time=$(date "+%Y%m%d.%H%M%S")
# backup before overwriting
sudo cp /etc/ssh/sshd_config "/etc/ssh/sshd_config_${time}.backup"
sudo sed -i -e 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
sudo systemctl restart sshd

 

공지사항
최근에 올라온 글
최근에 달린 댓글
Total
Today
Yesterday
링크
TAG
more
«   2024/09   »
1 2 3 4 5 6 7
8 9 10 11 12 13 14
15 16 17 18 19 20 21
22 23 24 25 26 27 28
29 30
글 보관함