k8s 1.29 keepalived nginx 3master 3node

1.k8s安装部署规划

| IP地址 | 主机名 | 角色 | 配置 |
| --- | --- | --- | --- |
| 10.0.17.101 | keepalived01 | Keepalived & HAproxy | 2核1G,10GB |
| 10.0.17.102 | keepalived02 | Keepalived & HAproxy | 2核1G,10GB |
| 10.0.17.201 | k8s-master-01 | master主节点,etcd | 2核2G,10GB |
| 10.0.17.202 | k8s-master-02 | master主节点,etcd | 2核2G,10GB |
| 10.0.17.203 | k8s-master-03 | master主节点,etcd | 2核2G,10GB |
| 10.0.17.211 | k8s-node-01 | worker node工作节点 | 2核2G,10GB |
| 10.0.17.212 | k8s-node-02 | worker node工作节点 | 2核2G,10GB |
| 10.0.17.213 | k8s-node-03 | worker node工作节点 | 2核2G,10GB |

Virtual IP: 10.0.17.200

2.安装虚拟机模板安装RockyLinux 9.5并更换源

2.1 更换阿里源

# Switch the Rocky Linux repo files from the official mirrorlist to the
# Aliyun mirror; sed -i.bak keeps a .bak backup of every edited repo file.
# ($contentdir is a literal here — single quotes prevent shell expansion.)
sed -e 's|^mirrorlist=|#mirrorlist=|g' \
    -e 's|^#baseurl=http://dl.rockylinux.org/$contentdir|baseurl=https://mirrors.aliyun.com/rockylinux|g' \
    -i.bak \
    /etc/yum.repos.d/[Rr]ocky*.repo
# Rebuild the dnf metadata cache against the new mirror
dnf makecache

2.2 防火墙修改 firewalld 为 iptables

# Replace firewalld with the classic iptables service
systemctl stop firewalld
systemctl disable firewalld

yum -y install iptables-services
systemctl start iptables
iptables -F  # flush all existing rules
systemctl enable iptables
service iptables save  # persist the (flushed) ruleset across reboots

2.3 禁用 Selinux

# Disable SELinux at runtime, in the config file, and via a kernel boot argument
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
grubby --update-kernel ALL --args selinux=0
# Verify the kernel-arg disable: grubby --info DEFAULT
# Roll back the kernel-arg disable: grubby --update-kernel ALL --remove-args selinux

2.4 设置时区 配置ntp

# Set the timezone and sync time via chrony against Aliyun NTP servers
timedatectl set-timezone Asia/Shanghai

dnf install -y chrony

# Edit /etc/chrony.conf and add the two "server" lines shown below,
# then verify synchronization with "chronyc sources -n"
vi /etc/chrony.conf
server ntp1.aliyun.com iburst
server ntp2.aliyun.com iburst
chronyc sources -n

2.5 关闭 swap 分区

# Disable swap now and comment out its fstab entry so it stays off after reboot
swapoff -a
sed -i 's:/dev/mapper/rl-swap:#/dev/mapper/rl-swap:g' /etc/fstab

2.6 安装 ipvs

# Enable IPv4 forwarding (required for pod/service routing)
echo 'net.ipv4.ip_forward=1' >> /etc/sysctl.conf
sysctl -p

2.7 加载 bridge

yum install -y epel-release
yum install -y bridge-utils

# Load br_netfilter now and on every boot, and make bridged traffic
# visible to iptables (needed by kube-proxy / CNI plugins)
modprobe br_netfilter
echo 'br_netfilter' >> /etc/modules-load.d/bridge.conf
echo 'net.bridge.bridge-nf-call-iptables=1' >> /etc/sysctl.conf
echo 'net.bridge.bridge-nf-call-ip6tables=1' >> /etc/sysctl.conf
sysctl -p

2.8 安装docker

2.8.1 添加 docker-ce yum 源 阿里

# Add the docker-ce yum repo (Aliyun-hosted copy of the upstream repo file)
sudo dnf config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
cd /etc/yum.repos.d

2.8.2 切换docker阿里源安装

sed -e 's|download.docker.com|mirrors.aliyun.com/docker-ce|g' docker-ce.repo

2.8.3 安装docker

yum -y install docker-ce

2.8.4 重启docker服务 设置开机启动

# Reload unit definitions, restart docker, enable it at boot, then reboot
systemctl daemon-reload && systemctl restart docker && systemctl enable docker

reboot

2.9 安装cri-docker

kubelet 通过 CRI(容器运行时接口,基于 OCI 标准)调用容器运行时
因此需要安装 cri-docker

‌cri-dockerd是一个容器运行时接口(CRI)的实现,用于Kubernetes与Docker容器引擎进行交互。‌ 它允许Kubernetes管理和调度Docker容器,是Kubernetes CRI的一个实现,可以与Docker Engine一起使用,将Docker Engine作为容器运行时与Kubernetes集群集成。

cri-dockerd的工作原理是通过实现CRI接口,一头通过CRI跟Kubelet交互,另一头跟Docker API交互,从而间接实现了Kubernetes以Docker作为容器运行时。这种架构使得Kubernetes能够使用Docker作为容器引擎,同时符合CRI标准。

2.9.1 下载cri-docker

# Download, unpack and install the cri-dockerd v0.3.9 binary
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.9/cri-dockerd-0.3.9.amd64.tgz
tar -xf cri-dockerd-0.3.9.amd64.tgz
cp cri-dockerd/cri-dockerd /usr/bin/
chmod +x /usr/bin/cri-dockerd

2.9.2 配置cri-docker服务

# Write the cri-dockerd systemd service unit.
# The quoted "EOF" delimiter makes the heredoc body literal (no shell expansion),
# so $MAINPID reaches the unit file untouched for systemd to expand.
cat <<"EOF" > /usr/lib/systemd/system/cri-docker.service
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket
[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.8
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
EOF


# ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.8
# 启动命令
# ExecStart=/usr/bin/cri-dockerd 可执行程序
# --network-plugin=cni 传递当前网络插件 基于cni实现 容器网络接口
# --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.8 pod初始化容器下载器地址及版本

2.9.3 添加cri-docker套接字

# Write the socket-activation unit for cri-dockerd.
# %t is expanded by systemd to the runtime dir (/run), giving /run/cri-dockerd.sock.
cat <<"EOF" > /usr/lib/systemd/system/cri-docker.socket
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service
[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF

2.9.4 启动cri-docker对应服务

# Reload unit files, enable and start cri-docker, then verify it is active
systemctl daemon-reload
systemctl enable cri-docker
systemctl start cri-docker
systemctl is-active cri-docker

2.10 安装k8s

2.10.1 添加k8s yum源

# Official pkgs.k8s.io v1.29 repo. The kube* packages are excluded so they are
# only installed deliberately (see 2.10.2) and never pulled in by a blanket update.
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/repodata/repomd.xml.key
exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
EOF

# 下载对应软件包 放入同一文件夹 
  conntrack-tools-1.4.7-2.el9.x86_64                                     
  cri-tools-1.29.0-150500.1.1.x86_64                                     
  kubeadm-1.29.0-150500.1.1.x86_64                                       
  kubectl-1.29.0-150500.1.1.x86_64                                       
  kubelet-1.29.0-150500.1.1.x86_64                                       
  kubernetes-cni-1.3.0-150500.1.1.x86_64                                 
  libnetfilter_cthelper-1.0.0-22.el9.x86_64                              
  libnetfilter_cttimeout-1.0.0-19.el9.x86_64                             
  libnetfilter_queue-1.0.5-1.el9.x86_64                                  
  socat-1.7.4.1-5.el9_4.2.x86_64

yum -y install *

# Fallback: if pkgs.k8s.io is unreachable, use the Tsinghua mirror instead.
# The heredoc delimiter is quoted ('EOF') so nothing in the body is expanded
# by the shell. (The original block carried a stale duplicate name=/baseurl=
# pair from the old kubernetes-el7 repo whose unquoted $basearch expanded to
# an empty string at write time, producing a broken baseurl.)
cat <<'EOF' > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.tuna.tsinghua.edu.cn/kubernetes/core:/stable:/v1.29/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/repodata/repomd.xml.key
EOF

2.10.2 安装 kubeadm 1.29 版本

# Install the pinned 1.29.0 versions and enable kubelet at boot
yum install -y kubelet-1.29.0 kubectl-1.29.0 kubeadm-1.29.0
systemctl enable kubelet.service

3 复制虚拟机

3.1 虚拟机修改hostname


hostnamectl set-hostname k8s-master-01
hostnamectl set-hostname k8s-master-02
hostnamectl set-hostname k8s-master-03
hostnamectl set-hostname k8s-node-01
hostnamectl set-hostname k8s-node-02
hostnamectl set-hostname k8s-node-03



k8s-master-01 k8s-master-02 k8s-master-03 k8s-node-01 k8s-node-02 k8s-node-03


# Generate a passphrase-less SSH key and push the public key to every node,
# enabling password-less SSH/scp between them. The loop variable is quoted
# as a matter of hygiene (the original left $i unquoted).
ssh-keygen -f /root/.ssh/id_rsa -N ''
for host in k8s-master-01 k8s-master-02 k8s-master-03 k8s-node-01 k8s-node-02 k8s-node-03; do ssh-copy-id "$host"; done

3.2 master节点初始化

# Initialize the first control-plane node.
# --pod-network-cidr must match CALICO_IPV4POOL_CIDR in the Calico manifest (section 4);
# --cri-socket selects cri-dockerd because both containerd and cri-dockerd sockets exist.
kubeadm init --apiserver-advertise-address=10.0.17.201 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version 1.29.0 --service-cidr=10.10.0.0/12 --pod-network-cidr=10.244.0.0/16 --ignore-preflight-errors=all --cri-socket unix:///var/run/cri-dockerd.sock

# Set up kubectl for the current user
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config


kubectl -n kube-system edit kubeadm-config 
# 最下方加入
#添加本机地址及端口
controlPlaneEndpoint: 10.0.17.201:6443

kubeadm init phase upload-certs --upload-certs
38f2c6d57fd74faf8e2836f4ee0a6d41155f24d4fc5b98e034fc668567339eb2
# master加入节点命令


kubeadm token create --print-join-command

kubeadm join 10.0.17.201:6443 --token tacr77.b7up6y71hdpwqg18 \
  --discovery-token-ca-cert-hash sha256:a80194d2b83f8fc77797366dbb83805831be779c13ee6d484d3181fc0e287743 \
  --control-plane --certificate-key 38f2c6d57fd74faf8e2836f4ee0a6d41155f24d4fc5b98e034fc668567339eb2   \
  --cri-socket unix:///run/cri-dockerd.sock



# node加入节点命令
kubeadm join 10.0.17.201:6443 --token tacr77.b7up6y71hdpwqg18 \
  --discovery-token-ca-cert-hash sha256:a80194d2b83f8fc77797366dbb83805831be779c13ee6d484d3181fc0e287743 \
  --cri-socket unix:///run/cri-dockerd.sock

3.3 问题排查

[root@node1 ~]# kubeadm join 192.168.10.100:6443 --token v9bqbb.wwuk9s8ytn0xaefn \
        --discovery-token-ca-cert-hash sha256:496eeb8ea938e1919782e25b5f9a74f71f1460900a1c7ae3e0b62d4ffb68271a 
Found multiple CRI endpoints on the host. Please define which one do you wish to use by setting the 'criSocket' field in the kubeadm configuration file: unix:///var/run/containerd/containerd.sock, unix:///var/run/cri-dockerd.sock
To see the stack trace of this error execute with --v=5 or higher

# 出现报错

这个错误表明在您的环境中存在多个容器运行时接口(Container Runtime Interface,CRI)端点,Kubernetes 不确定应该使用哪一个。要解决这个问题,您需要在 kubeadm 配置文件中明确指定要使用的 CRI 端点。

解决方法:
[root@node1 ~]# find / -name cri-dockerd.sock
/run/cri-dockerd.sock
添加--cri-socket unix:///run/cri-dockerd.sock


kubeadm join 192.168.10.100:6443 --token v9bqbb.wwuk9s8ytn0xaefn \
	--discovery-token-ca-cert-hash sha256:496eeb8ea938e1919782e25b5f9a74f71f1460900a1c7ae3e0b62d4ffb68271a \
    --cri-socket unix:///run/cri-dockerd.sock

4 部署网络插件calico

https://docs.tigera.io/calico/latest/getting-started/kubernetes/self-managed-onprem/onpremises#install-calico-with-kubernetes-api-datastore-more-than-50-nodes
下载地址
https://github.com/projectcalico/calico/releases?page=1

curl https://raw.githubusercontent.com/projectcalico/calico/v3.26.3/manifests/calico-typha.yaml -o calico.yaml


# 修改文件 CALICO_IPV4POOL_CIDR
# kubeadm init --apiserver-advertise-address=10.0.17.201 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version 1.29.0 --service-cidr=10.10.0.0/12 --pod-network-cidr=10.244.0.0/16 --ignore-preflight-errors=all --cri-socket unix:///var/run/cri-dockerd.sock	
# 根据 master kubeadm init --pod-network-cidr=10.244.0.0/16 指定为 pod 地址 10.244.0.0/16

4.1每个节点docker导入calico images


tar -zxvf calico-images.tar.gz
cd calico-images
docker load -i calico-cni-v3.26.3.tar
docker load -i calico-node-v3.26.3.tar
docker load -i calico-kube-controllers-v3.26.3.tar
docker load -i calico-typha-v3.26.3.tar


scp -r calico-images root@k8s-master-02:~
scp -r calico-images root@k8s-master-03:~
scp -r calico-images root@k8s-node-01:~
scp -r calico-images root@k8s-node-02:~
scp -r calico-images root@k8s-node-03:~


ssh k8s-master-02
cd calico-images/
docker load -i calico-cni-v3.26.3.tar
docker load -i calico-node-v3.26.3.tar
docker load -i calico-kube-controllers-v3.26.3.tar
docker load -i calico-typha-v3.26.3.tar


ssh k8s-node-02
cd calico-images/
docker load -i calico-cni-v3.26.3.tar
docker load -i calico-node-v3.26.3.tar
docker load -i calico-kube-controllers-v3.26.3.tar
docker load -i calico-typha-v3.26.3.tar

修改calico.yaml

vim calico-typha.yaml
# 根据 master kubeadm init --pod-network-cidr=10.244.0.0/16 指定为 pod 地址 10.244.0.0/16
# 修改 CALICO_IPV4POOL_CIDR 保持一致
            - name: CALICO_IPV4POOL_CIDR
              value: "10.244.0.0/16"

# 修改为 BGP 模式
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
  value: "Always"  #改成Off

kubectl apply -f calico-typha.yaml

5.高可用nginx+Keepalived

5.1 安装nginx Keepalived

yum install epel-release nginx keepalived nginx-mod-stream -y

5.2 nginx 配置文件

# NGINX config for the LB nodes (presumably written to /etc/nginx/nginx.conf — confirm).
# The "stream" block below is a layer-4 TCP proxy in front of the apiservers.
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
 
include /usr/share/nginx/modules/*.conf;
 
events {
    worker_connections 1024;
}
 
# Layer-4 load balancing for the three master apiserver components
stream {
 
    log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
 
    access_log  /var/log/nginx/k8s-access.log  main;
 
    upstream k8s-apiserver {
            server 10.0.17.201:6443 weight=5 max_fails=3 fail_timeout=30s;  
            server 10.0.17.202:6443 weight=5 max_fails=3 fail_timeout=30s; 
            server 10.0.17.203:6443 weight=5 max_fails=3 fail_timeout=30s;        
 
    }
    
    server {
       listen 16443; # must not be 6443: nginx may share a host with a master, whose apiserver already binds 6443
       proxy_pass k8s-apiserver;
    }
}
 
http {
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';
 
    access_log  /var/log/nginx/access.log  main;
 
    sendfile            on;
    tcp_nopush          on;
    tcp_nodelay         on;
    keepalive_timeout   65;
    types_hash_max_size 2048;
 
    include             /etc/nginx/mime.types;
    default_type        application/octet-stream;
 
    server {
        listen       80 default_server;
        server_name  _;
 
        location / {
        }
    }
}
 
 

5.3 keepalived配置文件

# Keepalived config for the MASTER LB node.
# On the BACKUP node use: state BACKUP, priority 90 (same virtual_router_id and auth).
global_defs { 
   notification_email { 
     acassen@firewall.loc 
     failover@firewall.loc 
     sysadmin@firewall.loc 
   } 
   notification_email_from Alexandre.Cassen@firewall.loc  
   smtp_server 127.0.0.1 
   smtp_connect_timeout 30 
   router_id NGINX_MASTER
} 
 
# Health-check hook: script defined in section 5.4
vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}
 
vrrp_instance VI_1 { 
    state MASTER 
    interface ens160  # change to the actual NIC name
    virtual_router_id 51 # VRRP router ID; must be unique per instance 
    priority 100    # priority; set 90 on the backup server 
    advert_int 1    # VRRP advertisement (heartbeat) interval, default 1s 
    authentication { 
        auth_type PASS      
        auth_pass 1111 
    }  
    # Virtual IP (VIP)
    virtual_ipaddress { 
        10.0.17.200/24
    } 
    track_script {
        check_nginx
    } 
}
 

5.4 检测nginx运行脚本

vi  /etc/keepalived/check_nginx.sh

#!/bin/bash
# Keepalived health check for nginx:
#   1) if nginx is not running, try to start it once;
#   2) if it is still down 2s later, stop keepalived so the VIP fails over.
# Changes vs. original: deprecated `egrep` replaced with `grep -E`,
# and $counter is quoted in the tests; logic is unchanged.

# Count running nginx processes (exclude the grep itself and this script's PID)
counter=$(ps -ef | grep nginx | grep sbin | grep -Ecv "grep|$$")
if [ "$counter" -eq 0 ]; then
    # nginx is down — attempt a single restart
    service nginx start
    sleep 2
    # re-check after giving nginx 2 seconds to come up
    counter=$(ps -ef | grep nginx | grep sbin | grep -Ecv "grep|$$")
    # still down: stop keepalived so the virtual IP drifts to the backup node
    if [ "$counter" -eq 0 ]; then
        service keepalived stop
    fi
fi

chmod +x /etc/keepalived/check_nginx.sh
 

5.5 启动服务

# Start nginx and keepalived, and enable both at boot
systemctl daemon-reload && systemctl start nginx
systemctl start keepalived && systemctl enable nginx keepalived

5.6 使用 kubeadm 初始化 k8s 集群

kubeadm init --apiserver-advertise-address=10.0.17.201 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version 1.29.0 --service-cidr=10.10.0.0/12 --pod-network-cidr=10.244.0.0/16 --ignore-preflight-errors=all --cri-socket unix:///var/run/cri-dockerd.sock

kubectl -n kube-system edit cm kubeadm-config

apiVersion: v1
data:
  ClusterConfiguration: |
    apiServer:
      extraArgs:
        authorization-mode: Node,RBAC
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta3
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    controllerManager: {}
    dns: {}
    etcd:
      local:
        dataDir: /var/lib/etcd
    imageRepository: registry.aliyuncs.com/google_containers
    kind: ClusterConfiguration
    kubernetesVersion: v1.29.0
    networking:
      dnsDomain: cluster.local
      podSubnet: 10.244.0.0/16
      serviceSubnet: 10.10.0.0/12
    scheduler: {}
    controlPlaneEndpoint: 10.0.17.201:6443
kind: ConfigMap
metadata:
  creationTimestamp: "2024-12-01T12:48:54Z"
  name: kubeadm-config
  namespace: kube-system
  resourceVersion: "920"
  uid: 79ab4b6a-5a48-46bb-bbe9-bbb68cc4f7b5


mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

5.7 初始化成功

[root@k8s-master-01 ~]# kubeadm init phase upload-certs --upload-certs
W1201 21:23:30.106972    3658 version.go:104] could not fetch a Kubernetes version from the internet: unable to get URL "https://dl.k8s.io/release/stable-1.txt": Get "https://cdn.dl.k8s.io/release/stable-1.txt": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
W1201 21:23:30.107262    3658 version.go:105] falling back to the local client version: v1.29.0
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
2118f52b3255f5e0a99821171fdde475abc38b920a903ef758a6fa991470b46b

[root@k8s-master-01 ~]# kubeadm token create --print-join-command
kubeadm join 10.0.17.201:6443 --token 40ibw0.sygoyt60n2yzr2we --discovery-token-ca-cert-hash sha256:92489c5ccaa3b13b2eee02e14a60fe3d7297ac9211ef2ca773024fa2a2926654 


# master节点加入
kubeadm join 10.0.17.201:6443 \
  --token 40ibw0.sygoyt60n2yzr2we \
  --discovery-token-ca-cert-hash sha256:92489c5ccaa3b13b2eee02e14a60fe3d7297ac9211ef2ca773024fa2a2926654 \
  --control-plane \
  --certificate-key 2118f52b3255f5e0a99821171fdde475abc38b920a903ef758a6fa991470b46b \
  --cri-socket unix:///var/run/cri-dockerd.sock


# node节点加入
kubeadm join 10.0.17.201:6443 \
  --token 40ibw0.sygoyt60n2yzr2we \
  --discovery-token-ca-cert-hash sha256:92489c5ccaa3b13b2eee02e14a60fe3d7297ac9211ef2ca773024fa2a2926654 \
  --certificate-key 2118f52b3255f5e0a99821171fdde475abc38b920a903ef758a6fa991470b46b \
  --cri-socket unix:///var/run/cri-dockerd.sock


#如果失败的话,执行"sudo kubeadm reset -f"命令清理环境后,再重新执行
sudo kubeadm join --config=/etc/kubernetes/kubeadm-config.yaml --ignore-preflight-errors=FileExisting-crictl,ImagePull --cri-socket unix:///var/run/cri-dockerd.sock


6. calico

https://docs.tigera.io/calico/latest/getting-started/kubernetes/self-managed-onprem/onpremises#install-calico-with-kubernetes-api-datastore-more-than-50-nodes
下载地址
https://github.com/projectcalico/calico/releases?page=1

curl https://raw.githubusercontent.com/projectcalico/calico/v3.26.3/manifests/calico-typha.yaml -o calico.yaml


# 修改文件 CALICO_IPV4POOL_CIDR
# kubeadm init --apiserver-advertise-address=10.0.17.201 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version 1.29.0 --service-cidr=10.10.0.0/12 --pod-network-cidr=10.244.0.0/16 --ignore-preflight-errors=all --cri-socket unix:///var/run/cri-dockerd.sock	
# 根据 master kubeadm init --pod-network-cidr=10.244.0.0/16 指定为 pod 地址 10.244.0.0/16


6.1 每个节点docker导入calico images

vim /etc/hosts

10.0.17.201 k8s-master-01 
10.0.17.202 k8s-master-02 
10.0.17.203 k8s-master-03 
10.0.17.211 k8s-node-01 
10.0.17.212 k8s-node-02 
10.0.17.213 k8s-node-03

# Generate a passphrase-less SSH key and push the public key to every node
# (quote the loop variable; the original left $i unquoted)
ssh-keygen -f /root/.ssh/id_rsa -N ''
for host in k8s-master-01 k8s-master-02 k8s-master-03 k8s-node-01 k8s-node-02 k8s-node-03; do ssh-copy-id "$host"; done


scp -r calico-images root@k8s-master-02:~
scp -r calico-images root@k8s-master-03:~
scp -r calico-images root@k8s-node-01:~
scp -r calico-images root@k8s-node-02:~
scp -r calico-images root@k8s-node-03:~


cd calico-images
docker load -i calico-cni.tar
docker load -i calico-node.tar
docker load -i calico-kube-controllers.tar
docker load -i calico-typha.tar

修改calico.yaml

vim calico-typha.yaml
# 根据 master kubeadm init --pod-network-cidr=10.244.0.0/16 指定为 pod 地址 10.244.0.0/16
# 修改 CALICO_IPV4POOL_CIDR 保持一致
            - name: CALICO_IPV4POOL_CIDR
              value: "10.244.0.0/16"

# 修改为 BGP 模式
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
  value: "Always"  #改成Off

kubectl apply -f calico.yaml

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值