[root@rhel-1 ~]# mount | grep pvc # RWX # 此处 NFS Client 使用的是宿主机 IP # 此处 NFS Server 使用的是 share-manager-pvc Pod 对应的 Service ClusterIP 10.43.217.7:/pvc-01f47f0a-12d2-4d1c-8dd6-8343e9e534cc on /var/lib/kubelet/plugins/kubernetes.io/csi/driver.longhorn.io/447ee02d670adfe4a48ae856b8eb99bd43bd6fa936594b10cdecc04fb0addbb3/globalmount type nfs4 (rw,relatime,vers=4.1,rsize=1048576,wsize=1048576,namlen=255,softerr,softreval,noresvport,proto=tcp,timeo=600,retrans=5,sec=sys,clientaddr=172.16.16.151,local_lock=none,addr=10.43.217.7) 10.43.217.7:/pvc-01f47f0a-12d2-4d1c-8dd6-8343e9e534cc on /var/lib/kubelet/pods/37e1e1e7-ead3-41ba-b6cc-1f548186a05b/volumes/kubernetes.io~csi/pvc-01f47f0a-12d2-4d1c-8dd6-8343e9e534cc/mount type nfs4 (rw,relatime,vers=4.1,rsize=1048576,wsize=1048576,namlen=255,softerr,softreval,noresvport,proto=tcp,timeo=600,retrans=5,sec=sys,clientaddr=172.16.16.151,local_lock=none,addr=10.43.217.7)
[root@rhel-0 ~]# kubectl get pod NAME READY STATUS RESTARTS AGE multus-nad-test-jbxcz 1/1 Running 0 69s multus-nad-test-s8zqx 1/1 Running 0 69s
[root@rhel-0 ~]# kubectl exec -it multus-nad-test-jbxcz -- ip -4 a 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever 2: net1@if90: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue qlen 1000 inet 172.16.16.201/24 brd 172.16.16.255 scope global net1 valid_lft forever preferred_lft forever 91: eth0@if92: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue inet 10.42.0.71/32 scope global eth0 valid_lft forever preferred_lft forever
[root@rhel-0 ~]# kubectl exec -it multus-nad-test-s8zqx -- ip -4 a 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever 2: net1@if68: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue qlen 1000 inet 172.16.16.206/24 brd 172.16.16.255 scope global net1 valid_lft forever preferred_lft forever 69: eth0@if70: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue inet 10.42.1.31/32 scope global eth0 valid_lft forever preferred_lft forever
[root@rhel-0 ~]# kubectl exec -it multus-nad-test-jbxcz -- ip r default via 10.42.0.163 dev eth0 10.42.0.163 dev eth0 scope link 172.16.16.0/24 dev net1 scope link src 172.16.16.201
[root@rhel-0 ~]# kubectl exec -it multus-nad-test-jbxcz -- ping -c 3 172.16.16.206 PING 172.16.16.206 (172.16.16.206): 56 data bytes 64 bytes from 172.16.16.206: seq=0 ttl=64 time=0.435 ms 64 bytes from 172.16.16.206: seq=1 ttl=64 time=0.211 ms 64 bytes from 172.16.16.206: seq=2 ttl=64 time=0.181 ms
--- 172.16.16.206 ping statistics --- 3 packets transmitted, 3 packets received, 0% packet loss round-trip min/avg/max = 0.181/0.275/0.435 ms
在 Longhorn UI -> Setting -> General,找到 Storage Network,将先前创建的 NAD 填入,并勾选 Storage Network for RWX Volume Enabled:
保存后,会触发 instance-manager/longhorn-csi-plugin 等 Pod 的重建,并分配 Macvlan IP:
验证 Workload 使用 Storage Network
启动 Workload,启动完成后查看挂载情况:
[root@rhel-1 ~]# mount | grep pvc # RWX # NFS Client 是所在节点 longhorn-csi-plugin Pod 的 Macvlan IP,即 172.16.16.215 # NFS Server 是 share-manager-pvc Pod 的 Macvlan IP,即 172.16.16.223 pvc-01f47f0a-12d2-4d1c-8dd6-8343e9e534cc.longhorn-system.svc.cluster.local:/pvc-01f47f0a-12d2-4d1c-8dd6-8343e9e534cc on /var/lib/kubelet/plugins/kubernetes.io/csi/driver.longhorn.io/447ee02d670adfe4a48ae856b8eb99bd43bd6fa936594b10cdecc04fb0addbb3/globalmount type nfs4 (rw,relatime,vers=4.1,rsize=1048576,wsize=1048576,namlen=255,softerr,softreval,noresvport,proto=tcp,timeo=600,retrans=5,sec=sys,clientaddr=172.16.16.215,local_lock=none,addr=172.16.16.223) pvc-01f47f0a-12d2-4d1c-8dd6-8343e9e534cc.longhorn-system.svc.cluster.local:/pvc-01f47f0a-12d2-4d1c-8dd6-8343e9e534cc on /var/lib/kubelet/pods/2948c4ba-b41e-4f19-b5b6-628a78abdd02/volumes/kubernetes.io~csi/pvc-01f47f0a-12d2-4d1c-8dd6-8343e9e534cc/mount type nfs4 (rw,relatime,vers=4.1,rsize=1048576,wsize=1048576,namlen=255,softerr,softreval,noresvport,proto=tcp,timeo=600,retrans=5,sec=sys,clientaddr=172.16.16.215,local_lock=none,addr=172.16.16.223)