2023年11月

SDN作业2 Open vSwitch 应用实践1

本文地址:

  1. https://type.dayiyi.top/index.php/archives/292/
  2. https://blog.dayi.ink/?p=162
  3. https://www.cnblogs.com/rabbit-dayi/p/17868059.html
  4. https://cmd.dayi.ink/ltQtj5aOSi-x2Xc6xOuHGg

本实验均在root用户下进行

如果失败了,可能是OVS的问题,请参考文章末尾

su
sudo su

预制

启动OVS服务

ovs-ctl start

然后想法开两个shell

任务1

先清空下

终端1:

mn -c
mn

终端2(另外开一个终端):

ovs-ofctl dump-flows s1

没有流表:

PingALL

#CLI 终端1
mininet> pingall
*** Ping: testing ping reachability
h1 -> h2
h2 -> h1
*** Results: 0% dropped (2/2 received)
mininet>

查看流表:

#终端2
ovs-ofctl dump-flows s1

建立拓扑

# 终端1
# 清理并且建立 1个交换机4个主机
mn -c && mn --topo single,4

查看流表

# 终端2
# 查看流表
ovs-ofctl dump-flows s1 #这里应该是空的

### 设置IP

更新内容
mininet> h1 ifconfig h1-eth0 10.0.0.1
mininet> h2 ifconfig h2-eth0 10.0.0.2
mininet> h3 ifconfig h3-eth0 10.0.0.3
mininet> h4 ifconfig h4-eth0 10.0.0.4

PINGALL

#终端1
mininet> pingall
*** Ping: testing ping reachability
h1 -> h2 h3 h4
h2 -> h1 h3 h4
h3 -> h1 h2 h4
h4 -> h1 h2 h3
*** Results: 0% dropped (12/12 received)
mininet>

这里全部可以PING通

查看目前的流表:

#终端2
root@ubuntu:/home/sdn# ovs-ofctl dump-flows s1
 cookie=0x0, duration=55.843s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,arp,in_port="s1-eth2",vlan_tci=0x0000,dl_src=22:87:c2:36:ab:33,dl_dst=ca:76:64:1b:37:77,arp_spa=10.0.0.2,arp_tpa=10.0.0.1,arp_op=2 actions=output:"s1-eth1"
 cookie=0x0, duration=55.841s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,arp,in_port="s1-eth3",vlan_tci=0x0000,dl_src=12:18:0f:59:6d:59,dl_dst=ca:76:64:1b:37:77,arp_spa=10.0.0.3,arp_tpa=10.0.0.1,arp_op=2 actions=output:"s1-eth1"
 cookie=0x0, duration=55.839s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,arp,in_port="s1-eth4",vlan_tci=0x0000,dl_src=c2:4d:1b:f3:02:51,dl_dst=ca:76:64:1b:37:77,arp_spa=10.0.0.4,arp_tpa=10.0.0.1,arp_op=2 actions=output:"s1-eth1"
 cookie=0x0, duration=55.836s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,arp,in_port="s1-eth3",vlan_tci=0x0000,dl_src=12:18:0f:59:6d:59,dl_dst=22:87:c2:36:ab:33,arp_spa=10.0.0.3,arp_tpa=10.0.0.2,arp_op=2 actions=output:"s1-eth2"
 cookie=0x0, duration=55.833s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,arp,in_port="s1-eth4",vlan_tci=0x0000,dl_src=c2:4d:1b:f3:02:51,dl_dst=22:87:c2:36:ab:33,arp_spa=10.0.0.4,arp_tpa=10.0.0.2,arp_op=2 actions=output:"s1-eth2"
 cookie=0x0, duration=55.829s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,arp,in_port="s1-eth4",vlan_tci=0x0000,dl_src=c2:4d:1b:f3:02:51,dl_dst=12:18:0f:59:6d:59,arp_spa=10.0.0.4,arp_tpa=10.0.0.3,arp_op=2 actions=output:"s1-eth3"
 cookie=0x0, duration=50.806s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,arp,in_port="s1-eth4",vlan_tci=0x0000,dl_src=c2:4d:1b:f3:02:51,dl_dst=12:18:0f:59:6d:59,arp_spa=10.0.0.4,arp_tpa=10.0.0.3,arp_op=1 actions=output:"s1-eth3"
 cookie=0x0, duration=50.806s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,arp,in_port="s1-eth3",vlan_tci=0x0000,dl_src=12:18:0f:59:6d:59,dl_dst=22:87:c2:36:ab:33,arp_spa=10.0.0.3,arp_tpa=10.0.0.2,arp_op=1 actions=output:"s1-eth2"
 cookie=0x0, duration=50.806s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,arp,in_port="s1-eth2",vlan_tci=0x0000,dl_src=22:87:c2:36:ab:33,dl_dst=ca:76:64:1b:37:77,arp_spa=10.0.0.2,arp_tpa=10.0.0.1,arp_op=1 actions=output:"s1-eth1"
 cookie=0x0, duration=50.806s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,arp,in_port="s1-eth4",vlan_tci=0x0000,dl_src=c2:4d:1b:f3:02:51,dl_dst=22:87:c2:36:ab:33,arp_spa=10.0.0.4,arp_tpa=10.0.0.2,arp_op=1 actions=output:"s1-eth2"
 cookie=0x0, duration=50.806s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,arp,in_port="s1-eth3",vlan_tci=0x0000,dl_src=12:18:0f:59:6d:59,dl_dst=ca:76:64:1b:37:77,arp_spa=10.0.0.3,arp_tpa=10.0.0.1,arp_op=1 actions=output:"s1-eth1"
 cookie=0x0, duration=50.806s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,arp,in_port="s1-eth4",vlan_tci=0x0000,dl_src=c2:4d:1b:f3:02:51,dl_dst=ca:76:64:1b:37:77,arp_spa=10.0.0.4,arp_tpa=10.0.0.1,arp_op=1 actions=output:"s1-eth1"
 cookie=0x0, duration=50.806s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,arp,in_port="s1-eth3",vlan_tci=0x0000,dl_src=12:18:0f:59:6d:59,dl_dst=c2:4d:1b:f3:02:51,arp_spa=10.0.0.3,arp_tpa=10.0.0.4,arp_op=2 actions=output:"s1-eth4"
 cookie=0x0, duration=50.806s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,arp,in_port="s1-eth2",vlan_tci=0x0000,dl_src=22:87:c2:36:ab:33,dl_dst=12:18:0f:59:6d:59,arp_spa=10.0.0.2,arp_tpa=10.0.0.3,arp_op=2 actions=output:"s1-eth3"
 cookie=0x0, duration=50.806s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,arp,in_port="s1-eth1",vlan_tci=0x0000,dl_src=ca:76:64:1b:37:77,dl_dst=22:87:c2:36:ab:33,arp_spa=10.0.0.1,arp_tpa=10.0.0.2,arp_op=2 actions=output:"s1-eth2"
 cookie=0x0, duration=50.806s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,arp,in_port="s1-eth1",vlan_tci=0x0000,dl_src=ca:76:64:1b:37:77,dl_dst=12:18:0f:59:6d:59,arp_spa=10.0.0.1,arp_tpa=10.0.0.3,arp_op=2 actions=output:"s1-eth3"
 cookie=0x0, duration=50.806s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,arp,in_port="s1-eth1",vlan_tci=0x0000,dl_src=ca:76:64:1b:37:77,dl_dst=c2:4d:1b:f3:02:51,arp_spa=10.0.0.1,arp_tpa=10.0.0.4,arp_op=2 actions=output:"s1-eth4"
 cookie=0x0, duration=50.806s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,arp,in_port="s1-eth2",vlan_tci=0x0000,dl_src=22:87:c2:36:ab:33,dl_dst=c2:4d:1b:f3:02:51,arp_spa=10.0.0.2,arp_tpa=10.0.0.4,arp_op=2 actions=output:"s1-eth4"
 cookie=0x0, duration=55.843s, table=0, n_packets=3, n_bytes=294, idle_timeout=60, priority=65535,icmp,in_port="s1-eth1",vlan_tci=0x0000,dl_src=ca:76:64:1b:37:77,dl_dst=22:87:c2:36:ab:33,nw_src=10.0.0.1,nw_dst=10.0.0.2,nw_tos=0,icmp_type=8,icmp_code=0 actions=output:"s1-eth2"
 cookie=0x0, duration=55.843s, table=0, n_packets=1, n_bytes=98, idle_timeout=60, priority=65535,icmp,in_port="s1-eth2",vlan_tci=0x0000,dl_src=22:87:c2:36:ab:33,dl_dst=ca:76:64:1b:37:77,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,icmp_type=0,icmp_code=0 actions=output:"s1-eth1"
 cookie=0x0, duration=55.841s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,icmp,in_port="s1-eth1",vlan_tci=0x0000,dl_src=ca:76:64:1b:37:77,dl_dst=12:18:0f:59:6d:59,nw_src=10.0.0.1,nw_dst=10.0.0.3,nw_tos=0,icmp_type=8,icmp_code=0 actions=output:"s1-eth3"
 cookie=0x0, duration=55.841s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,icmp,in_port="s1-eth3",vlan_tci=0x0000,dl_src=12:18:0f:59:6d:59,dl_dst=ca:76:64:1b:37:77,nw_src=10.0.0.3,nw_dst=10.0.0.1,nw_tos=0,icmp_type=0,icmp_code=0 actions=output:"s1-eth1"
 cookie=0x0, duration=55.839s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,icmp,in_port="s1-eth1",vlan_tci=0x0000,dl_src=ca:76:64:1b:37:77,dl_dst=c2:4d:1b:f3:02:51,nw_src=10.0.0.1,nw_dst=10.0.0.4,nw_tos=0,icmp_type=8,icmp_code=0 actions=output:"s1-eth4"
 cookie=0x0, duration=55.839s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,icmp,in_port="s1-eth4",vlan_tci=0x0000,dl_src=c2:4d:1b:f3:02:51,dl_dst=ca:76:64:1b:37:77,nw_src=10.0.0.4,nw_dst=10.0.0.1,nw_tos=0,icmp_type=0,icmp_code=0 actions=output:"s1-eth1"
 cookie=0x0, duration=55.838s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,icmp,in_port="s1-eth2",vlan_tci=0x0000,dl_src=22:87:c2:36:ab:33,dl_dst=ca:76:64:1b:37:77,nw_src=10.0.0.2,nw_dst=10.0.0.1,nw_tos=0,icmp_type=8,icmp_code=0 actions=output:"s1-eth1"
 cookie=0x0, duration=55.838s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,icmp,in_port="s1-eth1",vlan_tci=0x0000,dl_src=ca:76:64:1b:37:77,dl_dst=22:87:c2:36:ab:33,nw_src=10.0.0.1,nw_dst=10.0.0.2,nw_tos=0,icmp_type=0,icmp_code=0 actions=output:"s1-eth2"
 cookie=0x0, duration=55.835s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,icmp,in_port="s1-eth2",vlan_tci=0x0000,dl_src=22:87:c2:36:ab:33,dl_dst=12:18:0f:59:6d:59,nw_src=10.0.0.2,nw_dst=10.0.0.3,nw_tos=0,icmp_type=8,icmp_code=0 actions=output:"s1-eth3"
 cookie=0x0, duration=55.835s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,icmp,in_port="s1-eth3",vlan_tci=0x0000,dl_src=12:18:0f:59:6d:59,dl_dst=22:87:c2:36:ab:33,nw_src=10.0.0.3,nw_dst=10.0.0.2,nw_tos=0,icmp_type=0,icmp_code=0 actions=output:"s1-eth2"
 cookie=0x0, duration=55.833s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,icmp,in_port="s1-eth2",vlan_tci=0x0000,dl_src=22:87:c2:36:ab:33,dl_dst=c2:4d:1b:f3:02:51,nw_src=10.0.0.2,nw_dst=10.0.0.4,nw_tos=0,icmp_type=8,icmp_code=0 actions=output:"s1-eth4"
 cookie=0x0, duration=55.833s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,icmp,in_port="s1-eth4",vlan_tci=0x0000,dl_src=c2:4d:1b:f3:02:51,dl_dst=22:87:c2:36:ab:33,nw_src=10.0.0.4,nw_dst=10.0.0.2,nw_tos=0,icmp_type=0,icmp_code=0 actions=output:"s1-eth2"
 cookie=0x0, duration=55.832s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,icmp,in_port="s1-eth3",vlan_tci=0x0000,dl_src=12:18:0f:59:6d:59,dl_dst=ca:76:64:1b:37:77,nw_src=10.0.0.3,nw_dst=10.0.0.1,nw_tos=0,icmp_type=8,icmp_code=0 actions=output:"s1-eth1"
 cookie=0x0, duration=55.831s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,icmp,in_port="s1-eth1",vlan_tci=0x0000,dl_src=ca:76:64:1b:37:77,dl_dst=12:18:0f:59:6d:59,nw_src=10.0.0.1,nw_dst=10.0.0.3,nw_tos=0,icmp_type=0,icmp_code=0 actions=output:"s1-eth3"
 cookie=0x0, duration=55.830s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,icmp,in_port="s1-eth3",vlan_tci=0x0000,dl_src=12:18:0f:59:6d:59,dl_dst=22:87:c2:36:ab:33,nw_src=10.0.0.3,nw_dst=10.0.0.2,nw_tos=0,icmp_type=8,icmp_code=0 actions=output:"s1-eth2"
 cookie=0x0, duration=55.830s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,icmp,in_port="s1-eth2",vlan_tci=0x0000,dl_src=22:87:c2:36:ab:33,dl_dst=12:18:0f:59:6d:59,nw_src=10.0.0.2,nw_dst=10.0.0.3,nw_tos=0,icmp_type=0,icmp_code=0 actions=output:"s1-eth3"
 cookie=0x0, duration=55.829s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,icmp,in_port="s1-eth3",vlan_tci=0x0000,dl_src=12:18:0f:59:6d:59,dl_dst=c2:4d:1b:f3:02:51,nw_src=10.0.0.3,nw_dst=10.0.0.4,nw_tos=0,icmp_type=8,icmp_code=0 actions=output:"s1-eth4"
 cookie=0x0, duration=55.828s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,icmp,in_port="s1-eth4",vlan_tci=0x0000,dl_src=c2:4d:1b:f3:02:51,dl_dst=12:18:0f:59:6d:59,nw_src=10.0.0.4,nw_dst=10.0.0.3,nw_tos=0,icmp_type=0,icmp_code=0 actions=output:"s1-eth3"
 cookie=0x0, duration=55.827s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,icmp,in_port="s1-eth4",vlan_tci=0x0000,dl_src=c2:4d:1b:f3:02:51,dl_dst=ca:76:64:1b:37:77,nw_src=10.0.0.4,nw_dst=10.0.0.1,nw_tos=0,icmp_type=8,icmp_code=0 actions=output:"s1-eth1"
 cookie=0x0, duration=55.827s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,icmp,in_port="s1-eth1",vlan_tci=0x0000,dl_src=ca:76:64:1b:37:77,dl_dst=c2:4d:1b:f3:02:51,nw_src=10.0.0.1,nw_dst=10.0.0.4,nw_tos=0,icmp_type=0,icmp_code=0 actions=output:"s1-eth4"
 cookie=0x0, duration=55.826s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,icmp,in_port="s1-eth4",vlan_tci=0x0000,dl_src=c2:4d:1b:f3:02:51,dl_dst=22:87:c2:36:ab:33,nw_src=10.0.0.4,nw_dst=10.0.0.2,nw_tos=0,icmp_type=8,icmp_code=0 actions=output:"s1-eth2"
 cookie=0x0, duration=55.826s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,icmp,in_port="s1-eth2",vlan_tci=0x0000,dl_src=22:87:c2:36:ab:33,dl_dst=c2:4d:1b:f3:02:51,nw_src=10.0.0.2,nw_dst=10.0.0.4,nw_tos=0,icmp_type=0,icmp_code=0 actions=output:"s1-eth4"
 cookie=0x0, duration=55.825s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,icmp,in_port="s1-eth4",vlan_tci=0x0000,dl_src=c2:4d:1b:f3:02:51,dl_dst=12:18:0f:59:6d:59,nw_src=10.0.0.4,nw_dst=10.0.0.3,nw_tos=0,icmp_type=8,icmp_code=0 actions=output:"s1-eth3"
 cookie=0x0, duration=55.825s, table=0, n_packets=0, n_bytes=0, idle_timeout=60, priority=65535,icmp,in_port="s1-eth3",vlan_tci=0x0000,dl_src=12:18:0f:59:6d:59,dl_dst=c2:4d:1b:f3:02:51,nw_src=10.0.0.3,nw_dst=10.0.0.4,nw_tos=0,icmp_type=0,icmp_code=0 actions=output:"s1-eth4"
root@ubuntu:/home/sdn#

修改流表

阻止H1访问其他的主机

# 终端1
# 重新开一下 CTRL+D
mn -c && mn --topo single,4


# 终端2
ovs-ofctl add-flow s1 icmp,nw_src=10.0.0.1,icmp_type=8,action=drop

测试:

#终端1
mininet> pingall
*** Ping: testing ping reachability
h1 -> X X X
h2 -> h1 h3 h4
h3 -> h1 h2 h4
h4 -> h1 h2 h3
*** Results: 25% dropped (9/12 received)
mininet>

失败了?

把内核降到了5.8,然后又重新装了下OVS 2.17,记得删除内核模块哦。

大体就这样,挺麻烦的。

虚拟机放这里了,这样省脑子:

这个是我自己用的版本,没经过测试,可能多少有点不一样,OVS要手动启动哦。

Ubuntu_20.04_sdn_ovs_2.17.8-LTS-fix1

链接:https://pan.baidu.com/s/1fwvV2B_eH6D3xEQ2bYJnlQ?pwd=6y8l
提取码:6y8l
--来自百度网盘超级会员V6的分享

SDN 作业1

任务:要求1、将代码上传作业 2、用python3运行代码,使用cli实现pingall、nodes、net、dump iperf h1 h2等命令

更好阅读:

可能仍然有BUG,请反馈。

打开虚拟机

我这里打开的是Ubuntu_20.04_sdn_ovs-2.17.8-LTS

这个虚拟机。具体的文件在群文件中

打开miniedit.py

sudo python2 /opt/sdn/mininet/examples/miniedit.py

画图

照着图画一个,然后连起来即可。

设置IP

右键这个主机,然后选properties

分别设置IP地址即可。

设置链路速率

右键这个线,然后选properties

然后参数这样设置:

100,5ms

设置,进入CLI模式

Edit->Preferences->Start CLI

CLI:

保存文件

这里记得存两份,保存出的python文件是不能再变成图片的!

保存mn

File->Save

导出python文件

导出Level 2 Script

另外,这里导出到过程不应该出现报错

这里不应该有python报错。

运行测试

进入导出文件的目录,我这里是/home/sdn

运行:

python3 ovo.py

测试 PING(4,5交换机未连接控制器)

# 在mn的CLI下
net.pingall()

pingall()

因为那个没连接控制器,所以会ping不通,只有部分可以通。

测试PING(4/5连接控制器)

mininet> pingall
*** Ping: testing ping reachability
h6 -> h8 h1 h5 h4 h2 h3 h7
h8 -> h6 h1 h5 h4 h2 h3 h7
h1 -> h6 h8 h5 h4 h2 h3 h7
h5 -> h6 h8 h1 h4 h2 h3 h7
h4 -> h6 h8 h1 h5 h2 h3 h7
h2 -> h6 h8 h1 h5 h4 h3 h7
h3 -> h6 h8 h1 h5 h4 h2 h7
h7 -> h6 h8 h1 h5 h4 h2 h3
*** Results: 0% dropped (56/56 received)
mininet>

测试延迟:

理论 RTT 为 20ms(去程经过 h1-s1、s1-h2 两条 5ms 链路共 10ms,回程同样 10ms)

mininet> h1 ping h2
PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data.
64 bytes from 10.0.0.2: icmp_seq=1 ttl=64 time=46.7 ms
64 bytes from 10.0.0.2: icmp_seq=2 ttl=64 time=23.4 ms
64 bytes from 10.0.0.2: icmp_seq=3 ttl=64 time=21.9 ms
64 bytes from 10.0.0.2: icmp_seq=4 ttl=64 time=22.5 ms
64 bytes from 10.0.0.2: icmp_seq=5 ttl=64 time=23.1 ms
64 bytes from 10.0.0.2: icmp_seq=6 ttl=64 time=22.9 ms
64 bytes from 10.0.0.2: icmp_seq=7 ttl=64 time=23.8 ms
^C
--- 10.0.0.2 ping statistics ---
7 packets transmitted, 7 received, 0% packet loss, time 6007ms
rtt min/avg/max/mdev = 21.987/26.393/46.792/8.345 ms
mininet>

nodes

nodes

结果:

mininet> nodes
available nodes are:
c0 h1 h2 h3 h4 h5 h6 h7 h8 s1 s2 s3 s4 s5
mininet>

net

net

mininet> net
h7 h7-eth0:s3-eth2
h3 h3-eth0:s4-eth2
h4 h4-eth0:s4-eth3
h5 h5-eth0:s5-eth2
h1 h1-eth0:s1-eth1
h8 h8-eth0:s3-eth3
h6 h6-eth0:s5-eth3
h2 h2-eth0:s1-eth2
s2 lo:  s2-eth1:s1-eth3 s2-eth2:s3-eth1 s2-eth3:s4-eth1 s2-eth4:s5-eth1
s4 lo:  s4-eth1:s2-eth3 s4-eth2:h3-eth0 s4-eth3:h4-eth0
s1 lo:  s1-eth1:h1-eth0 s1-eth2:h2-eth0 s1-eth3:s2-eth1
s3 lo:  s3-eth1:s2-eth2 s3-eth2:h7-eth0 s3-eth3:h8-eth0
s5 lo:  s5-eth1:s2-eth4 s5-eth2:h5-eth0 s5-eth3:h6-eth0
c0
mininet>

dump

dump

mininet> dump
<Host h7: h7-eth0:10.0.0.7 pid=6926>
<Host h3: h3-eth0:10.0.0.3 pid=6928>
<Host h4: h4-eth0:10.0.0.4 pid=6930>
<Host h5: h5-eth0:10.0.0.5 pid=6932>
<Host h1: h1-eth0:10.0.0.1 pid=6934>
<Host h8: h8-eth0:10.0.0.8 pid=6936>
<Host h6: h6-eth0:10.0.0.6 pid=6938>
<Host h2: h2-eth0:10.0.0.2 pid=6940>
<OVSSwitch s2: lo:127.0.0.1,s2-eth1:None,s2-eth2:None,s2-eth3:None,s2-eth4:None pid=6909>
<OVSSwitch s4: lo:127.0.0.1,s4-eth1:None,s4-eth2:None,s4-eth3:None pid=6912>
<OVSSwitch s1: lo:127.0.0.1,s1-eth1:None,s1-eth2:None,s1-eth3:None pid=6915>
<OVSSwitch s3: lo:127.0.0.1,s3-eth1:None,s3-eth2:None,s3-eth3:None pid=6918>
<OVSSwitch s5: lo:127.0.0.1,s5-eth1:None,s5-eth2:None,s5-eth3:None pid=6921>
<Controller c0: 127.0.0.1:6633 pid=6899>
mininet>

iperf测速

iperf h1 h2

*** Starting CLI:
mininet> iperf h1 h2
*** Iperf: testing TCP bandwidth between h1 and h2
*** Results: ['78.1 Mbits/sec', '79.7 Mbits/sec']
mininet> iperf h1 h2
*** Iperf: testing TCP bandwidth between h1 and h2
*** Results: ['81.0 Mbits/sec', '96.0 Mbits/sec']
mininet>
mininet> iperf h1 h2
*** Iperf: testing TCP bandwidth between h1 and h2
*** Results: ['74.6 Mbits/sec', '87.2 Mbits/sec']
mininet>
mininet> iperf h1 h2
*** Iperf: testing TCP bandwidth between h1 and h2
*** Results: ['78.1 Mbits/sec', '94.0 Mbits/sec']
mininet> iperfudp h1 h2
invalid number of args: iperfudp bw src dst
bw examples: 10M
mininet> iperfudp 100M h1 h2
*** Iperf: testing UDP bandwidth between h1 and h2
*** Results: ['100M', '23.7 Mbits/sec', '23.7 Mbits/sec']
mininet>

调试过程

  • 发现pingall有不通的

做了好久好久,换了内核,重装了好多查了好久,发现必须要连接那个controller才可以。

代码

from mininet.net import Mininet
from mininet.node import Controller, RemoteController, OVSController
from mininet.node import CPULimitedHost, Host, Node
from mininet.node import OVSKernelSwitch, UserSwitch
from mininet.node import IVSSwitch
from mininet.cli import CLI
from mininet.log import setLogLevel, info
from mininet.link import TCLink, Intf
from subprocess import call

def myNetwork():
    """Build and run the miniedit-exported topology: 5 OVS switches in a
    star around s2, 8 hosts, one local OpenFlow controller (c0), all links
    shaped to 100 Mbit / 5 ms via TCLink. Drops into the Mininet CLI and
    tears the network down when the CLI exits.

    NOTE(review): creation order of switches/hosts/links is preserved from
    the miniedit export — the console log in this document (host ordering,
    link banner count) depends on this exact order; do not reorder.
    """

    # Empty network; topology is assembled manually below (build=False
    # defers wiring until net.build()).
    net = Mininet( topo=None,
                   build=False,
                   ipBase='10.0.0.0/8')

    info( '*** Adding controller\n' )
    # Local reference controller listening on the classic OpenFlow port 6633.
    c0=net.addController(name='c0',
                      controller=Controller,
                      protocol='tcp',
                      port=6633)

    info( '*** Add switches\n')
    # Kernel-datapath Open vSwitch instances; s2 is the core switch.
    s4 = net.addSwitch('s4', cls=OVSKernelSwitch)
    s3 = net.addSwitch('s3', cls=OVSKernelSwitch)
    s1 = net.addSwitch('s1', cls=OVSKernelSwitch)
    s2 = net.addSwitch('s2', cls=OVSKernelSwitch)
    s5 = net.addSwitch('s5', cls=OVSKernelSwitch)

    info( '*** Add hosts\n')
    # Hosts h1..h8 with static /24 addresses in 10.0.0.0/24; no default
    # route is needed since everything is on one L2 segment.
    h3 = net.addHost('h3', cls=Host, ip='10.0.0.3/24', defaultRoute=None)
    h2 = net.addHost('h2', cls=Host, ip='10.0.0.2/24', defaultRoute=None)
    h4 = net.addHost('h4', cls=Host, ip='10.0.0.4/24', defaultRoute=None)
    h5 = net.addHost('h5', cls=Host, ip='10.0.0.5/24', defaultRoute=None)
    h1 = net.addHost('h1', cls=Host, ip='10.0.0.1/24', defaultRoute=None)
    h7 = net.addHost('h7', cls=Host, ip='10.0.0.7/24', defaultRoute=None)
    h8 = net.addHost('h8', cls=Host, ip='10.0.0.8/24', defaultRoute=None)
    h6 = net.addHost('h6', cls=Host, ip='10.0.0.6/24', defaultRoute=None)

    info( '*** Add links\n')
    # Every link carries the same traffic shaping: 100 Mbit bandwidth and
    # 5 ms one-way delay (so h1<->h2 RTT crosses 4 link traversals = 20 ms).
    s1h1 = {'bw':100,'delay':'5ms'}
    net.addLink(s1, h1, cls=TCLink , **s1h1)
    s1h2 = {'bw':100,'delay':'5ms'}
    net.addLink(s1, h2, cls=TCLink , **s1h2)
    s1s2 = {'bw':100,'delay':'5ms'}
    net.addLink(s1, s2, cls=TCLink , **s1s2)
    s2s3 = {'bw':100,'delay':'5ms'}
    net.addLink(s2, s3, cls=TCLink , **s2s3)
    s2s4 = {'bw':100,'delay':'5ms'}
    net.addLink(s2, s4, cls=TCLink , **s2s4)
    s2s5 = {'bw':100,'delay':'5ms'}
    net.addLink(s2, s5, cls=TCLink , **s2s5)
    s3h7 = {'bw':100,'delay':'5ms'}
    net.addLink(s3, h7, cls=TCLink , **s3h7)
    s3h8 = {'bw':100,'delay':'5ms'}
    net.addLink(s3, h8, cls=TCLink , **s3h8)
    s4h3 = {'bw':100,'delay':'5ms'}
    net.addLink(s4, h3, cls=TCLink , **s4h3)
    s4h4 = {'bw':100,'delay':'5ms'}
    net.addLink(s4, h4, cls=TCLink , **s4h4)
    s5h5 = {'bw':100,'delay':'5ms'}
    net.addLink(s5, h5, cls=TCLink , **s5h5)
    s5h6 = {'bw':100,'delay':'5ms'}
    net.addLink(s5, h6, cls=TCLink , **s5h6)

    info( '*** Starting network\n')
    net.build()
    info( '*** Starting controllers\n')
    for controller in net.controllers:
        controller.start()

    info( '*** Starting switches\n')
    # Every switch is attached to c0 — the document's debugging section
    # notes that pingall only fully succeeds once ALL switches are
    # connected to the controller.
    net.get('s4').start([c0])
    net.get('s3').start([c0])
    net.get('s1').start([c0])
    net.get('s2').start([c0])
    net.get('s5').start([c0])

    info( '*** Post configure switches and hosts\n')

    # Interactive CLI; blocks until the user exits, then clean up.
    CLI(net)
    net.stop()

if __name__ == '__main__':
    setLogLevel( 'info' )
    myNetwork()
root@ubuntu:/home/sdn# python3 wk3.py
*** Adding controller
*** Add switches
*** Add hosts
*** Add links
(100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) *** Starting network
*** Configuring hosts
h3 h2 h4 h5 h1 h7 h8 h6
*** Starting controllers
*** Starting switches
(100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) (100.00Mbit 5ms delay) *** Post configure switches and hosts
*** Starting CLI:
mininet> pingall
*** Ping: testing ping reachability
h3 -> h2 h4 h5 h1 h7 h8 h6
h2 -> h3 h4 h5 h1 h7 h8 h6
h4 -> h3 h2 h5 h1 h7 h8 h6
h5 -> h3 h2 h4 h1 h7 h8 h6
h1 -> h3 h2 h4 h5 h7 h8 h6
h7 -> h3 h2 h4 h5 h1 h8 h6
h8 -> h3 h2 h4 h5 h1 h7 h6
h6 -> h3 h2 h4 h5 h1 h7 h8
*** Results: 0% dropped (56/56 received)

云与虚拟化:实验2 Docker镜像常用命令的使用

实验要求

了解Docker镜像,掌握Docker镜像常用命令的使用。

前置准备

要求实验主机能够连接外网,已经正确安装Docker,并关闭防火墙和selinux。

实验步骤

步骤1:拉取nginx:latest、busybox:latest和centos:latest镜像

docker pull centos:latest
docker pull nginx:latest
docker pull busybox:latest

步骤2:列出所有本地镜像

docker images

步骤3:搜索收藏数不小于5的redhat镜像,并且完整显示镜像描述信息

# 旧版写法(-s/--stars 参数在新版 Docker 中已移除):
docker search -s 5 --no-trunc redhat

# 新版写法(--no-trunc 用于完整显示镜像描述信息):
docker search --no-trunc --filter stars=5 redhat

其他的命令也可以哦

curl -s "https://registry.hub.docker.com/v2/repositories/redhat/" | jq '.results[] | select(.star_count >= 5) | .description'

步骤4:拉取收藏数是高的redhat镜像

docker pull redhat/ubi8

步骤5:将redhat镜像标签名修改为redhat:v8.0

docker tag redhat/ubi8:latest redhat:v8.0

步骤6:将nginx:latest镜像导出,命名为nginx.tar

docker save -o nginx.tar nginx:latest

步骤7:删除nginx:latest镜像,并运行docker images命令查看

docker rmi -f nginx:latest

步骤8:将nginx.tar导入,并运行docker images命令查看

docker load --input nginx.tar
docker images

步骤9:删除本地主机中所有的镜像,要求逐一删除

docker rmi nginx:latest
docker rmi busybox:latest
docker rmi centos:latest
docker rmi redhat:v8.0
docker rmi redhat/ubi8:latest
docker rm -v $(docker ps -aq -f status=created)
docker rm -v $(docker ps -aq -f status=exited)

这里还有两个冲突:

K8S

K8S部署

先创建一个debian模板

选择iso

快速安装一下即可:

安装完成克隆成模板:

Master节点

直接克隆一个就可以

克隆两个本地node

sed -i 's/deb.debian.org/mirrors.ustc.edu.cn/g' /etc/apt/sources.list.d/debian.sources
sed -i 's|security.debian.org/debian-security|mirrors.ustc.edu.cn/debian-security|g' /etc/apt/sources.list
# 注意用单引号,让 $PATH/$HOME 在登录时展开而不是写入时展开
echo 'export PATH=$PATH:/usr/sbin:$HOME/.local/bin' >> ~/.bashrc
source ~/.bashrc
apt install ufw

apt install sudo vim wget curl -y

sudo hostnamectl set-hostname dayi-cloud-k8s-master

这里 HOSTS 用了 Tailscale 来进行相互访问

curl -fsSL https://tailscale.com/install.sh | sh

这样就配好hosts了

dayi-cloud-k8s-master
dayi-cloud-k8s-node1
dayi-cloud-k8s-node2

Master

sudo ufw allow 6443/tcp
sudo ufw allow 2379/tcp
sudo ufw allow 2380/tcp
sudo ufw allow 10250/tcp
sudo ufw allow 10251/tcp
sudo ufw allow 10252/tcp
sudo ufw allow 10255/tcp
sudo ufw reload

cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf 
overlay 
br_netfilter
EOF

sudo modprobe overlay 
sudo modprobe br_netfilter
cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1 
net.bridge.bridge-nf-call-ip6tables = 1 
EOF


sudo sysctl --system

Other

sudo ufw allow 10250/tcp
sudo ufw allow 30000:32767/tcp
sudo ufw reload


cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf 
overlay 
br_netfilter
EOF

sudo modprobe overlay 
sudo modprobe br_netfilter
cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1 
net.bridge.bridge-nf-call-ip6tables = 1 
EOF


sudo sysctl --system

SSH-密钥复制

mkdir ~/.ssh
echo "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDk1VvdE9ZZFnK5Bwhi+C74B/rLvT7zKHN4/tUhzLFrMVDQZvsF+8ON9w4xHK1AZyb64GQXs9BtX93N+OJ51t5ZbEl960S0o7BYgcdB7q3+wb9E4uTYNFK7akcBgtxa+3gmZTmttr1l2KT3xzfE9BebkRg+C/DO/PwnaPwOWyeYc90fmcpk7voM2e268wmv6V2eZmIKaA/T7GXCe22qKIcrgWZp78BHL1Je+sqQ72FfzGasDj/iDCcqbW6fsQ4v0QHsuQ4SavcT3xvPDPTwCow4CoV8cQiK4s2nak/5z8lZF07FRJsF0oYcg7m+9qizLv3jBi/P1M26Rhpj5fV6XmdBtIpSY8A958U2bPT/16eAJOcyj+0qFL2E0DWs7Hbh8tni4L51mppQfx4VnozzEzPPaEauGA/GNQ+HlKcaEe43bBia7btW9K9c6EHPvolAf/365gv4BcKArlbOVo4kfxqfX7ybjSnK2pz3kZjFDrXwhQ0LsVqlZIvGu+wkHIVFYHM= root@dayi-cloud-k8s-maste" >> ~/.ssh/authorized_keys

安装containerd

apt install containerd
containerd config default | sudo tee /etc/containerd/config.toml >/dev/null 2>&1
sudo vi /etc/containerd/config.toml

scp /etc/containerd/config.toml dayi-cloud-k8s-node1:/etc/containerd/config.toml
scp /etc/containerd/config.toml dayi-cloud-k8s-node2:/etc/containerd/config.toml

sudo systemctl restart containerd
sudo systemctl enable containerd

安装K8S仓库


sudo apt install gnupg gnupg2 curl software-properties-common -y
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo gpg --dearmour -o /etc/apt/trusted.gpg.d/cgoogle.gpg
apt-add-repository "deb http://apt.kubernetes.io/ kubernetes-xenial main"

安装K8S

apt update
apt install kubelet kubeadm kubectl -y
apt-mark hold kubelet kubeadm kubectl

初始化集群:

kubeadm init --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=10.234.0.0/16 --control-plane-endpoint=dayi-cloud-k8s-master --v=5

虚拟化K8S安装

# 设置主机名
hostnamectl set-hostname m1
hostnamectl set-hostname node1
hostnamectl set-hostname node2
# 在 master 的 /etc/hosts 中追加各节点映射,例如:
# 192.168.0.200 m1
# 192.168.0.201 node1
# 192.168.0.202 node2

# 使用scp复制/etc/hosts到node1和node2
scp /etc/hosts root@192.168.0.201:/etc/hosts
scp /etc/hosts root@192.168.0.202:/etc/hosts

# 关闭swap并配置内核参数
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab
echo -e 'net.bridge.bridge-nf-call-iptables = 1 \nnet.bridge.bridge-nf-call-ip6tables = 1' >> /etc/sysctl.conf

# 安装基本软件包
yum -y install wget ntpdate

# 配置时间同步
ntpdate ntp1.aliyun.com
crontab -e
# 添加以下内容
*/1 * * * * /usr/sbin/ntpdate ntp1.aliyun.com
systemctl restart crond.service
reboot

# 配置yum源及下载Kubernetes和Docker相关软件包
cd /etc/yum.repos.d
rm -f CentOS-*
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -P /etc/yum.repos.d/ http://mirrors.aliyun.com/repo/epel-7.repo
vi /etc/yum.repos.d/kubernetes.repo
# 添加kubernetes源的配置信息
wget https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
rpm --import rpm-package-key.gpg
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum clean all
yum makecache fast
cd

# 安装docker-ce并启动服务
yum -y install docker-ce
systemctl start docker
systemctl enable docker

# 配置Docker的镜像加速器
vi /etc/docker/daemon.json
# 添加以下内容
{
    "registry-mirrors": ["https://x3nqjrcg.mirror.aliyuncs.com"]
}

# Docker降级为20.10
yum downgrade --setopt=obsoletes=0 -y docker-ce-20.10.24 docker-ce-selinux-20.10.24 containerd.io

# 修改Docker Cgroup Driver为systemd
vi /etc/docker/daemon.json
# 添加以下内容
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": ["https://x3nqjrcg.mirror.aliyuncs.com"]
}

# 重启docker服务
systemctl daemon-reload
systemctl restart docker

# 安装kubeadm、kubectl、kubelet
yum install -y kubelet-1.23.17 kubeadm-1.23.17 kubectl-1.23.17
systemctl start kubelet
systemctl enable kubelet

# 下载Kubernetes镜像
kubeadm config images list --kubernetes-version v1.23.17
kubeadm config images pull --image-repository=registry.aliyuncs.com/google_containers

# 初始化Kubernetes集群
kubeadm init \
--kubernetes-version=v1.23.17 \
--pod-network-cidr=10.244.0.0/16 \
--service-cidr=10.96.0.0/12 \
--apiserver-advertise-address=192.168.8.10 \
--image-repository=registry.aliyuncs.com/google_containers 

# 配置环境变量
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# 安装Flannel网络
wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
kubectl apply -f kube-flannel.yml

# 将节点加入到集群
kubeadm join 192.168.8.10:6443 --token [token] --discovery-token-ca-cert-hash [hash]

# 查看集群状态
kubectl get nodes

Debian安装

K8S2

哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈哈

换CENTOS

换大牛

Rancher

真男人直接下载:


修改主机名:

ssh-keygen -t ecdsa -b 256
ssh-copy-id node1
ssh-copy-id node2

复制密钥:

ssh-copy-id node2

复制hosts

scp /etc/hosts root@node1:/etc/hosts 
scp /etc/hosts root@node2:/etc/hosts 

修改内核配置

/etc/sysctl.conf

vi:

echo -e 'net.bridge.bridge-nf-call-iptables = 1 \nnet.bridge.bridge-nf-call-ip6tables = 1' >> /etc/sysctl.conf

开启模块

modprobe br_netfilter
ls /proc/sys/net/bridge/
echo "br_netfilter" > /etc/modules-load.d/br_netfilter.conf
sysctl -p

发送到从节点:

scp /etc/sysctl.conf root@node1:/etc/sysctl.conf
scp /etc/sysctl.conf root@node2:/etc/sysctl.conf 

#从节点执行:
modprobe br_netfilter
ls /proc/sys/net/bridge/
echo "br_netfilter" > /etc/modules-load.d/br_netfilter.conf
sysctl -p

防火墙 SELINUX:

systemctl stop firewalld
systemctl disable firewalld
systemctl status firewalld



setenforce 0
vi /etc/selinux/config
#修改: SELINUX=disabled

reboot

发送到从节点

scp /etc/selinux/config root@node1:/etc/selinux/config
scp /etc/selinux/config root@node2:/etc/selinux/config

查看SELINUX状态

sestatus

NTP时间同步

yum -y install wget ntpdate


crontab -e
*/1 * * * * /usr/sbin/ntpdate ntp1.aliyun.com

systemctl restart crond.service

添加源


vi /etc/yum.repos.d/kubernetes.repo

[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg

安装

yum downgrade --setopt=obsoletes=0 -y docker-ce-20.10.24 docker-ce-selinux-20.10.24 containerd.io


yum install -y kubelet-1.23.17 kubeadm-1.23.17 kubectl-1.23.17
systemctl start kubelet
systemctl enable kubelet
systemctl status kubelet
kubeadm config images list --kubernetes-version v1.23.17

初始化

mv /etc/containerd/config.toml /root/config.toml.bak
systemctl restart containerd

/etc/kubernetes/manifests/kube-scheduler.yaml

rm -rf /etc/kubernetes/manifests/kube-apiserver.yaml
rm -rf /etc/kubernetes/manifests/kube-controller-manager.yaml
rm -rf /etc/kubernetes/manifests/kube-scheduler.yaml
rm -rf /etc/kubernetes/manifests/etcd.yaml

kubeadm init \
--pod-network-cidr=10.244.0.0/16 \
--service-cidr=10.96.0.0/12 \
--apiserver-advertise-address=192.168.59.137 \
--image-repository=registry.aliyuncs.com/google_containers \
--v=5

rm -f /etc/kubernetes/controller-manager.conf
rm -f /etc/kubernetes/scheduler.conf
rm -f /etc/kubernetes/scheduler.conf
rm -f /etc/kubernetes/admin.conf
rm -f /etc/kubernetes/kubelet.conf

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

kubeadm join 192.168.59.137:6443 --token y8m7u7.3ur44mc2ezb87fmq \
        --discovery-token-ca-cert-hash sha256:cb9d1bfa0c3adaabd9a34cd072e1d27c13c9cf9e6e3ef4a0c810b84d6fdf4c9f

节点:

节点:

状态:

云与虚拟化_实验12 Rancher平台部署应用

实验要求

掌握 Rancher 平台部署。

前置准备

要求实验主机能够连接外网,已经正确安装 Docker,并关闭防火墙和 selinux。

实验步骤

这个东西蛮大的。

docker pull rancher/rancher:stable
docker run -d --restart=unless-stopped --privileged -p 8443:443 rancher/rancher:stable

FRP:

FRP:

进入:

加入:

节点添加:

步骤1:拉取 Rancher 镜像。

[root@localhost ~]# docker pull rancher/server:stable

步骤2:利用 Rancher 镜像生成 Rancher 容器。

[root@localhost ~]# docker run -d --restart=unless-stopped -p 8080:8080 rancher/server:stable

主机上复制 Rancher 脚本代码。

[root@localhost ~]# sudo docker run --rm --privileged -v /var/run/docker.sock:/var/run/docker.sock -v /var/lib/rancher:/var/lib/rancher rancher/agent:v1.2.11 http://192.168.5.100:8080/v1/scripts/33363BDEA2105F76C889:1577750400000:7pDzaq5dOUcHCFdLqfZ2lyvGex0

步骤11:添加主机成功。移动窗口底部,点击“创建”按钮。

不过..

更新也容易卡住..

好像不是很稳定的样子..