When one or more 'external' ports are bound to a chassis, N/S routing (or E/W with a router which has a gateway port) won't work as expected receiving duplicated traffic: Example from SR-IOV machine (OVN port of type 'external') on Logical Switch 1 pinging another [root@localhost ~]# ping 192.168.2.147 -c4 PING 192.168.2.147 (192.168.2.147) 56(84) bytes of data. 64 bytes from 192.168.2.147: icmp_seq=1 ttl=63 time=0.859 ms 64 bytes from 192.168.2.147: icmp_seq=1 ttl=63 time=0.887 ms (DUP!) 64 bytes from 192.168.2.147: icmp_seq=1 ttl=63 time=0.902 ms (DUP!) 64 bytes from 192.168.2.147: icmp_seq=1 ttl=63 time=0.913 ms (DUP!) 64 bytes from 192.168.2.147: icmp_seq=1 ttl=63 time=1.75 ms (DUP!) 64 bytes from 192.168.2.147: icmp_seq=1 ttl=63 time=1.76 ms (DUP!) 64 bytes from 192.168.2.147: icmp_seq=1 ttl=63 time=1.76 ms (DUP!) 64 bytes from 192.168.2.147: icmp_seq=2 ttl=63 time=0.726 ms 64 bytes from 192.168.2.147: icmp_seq=3 ttl=63 time=0.212 ms 64 bytes from 192.168.2.147: icmp_seq=4 ttl=63 time=0.284 ms --- 192.168.2.147 ping statistics --- 4 packets transmitted, 4 received, +6 duplicates, 0% packet loss, time 8ms rtt min/avg/max/mdev = 0.212/1.005/1.760/0.545 ms If the machine has a floating IP and I ping its FIP from an external host, we can see the same thing: (overcloud) [stack@undercloud-0 ~]$ ping 10.46.21.217 -c4 PING 10.46.21.217 (10.46.21.217) 56(84) bytes of data. 64 bytes from 10.46.21.217: icmp_seq=1 ttl=61 time=1.47 ms 64 bytes from 10.46.21.217: icmp_seq=1 ttl=61 time=1.49 ms (DUP!) 64 bytes from 10.46.21.217: icmp_seq=1 ttl=61 time=1.64 ms (DUP!) 64 bytes from 10.46.21.217: icmp_seq=1 ttl=61 time=1.71 ms (DUP!) 64 bytes from 10.46.21.217: icmp_seq=1 ttl=61 time=1.72 ms (DUP!) 64 bytes from 10.46.21.217: icmp_seq=2 ttl=61 time=0.774 ms 64 bytes from 10.46.21.217: icmp_seq=2 ttl=61 time=0.805 ms (DUP!) 64 bytes from 10.46.21.217: icmp_seq=2 ttl=61 time=0.808 ms (DUP!) 64 bytes from 10.46.21.217: icmp_seq=2 ttl=61 time=0.811 ms (DUP!) 
64 bytes from 10.46.21.217: icmp_seq=2 ttl=61 time=0.813 ms (DUP!) 64 bytes from 10.46.21.217: icmp_seq=3 ttl=61 time=0.488 ms 64 bytes from 10.46.21.217: icmp_seq=3 ttl=61 time=0.511 ms (DUP!) 64 bytes from 10.46.21.217: icmp_seq=3 ttl=61 time=0.516 ms (DUP!) 64 bytes from 10.46.21.217: icmp_seq=3 ttl=61 time=0.526 ms (DUP!) 64 bytes from 10.46.21.217: icmp_seq=3 ttl=61 time=0.530 ms (DUP!) 64 bytes from 10.46.21.217: icmp_seq=4 ttl=61 time=0.751 ms --- 10.46.21.217 ping statistics --- 4 packets transmitted, 4 received, +12 duplicates, 0% packet loss, time 73ms NAT entry: _uuid : 77c51435-8fdb-479e-96c1-32a699d949ba external_ids : {"neutron:fip_id"="b35f9c44-ef17-4a15-b12d-60aa6c46dcb8", "neutron:fip_port_id"="a7a8c3a1-7b04-43e8-8806-d6249152046e", "neutron:revision_number"="6", "neutron:router_name"=neutron-9428ae73-26ea-4791-9052-2343c17bac74} external_ip : "10.46.21.217" external_mac : "fa:16:3e:94:ba:64" logical_ip : "192.168.1.155" logical_port : "a7a8c3a1-7b04-43e8-8806-d6249152046e" options : {} type : dnat_and_snat Logical port: ()[root@controller-0 /]# ovn-nbctl list logical_switch_port a7a8c3a1-7b04-43e8-8806-d6249152046e _uuid : d9166867-4693-4a82-9627-be36ca8fdf19 addresses : ["f8:f2:1e:15:a8:86 192.168.1.155"] dhcpv4_options : 2a87fe7c-1306-48de-a469-32d5a2b11d22 dhcpv6_options : [] dynamic_addresses : [] enabled : true external_ids : {"neutron:cidrs"="192.168.1.155/24", "neutron:device_id"="6f2c56a8-a6f2-4fac-8854-89c29e61fc47", "neutron:device_owner"="compute:nova", "neutron:network_name"=neutron-15881ab0-811e-4b9a-b42e-5d303c8a8590, "neutron:port_fip"="10.46.21.217", "neutron:port_name"=sriov-port-tenant-1, "neutron:project_id"=cf1cccae7da14b4086ee62f55a76e61e, "neutron:revision_number"="3", "neutron:security_group_ids"="f18b8db5-3019-4473-8d16-7f82f4847fe6"} ha_chassis_group : d3e65462-ba47-4842-b021-f05397faa2d8 name : "a7a8c3a1-7b04-43e8-8806-d6249152046e" options : {} parent_name : [] port_security : ["f8:f2:1e:15:a8:86 192.168.1.155"] tag : [] 
tag_request : [] type : external ******* external port ******** up : true Port_Binding: _uuid : 157ca3b8-6d19-440e-a3fe-a3f66af037d0 chassis : 4ae1fd78-a9d5-4b0b-aeab-5d07ee297c8c datapath : 1c0e6422-5a39-4ea5-a95e-d4ee445ea1be encap : [] external_ids : {name=sriov-port-tenant-1, "neutron:cidrs"="192.168.1.155/24", "neutron:device_id"="6f2c56a8-a6f2-4fac-8854-89c29e61fc47", "neutron:device_owner"="compute:nova", "neutron:network_name"=neutron-15881ab0-811e-4b9a-b42e-5d303c8a8590, "neutron:port_fip"="10.46.21.217", "neutron:port_name"=sriov-port-tenant-1, "neutron:project_id"=cf1cccae7da14b4086ee62f55a76e61e, "neutron:revision_number"="3", "neutron:security_group_ids"="f18b8db5-3019-4473-8d16-7f82f4847fe6"} gateway_chassis : [] ha_chassis_group : 01963956-6990-4438-8269-685f71c7e1ab logical_port : "a7a8c3a1-7b04-43e8-8806-d6249152046e" mac : ["f8:f2:1e:15:a8:86 192.168.1.155"] nat_addresses : [] options : {} parent_port : [] tag : [] tunnel_key : 4 type : external virtual_parent : [] --- ()[root@controller-0 /]# ovn-sbctl show Chassis "fc062427-7332-43f7-b61b-eb00f9f5ba98" hostname: computesriov-1.localdomain Encap geneve ip: "10.20.2.62" options: {csum="true"} Port_Binding "75a36288-df94-4d79-960c-6360a901c436" Chassis "23f36188-5536-434c-ac1d-0c3dd92db54d" hostname: controller-1.localdomain Encap geneve ip: "10.20.2.66" options: {csum="true"} Port_Binding cr-lrp-06a377a5-2ba6-4778-b2f0-2a84f86b2030 Chassis "13969db2-fde1-42d0-8e6b-e709c9d07981" hostname: controller-2.localdomain Encap geneve ip: "10.20.2.70" options: {csum="true"} Chassis "479e0c95-8438-4a22-aa6f-68673be395bf" hostname: controller-0.localdomain Encap geneve ip: "10.20.2.67" options: {csum="true"} Port_Binding "64aa14e1-8bae-4c3e-a2c5-0d8167346446" Port_Binding "a7a8c3a1-7b04-43e8-8806-d6249152046e" Chassis "369294de-5897-49c6-94b8-710cd8ae68dd" hostname: computesriov-0.localdomain Encap geneve ip: "10.20.2.73" options: {csum="true"} Port_Binding "78b29a5f-0c2c-4075-8658-b279a658dcdf" Router port 
cr-lrp-06a377a5-2ba6-4778-b2f0-2a84f86b2030 is on 'controller-1' while the SRIOV machine (OVN external port) is on 'controller-0'. If I ping from controller-2 (L2 on the provider network) to the VM FIP, I see DUPs but also packets coming from the fixed IP (without NAT): [root@controller-2 ~]# ping 10.46.21.217 -c2 PING 10.46.21.217 (10.46.21.217) 56(84) bytes of data. 64 bytes from 192.168.1.155: icmp_seq=2 ttl=63 time=1.23 ms 64 bytes from 10.46.21.217: icmp_seq=2 ttl=64 time=1.37 ms (DUP!) 64 bytes from 192.168.1.155: icmp_seq=2 ttl=63 time=1.38 ms (DUP!) 64 bytes from 192.168.1.155: icmp_seq=2 ttl=63 time=1.44 ms (DUP!) ^C --- 10.46.21.217 ping statistics --- 2 packets transmitted, 1 received, +3 duplicates, 50% packet loss, time 23ms FWIW, I have configured all nodes with the ovn-chassis-mac-mappings addresses as all the OVN networks are VLAN (not Geneve): [stack@undercloud-0 ~]$ cat servers.txt controller-2 ctlplane=192.168.25.18 controller-1 ctlplane=192.168.25.10 controller-0 ctlplane=192.168.25.17 computesriov-0 ctlplane=192.168.25.16 computesriov-1 ctlplane=192.168.25.13 [stack@undercloud-0 ~]$ for i in 10 17 16 13; do ssh heat-admin@192.168.25.$i "sudo ovs-vsctl get open . external_ids:ovn-chassis-mac-mappings"; done Warning: Permanently added '192.168.25.10' (ECDSA) to the list of known hosts. "datacentre:1e:02:ad:bb:aa:cc" Warning: Permanently added '192.168.25.17' (ECDSA) to the list of known hosts. "datacentre:1e:02:ad:dd:ff:ee" Warning: Permanently added '192.168.25.16' (ECDSA) to the list of known hosts. "datacentre:1e:02:ad:dd:aa:aa" Warning: Permanently added '192.168.25.13' (ECDSA) to the list of known hosts. 
"datacentre:1e:02:ad:bb:bb:bb" ** Traffic capture on the actual VM ** [root@localhost ~]# tcpdump -i ens4 -vvne -s0 icmp [70762.165513] device ens4 entered promiscuous mode tcpdump: listening on ens4, link-type EN10MB (Ethernet), capture size 262144 bytes 05:16:53.716310 1e:02:ad:dd:ff:ee > f8:f2:1e:15:a8:86, ethertype 802.1Q (0x8100), length 102: vlan 321, p 0, ethertype IPv4, (tos 0x0, ttl 63, id 20639, offset 0, flags [DF], proto ICMP (1), length 84) 10.46.21.195 > 192.168.1.155: ICMP echo request, id 38793, seq 1, length 64 05:16:53.716339 f8:f2:1e:15:a8:86 > fa:16:3e:87:97:4d, ethertype 802.1Q (0x8100), length 102: vlan 321, p 0, ethertype IPv4, (tos 0x0, ttl 64, id 18375, offset 0, flags [none], proto ICMP (1), length 84) 192.168.1.155 > 10.46.21.195: ICMP echo reply, id 38793, seq 1, length 64 All looks good, an ICMP echo request is received and we can see the outgoing reply. ** Traffic capture on controller-0, chassis hosting the external port ** [root@controller-0 ~]# tcpdump -i ens4 icmp -vvne -s0 tcpdump: listening on ens4, link-type EN10MB (Ethernet), capture size 262144 bytes 09:18:28.725594 fe:53:c1:d0:e5:67 > fa:16:3e:94:ba:64, ethertype 802.1Q (0x8100), length 102: vlan 319, p 0, ethertype IPv4, (tos 0x0, ttl 64, id 14141, offset 0, flags [DF], proto ICMP (1), length 84) 10.46.21.195 > 10.46.21.217: ICMP echo request, id 46834, seq 1, length 64 09:18:28.726381 f8:f2:1e:15:a8:86 > fa:16:3e:87:97:4d, ethertype 802.1Q (0x8100), length 102: vlan 321, p 0, ethertype IPv4, (tos 0x0, ttl 64, id 21943, offset 0, flags [none], proto ICMP (1), length 84) 192.168.1.155 > 10.46.21.195: ICMP echo reply, id 46834, seq 1, length 64 Echo request observed on vlan 319 (provider network, logical switch 'nova') and the same packet seen on vlan 321 (tenant network where the VM is on) ** Traffic capture on controller-1, chassis hosting the gateway port ** [root@controller-1 ~]# tcpdump -i ens4 -vvne icmp -s0 tcpdump: listening on ens4, link-type EN10MB (Ethernet), 
capture size 262144 bytes 09:24:02.573930 f8:f2:1e:15:a8:86 > fa:16:3e:87:97:4d, ethertype 802.1Q (0x8100), length 102: vlan 321, p 0, ethertype IPv4, (tos 0x0, ttl 64, id 32469, offset 0, flags [none], proto ICMP (1), length 84) 192.168.1.155 > 10.46.21.195: ICMP echo reply, id 9424, seq 1, length 64 09:24:14.183340 fe:53:c1:d0:e5:67 > fa:16:3e:94:ba:64, ethertype 802.1Q (0x8100), length 102: vlan 319, p 0, ethertype IPv4, (tos 0x0, ttl 64, id 41254, offset 0, flags [DF], proto ICMP (1), length 84) 10.46.21.195 > 10.46.21.217: ICMP echo request, id 10328, seq 1, length 64 The reply is observed; the request can also be seen here, though *not always*. ** Traffic capture on controller-2, chassis where the ping is originated ** [root@controller-2 ~]# tcpdump -i any -vvne icmp -s0 tcpdump: listening on any, link-type LINUX_SLL (Linux cooked), capture size 262144 bytes 09:29:13.777350 Out fe:53:c1:d0:e5:67 ethertype IPv4 (0x0800), length 100: (tos 0x0, ttl 64, id 4960, offset 0, flags [DF], proto ICMP (1), length 84) 10.46.21.195 > 10.46.21.217: ICMP echo request, id 36058, seq 1, length 64 09:29:13.778267 P f8:f2:1e:15:a8:86 ethertype 802.1Q (0x8100), length 104: vlan 321, p 0, ethertype IPv4, (tos 0x0, ttl 64, id 24798, offset 0, flags [none], proto ICMP (1), length 84) 192.168.1.155 > 10.46.21.195: ICMP echo reply, id 36058, seq 1, length 64 09:29:13.778460 P fa:16:3e:94:ba:64 ethertype 802.1Q (0x8100), length 104: vlan 319, p 0, ethertype IPv4, (tos 0x0, ttl 63, id 24798, offset 0, flags [none], proto ICMP (1), length 84) 192.168.1.155 > 10.46.21.195: ICMP echo reply, id 36058, seq 1, length 64 09:29:13.778578 P fa:16:3e:94:ba:64 ethertype 802.1Q (0x8100), length 104: vlan 319, p 0, ethertype IPv4, (tos 0x0, ttl 64, id 24798, offset 0, flags [none], proto ICMP (1), length 84) 10.46.21.217 > 10.46.21.195: ICMP echo reply, id 36058, seq 1, length 64 09:29:13.778605 P f8:f2:1e:15:a8:86 ethertype 802.1Q (0x8100), length 104: vlan 321, p 0, ethertype IPv4, (tos 0x0, ttl 64, id 24798, 
offset 0, flags [none], proto ICMP (1), length 84) 192.168.1.155 > 10.46.21.195: ICMP echo reply, id 36058, seq 1, length 64 09:29:13.778639 In fa:16:3e:94:ba:64 ethertype IPv4 (0x0800), length 100: (tos 0x0, ttl 63, id 24798, offset 0, flags [none], proto ICMP (1), length 84) 192.168.1.155 > 10.46.21.195: ICMP echo reply, id 36058, seq 1, length 64 09:29:13.778737 P fa:16:3e:94:ba:64 ethertype 802.1Q (0x8100), length 104: vlan 319, p 0, ethertype IPv4, (tos 0x0, ttl 63, id 24798, offset 0, flags [none], proto ICMP (1), length 84) 192.168.1.155 > 10.46.21.195: ICMP echo reply, id 36058, seq 1, length 64 09:29:13.778737 In fa:16:3e:94:ba:64 ethertype IPv4 (0x0800), length 100: (tos 0x0, ttl 63, id 24798, offset 0, flags [none], proto ICMP (1), length 84) 192.168.1.155 > 10.46.21.195: ICMP echo reply, id 36058, seq 1, length 64 09:29:13.778771 In fa:16:3e:94:ba:64 ethertype IPv4 (0x0800), length 100: (tos 0x0, ttl 64, id 24798, offset 0, flags [none], proto ICMP (1), length 84) 10.46.21.217 > 10.46.21.195: ICMP echo reply, id 36058, seq 1, length 64 09:29:13.778784 In fa:16:3e:94:ba:64 ethertype IPv4 (0x0800), length 100: (tos 0x0, ttl 63, id 24798, offset 0, flags [none], proto ICMP (1), length 84) 192.168.1.155 > 10.46.21.195: ICMP echo reply, id 36058, seq 1, length 64 The expected result would be that the chassis hosting the external port could be able to handle the routing. 
The only way I've found for it to be working is through: 1) Colocating gateway and external ports Chassis "479e0c95-8438-4a22-aa6f-68673be395bf" hostname: controller-0.localdomain Encap geneve ip: "10.20.2.67" options: {csum="true"} Port_Binding cr-lrp-06a377a5-2ba6-4778-b2f0-2a84f86b2030 Port_Binding "64aa14e1-8bae-4c3e-a2c5-0d8167346446" Port_Binding "a7a8c3a1-7b04-43e8-8806-d6249152046e" 2) Configure the dnat_and_snat entries for non DVR: _uuid : 77c51435-8fdb-479e-96c1-32a699d949ba external_ids : {"neutron:fip_id"="b35f9c44-ef17-4a15-b12d-60aa6c46dcb8", "neutron:fip_port_id"="a7a8c3a1-7b04-43e8-8806-d6249152046e", "neutron:revision_number"="6", "neutron:router_name"=neutron-9428ae73-26ea-4791-9052-2343c17bac74} external_ip : "10.46.21.217" external_mac : [] logical_ip : "192.168.1.155" logical_port : [] options : {} type : dnat_and_snat 3) Clearing up the chassis mac mappings in the chassis hosting the external and gw port: [root@controller-0 ~]# ovs-vsctl set open . external_ids:ovn-chassis-mac-mappings=\"\" [root@controller-0 ~]# Then ping works: [stack@undercloud-0 ~]$ ping 10.46.21.217 -c4 PING 10.46.21.217 (10.46.21.217) 56(84) bytes of data. 64 bytes from 10.46.21.217: icmp_seq=1 ttl=61 time=0.499 ms 64 bytes from 10.46.21.217: icmp_seq=2 ttl=61 time=0.645 ms 64 bytes from 10.46.21.217: icmp_seq=3 ttl=61 time=0.364 ms 64 bytes from 10.46.21.217: icmp_seq=4 ttl=61 time=0.405 ms [root@controller-2 ~]# ping 10.46.21.217 -c4 PING 10.46.21.217 (10.46.21.217) 56(84) bytes of data. 
64 bytes from 10.46.21.217: icmp_seq=1 ttl=63 time=0.472 ms 64 bytes from 10.46.21.217: icmp_seq=2 ttl=63 time=0.616 ms 64 bytes from 10.46.21.217: icmp_seq=3 ttl=63 time=0.376 ms 64 bytes from 10.46.21.217: icmp_seq=4 ttl=63 time=0.363 ms --- 10.46.21.217 ping statistics --- 4 packets transmitted, 4 received, 0% packet loss, time 62ms rtt min/avg/max/mdev = 0.363/0.456/0.616/0.104 ms But this is undesired as we cannot easily guarantee colocation and zero dataplane disruption from the CMS (OpenStack in this case).
Submitted v2 of the patch - https://patchwork.ozlabs.org/project/openvswitch/patch/20200709091156.773306-1-numans@ovn.org/
Submitted v3 of the patch with a different approach to fix this issue - https://patchwork.ozlabs.org/project/openvswitch/patch/20200819074100.2682036-1-numans@ovn.org/
tested with following steps: #server systemctl start openvswitch systemctl start ovn-northd ovn-nbctl set-connection ptcp:6641 ovn-sbctl set-connection ptcp:6642 ovs-vsctl set open . external_ids:system-id=hv1 external_ids:ovn-remote=tcp:20.0.31.25:6642 external_ids:ovn-encap-type=geneve external_ids:ovn-encap-ip=20.0.31.25 systemctl restart ovn-controller ovn-nbctl ls-add ls1 ovn-nbctl lsp-add ls1 ls1p1 ovn-nbctl lsp-set-addresses ls1p1 "00:00:01:01:01:01 192.168.200.1 2001:200::1" ovn-nbctl lsp-add ls1 ls1_ext1 ovn-nbctl lsp-set-addresses ls1_ext1 "00:00:01:01:22:01 192.168.200.22 2001:200::22" ovn-nbctl lsp-set-type ls1_ext1 external ovn-nbctl ha-chassis-group-add hagrp1 ovn-nbctl ha-chassis-group-add-chassis hagrp1 hv1 30 ovn-nbctl ha-chassis-group-add-chassis hagrp1 hv0 20 hagrp1_uuid=`ovn-nbctl --bare --columns _uuid find ha_chassis_group name="hagrp1"` ovn-nbctl --wait=hv set Logical_Switch_Port ls1_ext1 ha-chassis-group=$hagrp1_uuid ovs-vsctl add-br br-phys ovs-vsctl set open . external-ids:ovn-bridge-mappings=phys:br-phys ovs-vsctl add-port br-phys ens5f1 ovn-nbctl lsp-add ls1 ln_port ovn-nbctl lsp-set-type ln_port localnet ovn-nbctl lsp-set-options ln_port network_name=phys ovn-nbctl lsp-set-addresses ln_port unknown dhcp_id=$(ovn-nbctl create DHCP_Options cidr=192.168.200.0/24 options="\"server_id\"=\"192.168.200.254\" \"server_mac\"=\"00:de:ad:ff:01:02\" \"lease_time\"=\"3600\" \"router\"=\"192.168.200.254\"") ovn-nbctl lsp-set-dhcpv4-options ls1p1 $dhcp_id ovn-nbctl lsp-set-dhcpv4-options ls1_ext1 $dhcp_id ovs-vsctl set open . 
external_ids:ovn-chassis-mac-mappings=phys:f0:00:00:00:01:02 ovs-vsctl add-port br-int ls1p1 -- set interface ls1p1 type=internal ip netns add ls1p1 ip link set ls1p1 netns ls1p1 ip netns exec ls1p1 ip link set ls1p1 address 00:00:01:01:01:01 ip netns exec ls1p1 ip link set ls1p1 up ovs-vsctl set interface ls1p1 external_ids:iface-id=ls1p1 ovs-vsctl add-port br-phys ls1_ext1 -- set interface ls1_ext1 type=internal ip netns add ls1_ext1 ip link set ls1_ext1 netns ls1_ext1 ip netns exec ls1_ext1 ip link set ls1_ext1 address 00:00:01:01:22:01 ip netns exec ls1_ext1 ip link set ls1_ext1 up ovs-vsctl set interface ls1_ext1 external_ids:iface-id=ls1_ext1 ovn-nbctl lr-add lr1 ovn-nbctl lrp-add lr1 lr1-ls1 00:de:ad:ff:01:02 192.168.200.254/24 ovn-nbctl lsp-add ls1 ls1-lr1 ovn-nbctl lsp-set-type ls1-lr1 router ovn-nbctl lsp-set-options ls1-lr1 router-port=lr1-ls1 ovn-nbctl lsp-set-addresses ls1-lr1 00:de:ad:ff:01:02 ovn-nbctl ls-add ls2 ovn-nbctl lsp-add ls2 ls2p1 ovn-nbctl lsp-set-addresses ls2p1 "00:00:02:01:01:01 192.168.202.1 2001:202::1" ovn-nbctl lrp-add lr1 lr1-ls2 00:de:ad:ff:02:02 192.168.202.254/24 ovn-nbctl lsp-add ls2 ls2-lr1 ovn-nbctl lsp-set-type ls2-lr1 router ovn-nbctl lsp-set-options ls2-lr1 router-port=lr1-ls2 ovn-nbctl lsp-set-addresses ls2-lr1 00:de:ad:ff:02:02 ovs-vsctl add-port br-int ls2p1 -- set interface ls2p1 type=internal ip netns add ls2p1 ip link set ls2p1 netns ls2p1 ip netns exec ls2p1 ip link set ls2p1 address 00:00:02:01:01:01 ip netns exec ls2p1 ip addr add 192.168.202.1/24 dev ls2p1 ip netns exec ls2p1 ip link set ls2p1 up ip netns exec ls2p1 ip route add default via 192.168.202.254 dev ls2p1 ovs-vsctl set interface ls2p1 external_ids:iface-id=ls2p1 #client systemctl start openvswitch ovs-vsctl set open . external_ids:system-id=hv0 external_ids:ovn-remote=tcp:20.0.31.25:6642 external_ids:ovn-encap-type=geneve external_ids:ovn-encap-ip=20.0.31.26 systemctl start ovn-controller ovs-vsctl add-br br-phys ovs-vsctl set open . 
external-ids:ovn-bridge-mappings=phys:br-phys ovs-vsctl set open . external_ids:ovn-chassis-mac-mappings=phys:f0:00:00:00:01:02 ovs-vsctl add-port br-phys p3p2 reproduced on ovn2.13-20.06.1-6.el8fdp.x86_64 [root@dell-per740-12 bz1829762]# rpm -qa | grep -E "openvswitch|ovn" ovn2.13-central-20.06.1-6.el8fdp.x86_64 openvswitch2.13-2.13.0-54.el8fdp.x86_64 kernel-kernel-networking-openvswitch-ovn-common-1.0-7.noarch python3-openvswitch2.13-2.13.0-54.el8fdp.x86_64 ovn2.13-20.06.1-6.el8fdp.x86_64 ovn2.13-host-20.06.1-6.el8fdp.x86_64 openvswitch-selinux-extra-policy-1.0-23.el8fdp.noarch # get ip for ls1_ext1 on server [root@dell-per740-12 bz1829762]# ip netns exec ls1_ext1 dhclient -v ls1_ext1 #ping ls2p1 on ls1_ext1 [root@dell-per740-12 bz1829762]# ip netns exec ls1_ext1 ping 192.168.202.1 -c 5 PING 192.168.202.1 (192.168.202.1) 56(84) bytes of data. 64 bytes from 192.168.202.1: icmp_seq=1 ttl=63 time=2.31 ms 64 bytes from 192.168.202.1: icmp_seq=1 ttl=63 time=2.53 ms (DUP!) 64 bytes from 192.168.202.1: icmp_seq=2 ttl=63 time=0.070 ms 64 bytes from 192.168.202.1: icmp_seq=2 ttl=63 time=0.315 ms (DUP!) 64 bytes from 192.168.202.1: icmp_seq=3 ttl=63 time=0.063 ms 64 bytes from 192.168.202.1: icmp_seq=3 ttl=63 time=0.311 ms (DUP!) 64 bytes from 192.168.202.1: icmp_seq=4 ttl=63 time=0.065 ms 64 bytes from 192.168.202.1: icmp_seq=4 ttl=63 time=0.276 ms (DUP!) 
64 bytes from 192.168.202.1: icmp_seq=5 ttl=63 time=0.062 ms --- 192.168.202.1 ping statistics --- 5 packets transmitted, 5 received, +4 duplicates, 0% packet loss, time 118ms rtt min/avg/max/mdev = 0.062/0.666/2.531/0.944 ms <=== get DUP Verify on ovn2.13-20.06.2-1.el8fdp.x86_64: [root@dell-per740-12 bz1829762]# rpm -qa | grep -E "openvswitch|ovn" ovn2.13-host-20.06.2-1.el8fdp.x86_64 openvswitch2.13-2.13.0-54.el8fdp.x86_64 kernel-kernel-networking-openvswitch-ovn-common-1.0-7.noarch python3-openvswitch2.13-2.13.0-54.el8fdp.x86_64 ovn2.13-central-20.06.2-1.el8fdp.x86_64 openvswitch-selinux-extra-policy-1.0-23.el8fdp.noarch ovn2.13-20.06.2-1.el8fdp.x86_64 [root@dell-per740-12 bz1829762]# ip netns exec ls1_ext1 ping 192.168.202.1 -c 5 PING 192.168.202.1 (192.168.202.1) 56(84) bytes of data. 64 bytes from 192.168.202.1: icmp_seq=1 ttl=63 time=2.31 ms 64 bytes from 192.168.202.1: icmp_seq=2 ttl=63 time=0.073 ms 64 bytes from 192.168.202.1: icmp_seq=3 ttl=63 time=0.063 ms 64 bytes from 192.168.202.1: icmp_seq=4 ttl=63 time=0.064 ms 64 bytes from 192.168.202.1: icmp_seq=5 ttl=63 time=0.063 ms --- 192.168.202.1 ping statistics --- 5 packets transmitted, 5 received, 0% packet loss, time 88ms rtt min/avg/max/mdev = 0.063/0.514/2.310/0.898 ms <=== no DUP
also verified on rhel7 version: [root@dell-per740-42 bz1829762]# rpm -qa | grep -E "openvswitch|ovn" openvswitch2.13-2.13.0-45.el7fdp.x86_64 openvswitch-selinux-extra-policy-1.0-15.el7fdp.noarch kernel-kernel-networking-openvswitch-ovn-common-1.0-7.noarch ovn2.13-20.06.2-1.el7fdp.x86_64 ovn2.13-host-20.06.2-1.el7fdp.x86_64 ovn2.13-central-20.06.2-1.el7fdp.x86_64 [root@dell-per740-42 bz1829762]# ip netns exec ls1_ext1 ping 192.168.202.1 -c 5 PING 192.168.202.1 (192.168.202.1) 56(84) bytes of data. 64 bytes from 192.168.202.1: icmp_seq=1 ttl=63 time=1.97 ms 64 bytes from 192.168.202.1: icmp_seq=2 ttl=63 time=0.065 ms 64 bytes from 192.168.202.1: icmp_seq=3 ttl=63 time=0.053 ms 64 bytes from 192.168.202.1: icmp_seq=4 ttl=63 time=0.053 ms 64 bytes from 192.168.202.1: icmp_seq=5 ttl=63 time=0.053 ms --- 192.168.202.1 ping statistics --- 5 packets transmitted, 5 received, 0% packet loss, time 4005ms rtt min/avg/max/mdev = 0.053/0.439/1.972/0.766 ms
Since the problem described in this bug report should be resolved in a recent advisory, it has been closed with a resolution of ERRATA. For information on the advisory (ovn2.13 bug fix and enhancement update), and where to find the updated files, follow the link below. If the solution does not work for you, open a new bug report. https://access.redhat.com/errata/RHBA-2020:3769