Bug 2133139

Summary: [OVN] if a port-mapping is set for only one of the LB backends, "ovs-ofctl dump-groups br-int" only generates a group entry for that backend
Product: Red Hat Enterprise Linux Fast Datapath
Component: OVN
Version: FDP 22.I
Status: NEW
Severity: medium
Priority: medium
Type: Bug
Reporter: ying xu <yinxu>
Assignee: OVN Team <ovnteam>
QA Contact: ying xu <yinxu>
CC: ctrautma, dceara, jiji, nusiddiq
Flags: dceara: needinfo? (nusiddiq)
Target Milestone: ---
Target Release: ---
Hardware: Unspecified
OS: Unspecified

Description ying xu 2022-10-08 04:25:07 UTC
Description of problem:
If a port-mapping is set for only one of the LB backends, "ovs-ofctl dump-groups br-int" only generates a group entry for that backend.
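("Port-mapping" here refers to the load balancer's ip_port_mappings column. The general form, as used in the steps below, is:

ovn-nbctl set load_balancer <lb-uuid> ip_port_mappings:<backend-ip>=<logical-port>:<health-check-source-ip>

In this reproduction only the mapping for the first backend, 192.168.0.1 on ls1p1, is configured.)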

Version-Release number of selected component (if applicable):
# rpm -qa|grep ovn
ovn22.03-22.03.0-101.el8fdp.x86_64
ovn22.03-central-22.03.0-101.el8fdp.x86_64
ovn22.03-host-22.03.0-101.el8fdp.x86_64

How reproducible:
always

Steps to Reproduce:
server:
ip netns add client0
ip link add veth0_c0_p type veth peer name veth0_c0 netns client0
ip link set veth0_c0_p up
ovs-vsctl add-port br-provider veth0_c0_p
ip netns exec client0 ip link set lo up
ip netns exec client0 ip link set veth0_c0 up
ip netns exec client0 ip addr add 172.16.1.1/24 dev veth0_c0
ip netns exec client0 ip addr add 2002::1/64 dev veth0_c0

ovn-nbctl ls-add ls1
ovn-nbctl lsp-add ls1 ls1p1
ovn-nbctl lsp-set-addresses ls1p1 "00:01:02:01:01:01 192.168.0.1"
ovn-nbctl lsp-set-addresses ls1p1 00:01:02:01:01:01
ovn-nbctl lsp-add ls1 ls1p2
ovn-nbctl lsp-set-addresses ls1p2 "00:01:02:01:01:02 192.168.0.2"
ovn-nbctl lsp-set-addresses ls1p2 00:01:02:01:01:02

ovn-nbctl lsp-add ls1 ls1p3
ovn-nbctl lsp-set-addresses ls1p3 00:01:02:01:01:04

ovn-nbctl ls-add ls2
ovn-nbctl lsp-add ls2 ls2p1
ovn-nbctl lsp-set-addresses ls2p1 00:01:02:01:01:03
ovs-vsctl add-port br-int vm1 -- set interface vm1 type=internal
ip netns add server0
ip link set vm1 netns server0
ip netns exec server0 ip link set lo up
ip netns exec server0 ip link set vm1 up
ip netns exec server0 ip link set vm1 address 00:01:02:01:01:01
ip netns exec server0 ip addr add 192.168.0.1/24 dev vm1
ip netns exec server0 ip addr add 3001::1/64 dev vm1
ip netns exec server0 ip route add default via 192.168.0.254 dev vm1
ip netns exec server0 ip -6 route add default via 3001::a dev vm1
ovs-vsctl set Interface vm1 external_ids:iface-id=ls1p1

ovs-vsctl add-port br-int vm2 -- set interface vm2 type=internal
ip netns add server1
ip link set vm2 netns server1
ip netns exec server1 ip link set lo up
ip netns exec server1 ip link set vm2 up
ip netns exec server1 ip link set vm2 address 00:01:02:01:01:02
ip netns exec server1 ip addr add 192.168.0.2/24 dev vm2
ip netns exec server1 ip addr add 3001::2/64 dev vm2
ip netns exec server1 ip route add default via 192.168.0.254 dev vm2
ip netns exec server1 ip -6 route add default via 3001::a dev vm2
ovs-vsctl set Interface vm2 external_ids:iface-id=ls1p2

ovn-nbctl lr-add lr1
ovn-nbctl lrp-add lr1 lr1ls1 00:01:02:0d:01:01 192.168.0.254/24 3001::a/64
ovn-nbctl lrp-add lr1 lr1ls2 00:01:02:0d:01:02 192.168.1.254/24 3001:1::a/64
ovn-nbctl lsp-add ls1 ls1lr1
ovn-nbctl lsp-set-type ls1lr1 router
ovn-nbctl lsp-set-options ls1lr1 router-port=lr1ls1
ovn-nbctl lsp-set-addresses ls1lr1 "00:01:02:0d:01:01 192.168.0.254 3001::a"
ovn-nbctl lsp-add ls2 ls2lr1
ovn-nbctl lsp-set-type ls2lr1 router
ovn-nbctl lsp-set-options ls2lr1 router-port=lr1ls2
ovn-nbctl lsp-set-addresses ls2lr1 "00:01:02:0d:01:02 192.168.1.254 3001:1::a"

ovn-nbctl lrp-add lr1 lr1p 00:01:02:0d:0f:01 172.16.1.254/24 2002::a/64
# add a udp lb (attached to lr1 below)
ovn-nbctl lb-add lb0 192.168.2.1:12345 192.168.0.1:12345,192.168.0.2:12345
ovn-nbctl --may-exist lb-add lb0 192.168.2.1:12345 192.168.0.1:12345,192.168.0.2:12345
ovn-nbctl lb-add lb0 [3000::100]:12345 [3001::1]:12345,[3001::2]:12345
ovn-nbctl --may-exist lb-add lb0 [3000::100]:12345 [3001::1]:12345,[3001::2]:12345
uuid=`ovn-nbctl list Load_Balancer | grep uuid | awk '{printf $3}'`
ovn-nbctl set Load_Balancer $uuid protocol=udp
ovn-nbctl show
ovn-sbctl show
ovn-nbctl set Logical_Router lr1 options:chassis="hv1"

# create the load balancer health check
uuid3=`ovn-nbctl --id=@hc create Load_Balancer_Health_Check vip="192.168.2.1\:12345" -- add Load_Balancer $uuid health_check @hc`
ovn-nbctl set Load_Balancer_Health_Check $uuid3 options:interval=5 options:timeout=20 options:success_count=3 options:failure_count=3
ovn-nbctl --wait=sb set load_balancer $uuid ip_port_mappings:192.168.0.1=ls1p1:192.168.0.254
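# note: ip_port_mappings is intentionally set only for the first backend (192.168.0.1 / ls1p1);
# no mapping is configured for the second backend (192.168.0.2 / ls1p2), which is what triggers the reported behavior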



ovn-nbctl --wait=hv set NB_Global . options:use_logical_dp_groups=true

ovn-nbctl lr-lb-add lr1 lb0
ovn-nbctl --may-exist lr-lb-add lr1 lb0
ovn-nbctl lr-lb-list lr1



client:
ovs-vsctl add-port br-int vm3 -- set interface vm3 type=internal
ip netns add vm3
ip link set vm3 netns vm3
ip netns exec vm3 ip link set lo up
ip netns exec vm3 ip link set vm3 up
ip netns exec vm3 ip link set vm3 address 00:01:02:01:01:03
ip netns exec vm3 ip addr add 192.168.1.1/24 dev vm3
ip netns exec vm3 ip addr add 3001:1::1/64 dev vm3
ip netns exec vm3 ip route add default via 192.168.1.254 dev vm3
ip netns exec vm3 ip -6 route add default via 3001:1::a dev vm3
ovs-vsctl set Interface vm3 external_ids:iface-id=ls2p1
ovs-vsctl add-port br-int vm4 -- set interface vm4 type=internal
ip netns add vm4
ip link set vm4 netns vm4
ip netns exec vm4 ip link set lo up
ip netns exec vm4 ip link set vm4 up
ip netns exec vm4 ip link set vm4 address 00:01:02:01:01:04
ip netns exec vm4 ip addr add 192.168.0.4/24 dev vm4
ip netns exec vm4 ip addr add 3001::4/64 dev vm4
ip netns exec vm4 ip route add default via 192.168.0.254 dev vm4
ip netns exec vm4 ip -6 route add default via 3001::a dev vm4
ovs-vsctl set Interface vm4 external_ids:iface-id=ls1p3



Start the udp servers in the server namespaces:
ip netns exec server0 nc -l 12345 --udp -k --sh-exec ls&
ip netns exec server1 nc -l 12345 --udp -k --sh-exec ls&

ip netns exec server0 tcpdump -i any -U -w lr0.pcap&
ip netns exec server1 tcpdump -i any -U -w lr1.pcap&

on client:
for i in `seq 10`; do
        rlRun "ip netns exec vm4 nc --udp 192.168.2.1 12345 < /tmp/send.pkt"
        sleep 1
        rlRun "ip netns exec vm4 nc --udp 3000::100 12345 < /tmp/send.pkt"
        sleep 1
done
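
Note: rlRun is the BeakerLib test helper; the nc commands can also be run directly. The payload file /tmp/send.pkt is not included in the report; any small payload works, e.g. (illustrative, matching the 48-byte UDP payloads seen in the captures below):

head -c 48 /dev/zero > /tmp/send.pkt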

Actual results:
tcpdump on server0 and server1, then check the packets:
:: [ 23:44:29 ] :: [  BEGIN   ] :: Running 'tcpdump -r lr0.pcap -nn -v|grep 192.168.1.1.*192.168.0.1.12345'
reading from file lr0.pcap, link-type LINUX_SLL (Linux cooked v1)
dropped privs to tcpdump
    192.168.1.1.40831 > 192.168.0.1.12345: UDP, length 48
    192.168.1.1.40863 > 192.168.0.1.12345: UDP, length 48
    192.168.1.1.36508 > 192.168.0.1.12345: UDP, length 48
    192.168.1.1.33864 > 192.168.0.1.12345: UDP, length 48
    192.168.1.1.48278 > 192.168.0.1.12345: UDP, length 48
    192.168.1.1.41628 > 192.168.0.1.12345: UDP, length 48
    192.168.1.1.37264 > 192.168.0.1.12345: UDP, length 48
    192.168.1.1.46535 > 192.168.0.1.12345: UDP, length 48
    192.168.1.1.43652 > 192.168.0.1.12345: UDP, length 48
    192.168.1.1.48231 > 192.168.0.1.12345: UDP, length 48
:: [ 23:44:29 ] :: [   PASS   ] :: Command 'tcpdump -r lr0.pcap -nn -v|grep 192.168.1.1.*192.168.0.1.12345' (Expected 0, got 0)
:: [ 23:44:29 ] :: [  BEGIN   ] :: Running 'tcpdump -r lr1.pcap -nn -v|grep 192.168.1.1.*192.168.0.2.12345'
reading from file lr1.pcap, link-type LINUX_SLL (Linux cooked v1)
dropped privs to tcpdump
:: [ 23:44:29 ] :: [   FAIL   ] :: Command 'tcpdump -r lr1.pcap -nn -v|grep 192.168.1.1.*192.168.0.2.12345' (Expected 0, got 1)


Expected results:
On the previous version, all backends receive the packets.

Additional info:
ovs-ofctl dump-groups br-int: only 192.168.0.1 appears in the group
NXST_GROUP_DESC reply (xid=0x2):
 group_id=1,type=select,selection_method=dp_hash,bucket=bucket_id:0,weight:100,actions=ct(commit,table=15,zone=NXM_NX_REG11[0..15],nat(dst=192.168.0.1:12345),exec(load:0x1->NXM_NX_CT_MARK[1]))

But on the previous version, the group contained both 192.168.0.1 and 192.168.0.2.
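
For comparison, on the previous version the select group had one bucket per backend, roughly like this (illustrative sketch based on the output above; bucket ids may differ):
 group_id=1,type=select,selection_method=dp_hash,bucket=bucket_id:0,weight:100,actions=ct(commit,table=15,zone=NXM_NX_REG11[0..15],nat(dst=192.168.0.1:12345),exec(load:0x1->NXM_NX_CT_MARK[1])),bucket=bucket_id:1,weight:100,actions=ct(commit,table=15,zone=NXM_NX_REG11[0..15],nat(dst=192.168.0.2:12345),exec(load:0x1->NXM_NX_CT_MARK[1]))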

The same issue occurs in 22.06-59.

Comment 1 Dumitru Ceara 2022-10-18 12:10:59 UTC
This behavior change was introduced with [0].

However, I'm not sure it's worth changing it back.  I don't think it makes a lot of sense to create a load balancer which enables healthcheck only for some of its backends.  Numan, what do you think?


[0] https://github.com/ovn-org/ovn/commit/7341c10f88#diff-97e16400e2bcbb4b65f7f3b8f2c05e9e8e56148df77719b71d60f235e3bcc0edR3781
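
For reference, a sketch of what enabling the mapping for the remaining backend would look like in the reproduction above (same $uuid and topology as in the description), so that the health check covers both backends:

ovn-nbctl --wait=sb set load_balancer $uuid ip_port_mappings:192.168.0.2=ls1p2:192.168.0.254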