Description of problem: [ovn 20.E]selection_fields for udp and sctp case doesn't work correctly Version-Release number of selected component (if applicable): # rpm -qa|grep ovn ovn2.13-central-2.13.0-34.el7fdp.x86_64 ovn2.13-2.13.0-34.el7fdp.x86_64 ovn2.13-host-2.13.0-34.el7fdp.x86_64 How reproducible: always Steps to Reproduce: server: rlRun "ovn-nbctl ls-add ls1" rlRun "ovn-nbctl lsp-add ls1 ls1p1" rlRun "ovn-nbctl lsp-set-addresses ls1p1 00:01:02:01:01:01" rlRun "ovn-nbctl lsp-add ls1 ls1p2" rlRun "ovn-nbctl lsp-set-addresses ls1p2 00:01:02:01:01:02" rlRun "ovn-nbctl lsp-add ls1 ls1p3" rlRun "ovn-nbctl lsp-set-addresses ls1p3 00:01:02:01:01:04" rlRun "ovn-nbctl ls-add ls2" rlRun "ovn-nbctl lsp-add ls2 ls2p1" rlRun "ovn-nbctl lsp-set-addresses ls2p1 00:01:02:01:01:03" rlRun "ovs-vsctl add-port br-int vm1 -- set interface vm1 type=internal" rlRun "ip netns add server0" rlRun "ip link set vm1 netns server0" rlRun "ip netns exec server0 ip link set lo up" rlRun "ip netns exec server0 ip link set vm1 up" rlRun "ip netns exec server0 ip link set vm1 address 00:01:02:01:01:01" rlRun "ip netns exec server0 ip addr add 192.168.0.1/24 dev vm1" rlRun "ip netns exec server0 ip addr add 3001::1/64 dev vm1" rlRun "ip netns exec server0 ip route add default via 192.168.0.254 dev vm1" rlRun "ip netns exec server0 ip -6 route add default via 3001::a dev vm1" rlRun "ovs-vsctl set Interface vm1 external_ids:iface-id=ls1p1" rlRun "ovs-vsctl add-port br-int vm2 -- set interface vm2 type=internal" rlRun "ip netns add server1" rlRun "ip link set vm2 netns server1" rlRun "ip netns exec server1 ip link set lo up" rlRun "ip netns exec server1 ip link set vm2 up" rlRun "ip netns exec server1 ip link set vm2 address 00:01:02:01:01:02" rlRun "ip netns exec server1 ip addr add 192.168.0.2/24 dev vm2" rlRun "ip netns exec server1 ip addr add 3001::2/64 dev vm2" rlRun "ip netns exec server1 ip route add default via 192.168.0.254 dev vm2" rlRun "ip netns exec server1 ip -6 route add default via 
3001::a dev vm2" rlRun "ovs-vsctl set Interface vm2 external_ids:iface-id=ls1p2" rlRun "ovn-nbctl lr-add lr1" rlRun "ovn-nbctl lrp-add lr1 lr1ls1 00:01:02:0d:01:01 192.168.0.254/24 3001::a/64" rlRun "ovn-nbctl lrp-add lr1 lr1ls2 00:01:02:0d:01:02 192.168.1.254/24 3001:1::a/64" rlRun "ovn-nbctl lsp-add ls1 ls1lr1" rlRun "ovn-nbctl lsp-set-type ls1lr1 router" rlRun "ovn-nbctl lsp-set-options ls1lr1 router-port=lr1ls1" rlRun "ovn-nbctl lsp-set-addresses ls1lr1 \"00:01:02:0d:01:01 192.168.0.254 3001::a\"" rlRun "ovn-nbctl lsp-add ls2 ls2lr1" rlRun "ovn-nbctl lsp-set-type ls2lr1 router" rlRun "ovn-nbctl lsp-set-options ls2lr1 router-port=lr1ls2" rlRun "ovn-nbctl lsp-set-addresses ls2lr1 \"00:01:02:0d:01:02 192.168.1.254 3001:1::a\"" rlRun "ovn-nbctl lrp-add lr1 lr1p 00:01:02:0d:0f:01 172.16.1.254/24 2002::a/64" rlRun "ovn-nbctl lb-add lb0 192.168.2.1:12345 192.168.0.1:12345,192.168.0.2:12345" rlRun "ovn-nbctl lb-add lb0 [3000::100]:12345 [3001::1]:12345,[3001::2]:12345" uuid=`ovn-nbctl list Load_Balancer |grep uuid|awk '{printf $3}'` rlRun "ovn-nbctl set load_balancer $uuid selection_fields=\"ip_src,ip_dst\"" rlRun "ovn-nbctl show" rlRun "ovn-sbctl show" ovn-nbctl set Logical_Router lr1 options:chassis="hv1" rlRun "ovn-nbctl ls-lb-add ls1 lb0" rlRun "ovn-nbctl lb-add lb1 192.168.2.1:12345 192.168.0.1:12345,192.168.0.2:12345" rlRun "ovn-nbctl lb-add lb1 [3000::100]:12345 [3001::1]:12345,[3001::2]:12345" uuid1=`ovn-nbctl lb-list lb1|grep lb1|awk '{printf $1}' ` rlRun "ovn-nbctl set load_balancer $uuid1 protocol=\"udp\"" rlRun "ovn-nbctl set load_balancer $uuid1 selection_fields=\"ip_src,ip_dst,tp_src,tp_dst\"" rlRun "ovn-nbctl list load_balancer" rlRun "ovn-nbctl ls-lb-add ls1 lb1" client: ovs-vsctl add-port br-int vm4 -- set interface vm4 type=internal ip netns add vm4 ip link set vm4 netns vm4 ip netns exec vm4 ip link set lo up ip netns exec vm4 ip link set vm4 up ip netns exec vm4 ip link set vm4 address 00:01:02:01:01:04 ip netns exec vm4 ip addr add 192.168.0.4/24 
dev vm4 ip netns exec vm4 ip addr add 3001::4/64 dev vm4 ip netns exec vm4 ip route add default via 192.168.0.254 dev vm4 ip netns exec vm4 ip -6 route add default via 3001::a dev vm4 ovs-vsctl set Interface vm4 external_ids:iface-id=ls1p3 # ovn-nbctl list load_balancer _uuid : 99e5fc15-4c50-48de-a76b-32da19713170 external_ids : {} health_check : [] ip_port_mappings : {} name : lb0 protocol : tcp selection_fields : [ip_dst, ip_src] vips : {"192.168.2.1:12345"="192.168.0.1:12345,192.168.0.2:12345", "[3000::100]:12345"="[3001::1]:12345,[3001::2]:12345"} _uuid : 6760b0f3-f598-43eb-b950-a664f152ab18 external_ids : {} health_check : [] ip_port_mappings : {} name : lb1 protocol : udp selection_fields : [ip_dst, ip_src, tp_dst, tp_src] vips : {"192.168.2.1:12345"="192.168.0.1:12345,192.168.0.2:12345", "[3000::100]:12345"="[3001::1]:12345,[3001::2]:12345"} # ovs-ofctl dump-groups br-int NXST_GROUP_DESC reply (xid=0x2): group_id=4,type=select,selection_method=hash,fields(ip_src,ip_dst),bucket=bucket_id:0,weight:100,actions=ct(commit,table=19,zone=NXM_NX_REG13[0..15],nat(dst=[3001::1]:12345)),bucket=bucket_id:1,weight:100,actions=ct(commit,table=19,zone=NXM_NX_REG13[0..15],nat(dst=[3001::2]:12345)) group_id=3,type=select,selection_method=hash,fields(ip_src,ip_dst),bucket=bucket_id:0,weight:100,actions=ct(commit,table=19,zone=NXM_NX_REG13[0..15],nat(dst=192.168.0.1:12345)),bucket=bucket_id:1,weight:100,actions=ct(commit,table=19,zone=NXM_NX_REG13[0..15],nat(dst=192.168.0.2:12345)) group_id=1,type=select,selection_method=hash,fields(ip_src,ip_dst,tcp_src,tcp_dst),bucket=bucket_id:0,weight:100,actions=ct(commit,table=19,zone=NXM_NX_REG13[0..15],nat(dst=192.168.0.1:12345)),bucket=bucket_id:1,weight:100,actions=ct(commit,table=19,zone=NXM_NX_REG13[0..15],nat(dst=192.168.0.2:12345)) <--- NOTE: since lb1 is a UDP load balancer, the fields here should be udp_src,udp_dst, not tcp_src,tcp_dst
group_id=2,type=select,selection_method=hash,fields(ip_src,ip_dst,tcp_src,tcp_dst),bucket=bucket_id:0,weight:100,actions=ct(commit,table=19,zone=NXM_NX_REG13[0..15],nat(dst=[3001::1]:12345)),bucket=bucket_id:1,weight:100,actions=ct(commit,table=19,zone=NXM_NX_REG13[0..15],nat(dst=[3001::2]:12345)) from vm4 send udp traffic with different ports to the vip,only one backend can receive the traffic,just like the selection fields is ip_src,ip_dst.(But for tcp case, it works correctly) Expected results: group_id=1,type=select,selection_method=hash,fields(ip_src,ip_dst,tcp_src,tcp_dst) should be group_id=1,type=select,selection_method=hash,fields(ip_src,ip_dst,udp_src,udp_dst) send udp traffic with different ports to the vip,all backends can receive the traffic send udp traffic with the same port to the vip,only one backend can receive the traffic(this is OK now) Additional info: sctp case has the same issue.
Patch submitted for review - https://patchwork.ozlabs.org/project/openvswitch/patch/20200622174124.1271839-1-numans@ovn.org/
verified on version: # rpm -qa|grep ovn ovn2.13-20.06.1-4.el8fdp.x86_64 ovn2.13-host-20.06.1-4.el8fdp.x86_64 ovn2.13-central-20.06.1-4.el8fdp.x86_64 when set selection_fields="ip_src,ip_dst,tp_src,tp_dst" for udp and sctp, group_id=6,type=select,selection_method=hash,fields(ip_src,ip_dst,sctp_src,sctp_dst),bucket=bucket_id:0,weight:100,actions=ct(commit,table=15,zone=NXM_NX_REG11[0..15],nat(dst=192.168.0.1:12345)),bucket=bucket_id:1,weight:100,actions=ct(commit,table=15,zone=NXM_NX_REG11[0..15],nat(dst=192.168.0.2:12345)) group_id=5,type=select,selection_method=hash,fields(ip_src,ip_dst,sctp_src,sctp_dst),bucket=bucket_id:0,weight:100,actions=ct(commit,table=15,zone=NXM_NX_REG11[0..15],nat(dst=[3001::1]:12345)),bucket=bucket_id:1,weight:100,actions=ct(commit,table=15,zone=NXM_NX_REG11[0..15],nat(dst=[3001::2]:12345)) group_id=2,type=select,selection_method=hash,fields(ip_src,ip_dst,tcp_src,tcp_dst),bucket=bucket_id:0,weight:100,actions=ct(commit,table=15,zone=NXM_NX_REG11[0..15],nat(dst=192.168.0.1:12345)),bucket=bucket_id:1,weight:100,actions=ct(commit,table=15,zone=NXM_NX_REG11[0..15],nat(dst=192.168.0.2:12345)) group_id=1,type=select,selection_method=hash,fields(ip_src,ip_dst,tcp_src,tcp_dst),bucket=bucket_id:0,weight:100,actions=ct(commit,table=15,zone=NXM_NX_REG11[0..15],nat(dst=[3001::1]:12345)),bucket=bucket_id:1,weight:100,actions=ct(commit,table=15,zone=NXM_NX_REG11[0..15],nat(dst=[3001::2]:12345)) group_id=3,type=select,selection_method=hash,fields(ip_src,ip_dst,udp_src,udp_dst),bucket=bucket_id:0,weight:100,actions=ct(commit,table=15,zone=NXM_NX_REG11[0..15],nat(dst=192.168.0.1:12345)),bucket=bucket_id:1,weight:100,actions=ct(commit,table=15,zone=NXM_NX_REG11[0..15],nat(dst=192.168.0.2:12345)) 
group_id=4,type=select,selection_method=hash,fields(ip_src,ip_dst,udp_src,udp_dst),bucket=bucket_id:0,weight:100,actions=ct(commit,table=15,zone=NXM_NX_REG11[0..15],nat(dst=[3001::1]:12345)),bucket=bucket_id:1,weight:100,actions=ct(commit,table=15,zone=NXM_NX_REG11[0..15],nat(dst=[3001::2]:12345)) send udp/sctp traffic with different ports to the vip,all backends can receive the traffic send udp/sctp traffic with the same port to the vip,only one backend can receive the traffic
Since the problem described in this bug report should be resolved in a recent advisory, it has been closed with a resolution of ERRATA. For information on the advisory (ovn2.13 bug fix and enhancement update), and where to find the updated files, follow the link below. If the solution does not work for you, open a new bug report. https://access.redhat.com/errata/RHBA-2020:3488