From 8549af9f1767a08d917c0d639a3174f9bf0a88db Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Maciej=20=C5=BBenczykowski?=
Date: Thu, 25 Feb 2021 19:11:34 -0800
Subject: [PATCH] ebpf offload: bump size of ipv4 tethering maps
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

For ipv6 we need 1 entry per client, so 64 seems like plenty.
For ipv4 we need 1 entry per flow, so even 1024 might not be
enough, but it is much better than 64.

Nucca says:
  # cat /proc/sys/net/netfilter/nf_conntrack_buckets
  65536
  # cat /proc/sys/net/netfilter/nf_conntrack_max
  262144

Per https://www.kernel.org/doc/Documentation/networking/nf_conntrack-sysctl.txt
the default “nf_conntrack_max” is “nf_conntrack_buckets * 4”
(65536 * 4 = 262144, matching the output above).

Test: TreeHugger
Signed-off-by: Maciej Żenczykowski
Change-Id: Ib7d1d8c19bc688c442d842cf5c9f45cdf1241754
---
 Tethering/bpf_progs/offload.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/Tethering/bpf_progs/offload.c b/Tethering/bpf_progs/offload.c
index a5aae90b5e..432fc324eb 100644
--- a/Tethering/bpf_progs/offload.c
+++ b/Tethering/bpf_progs/offload.c
@@ -101,7 +101,7 @@ DEFINE_BPF_MAP_GRW(tether_downstream6_map, HASH, TetherDownstream6Key, Tether6Va
                    AID_NETWORK_STACK)
 
 DEFINE_BPF_MAP_GRW(tether_downstream64_map, HASH, TetherDownstream64Key, TetherDownstream64Value,
-                   64, AID_NETWORK_STACK)
+                   1024, AID_NETWORK_STACK)
 
 DEFINE_BPF_MAP_GRW(tether_upstream6_map, HASH, TetherUpstream6Key, Tether6Value, 64,
                    AID_NETWORK_STACK)
@@ -341,9 +341,9 @@ DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream6_rawip$stub", AID_ROOT, AID
 
 // ----- IPv4 Support -----
 
-DEFINE_BPF_MAP_GRW(tether_downstream4_map, HASH, Tether4Key, Tether4Value, 64, AID_NETWORK_STACK)
+DEFINE_BPF_MAP_GRW(tether_downstream4_map, HASH, Tether4Key, Tether4Value, 1024, AID_NETWORK_STACK)
 
-DEFINE_BPF_MAP_GRW(tether_upstream4_map, HASH, Tether4Key, Tether4Value, 64, AID_NETWORK_STACK)
+DEFINE_BPF_MAP_GRW(tether_upstream4_map, HASH, Tether4Key, Tether4Value, 1024, AID_NETWORK_STACK)
 
 static inline __always_inline int do_forward4(struct __sk_buff* skb, const bool is_ethernet,
                                               const bool downstream, const bool updatetime) {
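
Background note on what the bumped argument controls: the size parameter of
DEFINE_BPF_MAP_GRW (64 -> 1024 in this patch) becomes the BPF map's
max_entries, the kernel-enforced cap on live entries, so for the ipv4 maps it
bounds the number of concurrently offloaded flows. The real macro lives in the
Android bpf_helpers headers and also encodes owner/group permissions; the
expansion below is a minimal sketch under that assumption, not the actual
definition, and the Tether4Key/Tether4Value bodies and the AID value are
placeholders for illustration.

/* Minimal sketch of a DEFINE_BPF_MAP_GRW-style macro (assumption for
 * illustration; the real one also sets up group read/write ownership
 * for the given AID, which this sketch ignores). */
#define SECTION(name) __attribute__((section(name), used))

enum { BPF_MAP_TYPE_HASH = 1 };  /* value as in <linux/bpf.h> */

struct bpf_map_def {
    unsigned int type;         /* e.g. BPF_MAP_TYPE_HASH */
    unsigned int key_size;     /* sizeof(key struct) */
    unsigned int value_size;   /* sizeof(value struct) */
    unsigned int max_entries;  /* hard cap on live entries */
};

#define DEFINE_BPF_MAP_GRW(the_map, TYPE, KeyType, ValueType, num_entries, gid) \
    struct bpf_map_def SECTION("maps") the_map = {                              \
        .type = BPF_MAP_TYPE_##TYPE,                                            \
        .key_size = sizeof(KeyType),                                            \
        .value_size = sizeof(ValueType),                                        \
        .max_entries = (num_entries), /* the argument this patch bumps */       \
    };

/* Placeholder key/value types so the example is self-contained; the
 * real structs are defined in the tethering offload headers. */
typedef struct { unsigned int src_ip, dst_ip; } Tether4Key;
typedef struct { unsigned int last_used; } Tether4Value;

#define AID_NETWORK_STACK 1073  /* placeholder; real value comes from
                                 * android_filesystem_config.h */

/* Mirrors the post-patch definition: up to 1024 concurrent ipv4 flows. */
DEFINE_BPF_MAP_GRW(tether_downstream4_map, HASH, Tether4Key, Tether4Value, 1024,
                   AID_NETWORK_STACK)

Under this reading, the sizing argument in the commit message follows
directly: one hash-map entry per NATed flow means the old cap of 64 could be
exhausted by a single busy client, while 1024 is still tiny next to the
device's conntrack table (262144 by default, per the figures quoted above).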