Merge changes Ib48cc2b8,I3f0a12f1 into main

* changes:
  type safety for is_ethernet
  type safety for 'bool egress'
Maciej Żenczykowski
2023-10-09 17:23:32 +00:00
committed by Gerrit Code Review
4 changed files with 44 additions and 29 deletions

View File

@@ -87,17 +87,17 @@ static inline __always_inline void try_make_writable(struct __sk_buff* skb, int
     if (skb->data_end - skb->data < len) bpf_skb_pull_data(skb, len);
 }
 
-// constants for passing in to 'bool egress'
-static const bool INGRESS = false;
-static const bool EGRESS = true;
+struct egress_bool { bool egress; };
+#define INGRESS ((struct egress_bool){ .egress = false })
+#define EGRESS ((struct egress_bool){ .egress = true })
 
 // constants for passing in to 'bool downstream'
 static const bool UPSTREAM = false;
 static const bool DOWNSTREAM = true;
 
-// constants for passing in to 'bool is_ethernet'
-static const bool RAWIP = false;
-static const bool ETHER = true;
+struct rawip_bool { bool rawip; };
+#define ETHER ((struct rawip_bool){ .rawip = false })
+#define RAWIP ((struct rawip_bool){ .rawip = true })
 
 // constants for passing in to 'bool updatetime'
 static const bool NO_UPDATETIME = false;
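
Why wrap a bool in a one-member struct: C silently converts any scalar to bool, so with the old static const bool constants a caller could pass EGRESS where an is_ethernet flag was expected, or swap two bool arguments, without any diagnostic. Distinct struct types turn those mistakes into compile errors, while a one-byte struct passed by value should compile to the same code as a plain bool. A minimal sketch of the failure mode this prevents (the account() function is hypothetical, not part of this change):

    static void account(struct egress_bool egress, struct rawip_bool rawip) {
        if (egress.egress && rawip.rawip) { /* count raw-ip tx traffic */ }
    }

    // account(EGRESS, RAWIP);  // ok
    // account(RAWIP, EGRESS);  // error: incompatible struct types
    // account(true, false);    // error: bool no longer converts to either struct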

View File

@@ -55,8 +55,10 @@ struct frag_hdr {
 DEFINE_BPF_MAP_GRW(clat_ingress6_map, HASH, ClatIngress6Key, ClatIngress6Value, 16, AID_SYSTEM)
 
 static inline __always_inline int nat64(struct __sk_buff* skb,
-                                        const bool is_ethernet,
+                                        const struct rawip_bool rawip,
                                         const struct kver_uint kver) {
+    const bool is_ethernet = !rawip.rawip;
+
     // Require ethernet dst mac address to be our unicast address.
     if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_PIPE;
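
The conversion recipe here is mechanical, and it repeats in the tethering hunks below: the parameter changes type and the plain bool is re-derived once at function entry, so the rest of the body compiles unchanged. Note the polarity: the struct carries rawip, so ETHER is .rawip = false and the unwrap negates it. A hedged sketch of the recipe (the function name f is illustrative only):

    // before: static int f(struct __sk_buff* skb, const bool is_ethernet);
    static int f(struct __sk_buff* skb, const struct rawip_bool rawip) {
        const bool is_ethernet = !rawip.rawip;  // unwrap once at entry
        return is_ethernet ? 14 : 0;            // e.g. L2 header size
    }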

View File

@@ -178,7 +178,7 @@ static __always_inline int is_system_uid(uint32_t uid) {
 #define DEFINE_UPDATE_STATS(the_stats_map, TypeOfKey) \
     static __always_inline inline void update_##the_stats_map(const struct __sk_buff* const skb, \
                                                               const TypeOfKey* const key, \
-                                                              const bool egress, \
+                                                              const struct egress_bool egress, \
                                                               const struct kver_uint kver) { \
         StatsValue* value = bpf_##the_stats_map##_lookup_elem(key); \
         if (!value) { \
@@ -199,7 +199,7 @@ static __always_inline int is_system_uid(uint32_t uid) {
             packets = (payload + mss - 1) / mss; \
             bytes = tcp_overhead * packets + payload; \
         } \
-        if (egress) { \
+        if (egress.egress) { \
             __sync_fetch_and_add(&value->txPackets, packets); \
             __sync_fetch_and_add(&value->txBytes, bytes); \
         } else { \
@@ -242,7 +242,7 @@ static __always_inline inline int bpf_skb_load_bytes_net(const struct __sk_buff*
 }
 
 static __always_inline inline void do_packet_tracing(
-        const struct __sk_buff* const skb, const bool egress, const uint32_t uid,
+        const struct __sk_buff* const skb, const struct egress_bool egress, const uint32_t uid,
         const uint32_t tag, const bool enable_tracing, const struct kver_uint kver) {
     if (!enable_tracing) return;
     if (!KVER_IS_AT_LEAST(kver, 5, 8, 0)) return;
@@ -317,8 +317,8 @@ static __always_inline inline void do_packet_tracing(
     pkt->sport = sport;
     pkt->dport = dport;
 
-    pkt->egress = egress;
-    pkt->wakeup = !egress && (skb->mark & 0x80000000); // Fwmark.ingress_cpu_wakeup
+    pkt->egress = egress.egress;
+    pkt->wakeup = !egress.egress && (skb->mark & 0x80000000); // Fwmark.ingress_cpu_wakeup
     pkt->ipProto = proto;
     pkt->tcpFlags = flags;
     pkt->ipVersion = ipVersion;
@@ -326,7 +326,8 @@ static __always_inline inline void do_packet_tracing(
     bpf_packet_trace_ringbuf_submit(pkt);
 }
 
-static __always_inline inline bool skip_owner_match(struct __sk_buff* skb, bool egress,
+static __always_inline inline bool skip_owner_match(struct __sk_buff* skb,
+                                                    const struct egress_bool egress,
                                                     const struct kver_uint kver) {
     uint32_t flag = 0;
     if (skb->protocol == htons(ETH_P_IP)) {
@@ -358,7 +359,7 @@ static __always_inline inline bool skip_owner_match(struct __sk_buff* skb, bool
         return false;
     }
     // Always allow RST's, and additionally allow ingress FINs
-    return flag & (TCP_FLAG_RST | (egress ? 0 : TCP_FLAG_FIN)); // false on read failure
+    return flag & (TCP_FLAG_RST | (egress.egress ? 0 : TCP_FLAG_FIN)); // false on read failure
 }
 
 static __always_inline inline BpfConfig getConfig(uint32_t configKey) {
@@ -401,7 +402,8 @@ static __always_inline inline bool ingress_should_discard(struct __sk_buff* skb,
 }
 
 static __always_inline inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid,
-                                                  bool egress, const struct kver_uint kver) {
+                                                  const struct egress_bool egress,
+                                                  const struct kver_uint kver) {
     if (is_system_uid(uid)) return PASS;
 
     if (skip_owner_match(skb, egress, kver)) return PASS;
@@ -414,7 +416,7 @@ static __always_inline inline int bpf_owner_match(struct __sk_buff* skb, uint32_
     if (isBlockedByUidRules(enabledRules, uidRules)) return DROP;
 
-    if (!egress && skb->ifindex != 1) {
+    if (!egress.egress && skb->ifindex != 1) {
         if (ingress_should_discard(skb, kver)) return DROP;
         if (uidRules & IIF_MATCH) {
             if (allowed_iif && skb->ifindex != allowed_iif) {
@@ -434,7 +436,7 @@ static __always_inline inline int bpf_owner_match(struct __sk_buff* skb, uint32_
 static __always_inline inline void update_stats_with_config(const uint32_t selectedMap,
                                                             const struct __sk_buff* const skb,
                                                             const StatsKey* const key,
-                                                            const bool egress,
+                                                            const struct egress_bool egress,
                                                             const struct kver_uint kver) {
     if (selectedMap == SELECT_MAP_A) {
         update_stats_map_A(skb, key, egress, kver);
@@ -443,7 +445,8 @@ static __always_inline inline void update_stats_with_config(const uint32_t selec
     }
 }
 
-static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, bool egress,
+static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb,
+                                                      const struct egress_bool egress,
                                                       const bool enable_tracing,
                                                       const struct kver_uint kver) {
     uint32_t sock_uid = bpf_get_socket_uid(skb);
@@ -462,7 +465,7 @@ static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, boo
     // interface is accounted for and subject to usage restrictions.
     // CLAT IPv6 TX sockets are *always* tagged with CLAT uid, see tagSocketAsClat()
     // CLAT daemon receives via an untagged AF_PACKET socket.
-    if (egress && uid == AID_CLAT) return PASS;
+    if (egress.egress && uid == AID_CLAT) return PASS;
 
     int match = bpf_owner_match(skb, sock_uid, egress, kver);
@@ -478,7 +481,7 @@ static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, boo
     }
 
     // If an outbound packet is going to be dropped, we do not count that traffic.
-    if (egress && (match == DROP)) return DROP;
+    if (egress.egress && (match == DROP)) return DROP;
 
     StatsKey key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex};
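
Call sites keep their shape through all of this, because INGRESS and EGRESS now expand to compound literals of struct egress_bool that are forwarded by value. A hedged sketch of what a caller looks like after the change (the wrapper function name and the KVER_NONE constant are assumptions, not shown in these hunks):

    // body of a cgroup skb program; the file's own DEFINE_* macros
    // supply the real section/ownership boilerplate.
    static inline int handle_ingress(struct __sk_buff* skb) {
        return bpf_traffic_account(skb, INGRESS, /* enable_tracing */ false, KVER_NONE);
    }

Passing a rawip_bool (or a bare bool) in the egress slot is now rejected by the compiler instead of being silently accepted.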

View File

@@ -124,8 +124,12 @@ DEFINE_BPF_MAP_GRW(tether_downstream64_map, HASH, TetherDownstream64Key, TetherD
 DEFINE_BPF_MAP_GRW(tether_upstream6_map, HASH, TetherUpstream6Key, Tether6Value, 64,
                    TETHERING_GID)
 
-static inline __always_inline int do_forward6(struct __sk_buff* skb, const bool is_ethernet,
-                                              const bool downstream, const struct kver_uint kver) {
+static inline __always_inline int do_forward6(struct __sk_buff* skb,
+                                              const struct rawip_bool rawip,
+                                              const bool downstream,
+                                              const struct kver_uint kver) {
+    const bool is_ethernet = !rawip.rawip;
+
     // Must be meta-ethernet IPv6 frame
     if (skb->protocol != htons(ETH_P_IPV6)) return TC_ACT_PIPE;
@@ -356,9 +360,10 @@ DEFINE_BPF_MAP_GRW(tether_upstream4_map, HASH, Tether4Key, Tether4Value, 1024, T
 static inline __always_inline int do_forward4_bottom(struct __sk_buff* skb,
         const int l2_header_size, void* data, const void* data_end,
-        struct ethhdr* eth, struct iphdr* ip, const bool is_ethernet,
+        struct ethhdr* eth, struct iphdr* ip, const struct rawip_bool rawip,
         const bool downstream, const bool updatetime, const bool is_tcp,
         const struct kver_uint kver) {
+    const bool is_ethernet = !rawip.rawip;
+
     struct tcphdr* tcph = is_tcp ? (void*)(ip + 1) : NULL;
     struct udphdr* udph = is_tcp ? NULL : (void*)(ip + 1);
@@ -547,8 +552,13 @@ static inline __always_inline int do_forward4_bottom(struct __sk_buff* skb,
         return bpf_redirect(v->oif, 0 /* this is effectively BPF_F_EGRESS */);
 }
 
-static inline __always_inline int do_forward4(struct __sk_buff* skb, const bool is_ethernet,
-                                              const bool downstream, const bool updatetime, const struct kver_uint kver) {
+static inline __always_inline int do_forward4(struct __sk_buff* skb,
+                                              const struct rawip_bool rawip,
+                                              const bool downstream,
+                                              const bool updatetime,
+                                              const struct kver_uint kver) {
+    const bool is_ethernet = !rawip.rawip;
+
     // Require ethernet dst mac address to be our unicast address.
     if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_PIPE;
@@ -636,10 +646,10 @@ static inline __always_inline int do_forward4(struct __sk_buff* skb, const bool
     // if the underlying requisite kernel support (bpf_ktime_get_boot_ns) was backported.
     if (is_tcp) {
         return do_forward4_bottom(skb, l2_header_size, data, data_end, eth, ip,
-                                  is_ethernet, downstream, updatetime, /* is_tcp */ true, kver);
+                                  rawip, downstream, updatetime, /* is_tcp */ true, kver);
     } else {
         return do_forward4_bottom(skb, l2_header_size, data, data_end, eth, ip,
-                                  is_ethernet, downstream, updatetime, /* is_tcp */ false, kver);
+                                  rawip, downstream, updatetime, /* is_tcp */ false, kver);
     }
 }
@@ -797,12 +807,12 @@ DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_ether$stub", TETHERING_UID
 DEFINE_BPF_MAP_GRW(tether_dev_map, DEVMAP_HASH, uint32_t, uint32_t, 64, TETHERING_GID)
 
-static inline __always_inline int do_xdp_forward6(struct xdp_md *ctx, const bool is_ethernet,
+static inline __always_inline int do_xdp_forward6(struct xdp_md *ctx, const struct rawip_bool rawip,
                                                   const bool downstream) {
     return XDP_PASS;
 }
 
-static inline __always_inline int do_xdp_forward4(struct xdp_md *ctx, const bool is_ethernet,
+static inline __always_inline int do_xdp_forward4(struct xdp_md *ctx, const struct rawip_bool rawip,
                                                   const bool downstream) {
     return XDP_PASS;
 }
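
The XDP stubs take the same wrapper type, so one set of typed constants serves both the tc helpers and the XDP path, and swapped arguments no longer typecheck. A hedged usage sketch (the entry-point name is illustrative; real programs go through the file's DEFINE_* macros):

    static inline int xdp_tether6_rawip(struct xdp_md* ctx) {
        return do_xdp_forward6(ctx, RAWIP, DOWNSTREAM);  // ok
        // do_xdp_forward6(ctx, DOWNSTREAM, RAWIP);      // error: bool is not a struct rawip_bool
    }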