Merge changes If33414ec,I8f211e47,If05c0c53 into main

* changes:
  netd.c: factor out get_app_permissions()
  bpf_progs: use macros for kernel version
  type safety for kernel version
Treehugger Robot, 2023-10-09 14:39:54 +00:00, committed by Gerrit Code Review
8 changed files with 102 additions and 94 deletions

View File

@@ -63,12 +63,12 @@ static inline __always_inline int block_port(struct bpf_sock_addr *ctx) {
                         BPFLOADER_MIN_VER, BPFLOADER_MAX_VER, MANDATORY, \
                         "", "netd_readonly/", LOAD_ON_ENG, LOAD_ON_USER, LOAD_ON_USERDEBUG)
-DEFINE_NETD_RO_BPF_PROG("bind4/block_port", bind4_block_port, KVER(4, 19, 0))
+DEFINE_NETD_RO_BPF_PROG("bind4/block_port", bind4_block_port, KVER_4_19)
 (struct bpf_sock_addr *ctx) {
     return block_port(ctx);
 }
-DEFINE_NETD_RO_BPF_PROG("bind6/block_port", bind6_block_port, KVER(4, 19, 0))
+DEFINE_NETD_RO_BPF_PROG("bind6/block_port", bind6_block_port, KVER_4_19)
 (struct bpf_sock_addr *ctx) {
     return block_port(ctx);
 }

View File

@@ -102,5 +102,3 @@ static const bool ETHER = true;
 // constants for passing in to 'bool updatetime'
 static const bool NO_UPDATETIME = false;
 static const bool UPDATETIME = true;
-#define KVER_4_14 KVER(4, 14, 0)

View File

@@ -56,7 +56,7 @@ DEFINE_BPF_MAP_GRW(clat_ingress6_map, HASH, ClatIngress6Key, ClatIngress6Value,
 static inline __always_inline int nat64(struct __sk_buff* skb,
                                         const bool is_ethernet,
-                                        const unsigned kver) {
+                                        const struct kver_uint kver) {
     // Require ethernet dst mac address to be our unicast address.
     if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_PIPE;
@@ -115,7 +115,7 @@ static inline __always_inline int nat64(struct __sk_buff* skb,
     if (proto == IPPROTO_FRAGMENT) {
         // Fragment handling requires bpf_skb_adjust_room which is 4.14+
-        if (kver < KVER_4_14) return TC_ACT_PIPE;
+        if (!KVER_IS_AT_LEAST(kver, 4, 14, 0)) return TC_ACT_PIPE;
         // Must have (ethernet and) ipv6 header and ipv6 fragment extension header
         if (data + l2_header_size + sizeof(*ip6) + sizeof(struct frag_hdr) > data_end)
@@ -233,7 +233,7 @@ static inline __always_inline int nat64(struct __sk_buff* skb,
     //
     // Note: we currently have no TreeHugger coverage for 4.9-T devices (there are no such
     // Pixel or cuttlefish devices), so likely you won't notice for months if this breaks...
-    if (kver >= KVER_4_14 && frag_off != htons(IP_DF)) {
+    if (KVER_IS_AT_LEAST(kver, 4, 14, 0) && frag_off != htons(IP_DF)) {
         // If we're converting an IPv6 Fragment, we need to trim off 8 more bytes
         // We're beyond recovery on error here... but hard to imagine how this could fail.
         if (bpf_skb_adjust_room(skb, -(__s32)sizeof(struct frag_hdr), BPF_ADJ_ROOM_NET, /*flags*/0))
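
A note on the pattern above: 'kver' is a compile-time constant for each program variant (every program is built for a fixed minimum kernel), so a KVER_IS_AT_LEAST() check folds to a constant and the dead branch is removed before the verifier ever sees it. A minimal standalone sketch of that behaviour, using the same struct/macro shape as the header change at the bottom of this commit; the fragment_supported() helper is hypothetical, not code from this change:

    #include <stdio.h>

    struct kver_uint { unsigned int kver; };
    #define KVER_(v) ((struct kver_uint){ .kver = (v) })
    #define KVER(a, b, c) KVER_(((a) << 24) + ((b) << 16) + (c))
    #define KVER_IS_AT_LEAST(k, a, b, c) ((k).kver >= KVER(a, b, c).kver)

    // Hypothetical helper: with a constant 'kver' the branch is decided at compile time.
    static inline int fragment_supported(const struct kver_uint kver) {
        if (!KVER_IS_AT_LEAST(kver, 4, 14, 0)) return 0;  // folded away on a 4.14+ build
        return 1;                                         // fragment path, 4.14+ only
    }

    int main(void) {
        printf("%d %d\n", fragment_supported(KVER(4, 9, 0)), fragment_supported(KVER(4, 14, 0)));
        return 0;  // prints "0 1"
    }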

View File

@@ -222,7 +222,7 @@ static inline __always_inline void match_policy(struct __sk_buff* skb, bool ipv4
 }
 DEFINE_BPF_PROG_KVER("schedcls/set_dscp_ether", AID_ROOT, AID_SYSTEM, schedcls_set_dscp_ether,
-                     KVER(5, 15, 0))
+                     KVER_5_15)
 (struct __sk_buff* skb) {
     if (skb->pkt_type != PACKET_HOST) return TC_ACT_PIPE;

View File

@@ -179,7 +179,7 @@ static __always_inline int is_system_uid(uint32_t uid) {
 static __always_inline inline void update_##the_stats_map(const struct __sk_buff* const skb, \
                                                           const TypeOfKey* const key, \
                                                           const bool egress, \
-                                                          const unsigned kver) { \
+                                                          const struct kver_uint kver) { \
     StatsValue* value = bpf_##the_stats_map##_lookup_elem(key); \
     if (!value) { \
         StatsValue newValue = {}; \
@@ -219,7 +219,7 @@ static __always_inline inline int bpf_skb_load_bytes_net(const struct __sk_buff*
                                                          const int L3_off,
                                                          void* const to,
                                                          const int len,
-                                                         const unsigned kver) {
+                                                         const struct kver_uint kver) {
     // 'kver' (here and throughout) is the compile time guaranteed minimum kernel version,
     // ie. we're building (a version of) the bpf program for kver (or newer!) kernels.
     //
@@ -236,16 +236,16 @@ static __always_inline inline int bpf_skb_load_bytes_net(const struct __sk_buff*
     //
     // For similar reasons this will fail with non-offloaded VLAN tags on < 4.19 kernels,
     // since those extend the ethernet header from 14 to 18 bytes.
-    return kver >= KVER(4, 19, 0)
+    return KVER_IS_AT_LEAST(kver, 4, 19, 0)
            ? bpf_skb_load_bytes_relative(skb, L3_off, to, len, BPF_HDR_START_NET)
            : bpf_skb_load_bytes(skb, L3_off, to, len);
 }
 static __always_inline inline void do_packet_tracing(
         const struct __sk_buff* const skb, const bool egress, const uint32_t uid,
-        const uint32_t tag, const bool enable_tracing, const unsigned kver) {
+        const uint32_t tag, const bool enable_tracing, const struct kver_uint kver) {
     if (!enable_tracing) return;
-    if (kver < KVER(5, 8, 0)) return;
+    if (!KVER_IS_AT_LEAST(kver, 5, 8, 0)) return;
     uint32_t mapKey = 0;
     bool* traceConfig = bpf_packet_trace_enabled_map_lookup_elem(&mapKey);
@@ -327,7 +327,7 @@ static __always_inline inline void do_packet_tracing(
 }
 static __always_inline inline bool skip_owner_match(struct __sk_buff* skb, bool egress,
-                                                    const unsigned kver) {
+                                                    const struct kver_uint kver) {
     uint32_t flag = 0;
     if (skb->protocol == htons(ETH_P_IP)) {
         uint8_t proto;
@@ -372,11 +372,11 @@ static __always_inline inline BpfConfig getConfig(uint32_t configKey) {
 }
 static __always_inline inline bool ingress_should_discard(struct __sk_buff* skb,
-                                                          const unsigned kver) {
+                                                          const struct kver_uint kver) {
     // Require 4.19, since earlier kernels don't have bpf_skb_load_bytes_relative() which
     // provides relative to L3 header reads. Without that we could fetch the wrong bytes.
     // Additionally earlier bpf verifiers are much harder to please.
-    if (kver < KVER(4, 19, 0)) return false;
+    if (!KVER_IS_AT_LEAST(kver, 4, 19, 0)) return false;
     IngressDiscardKey k = {};
     if (skb->protocol == htons(ETH_P_IP)) {
@@ -401,7 +401,7 @@ static __always_inline inline bool ingress_should_discard(struct __sk_buff* skb,
 }
 static __always_inline inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid,
-                                                  bool egress, const unsigned kver) {
+                                                  bool egress, const struct kver_uint kver) {
     if (is_system_uid(uid)) return PASS;
     if (skip_owner_match(skb, egress, kver)) return PASS;
@@ -435,7 +435,7 @@ static __always_inline inline void update_stats_with_config(const uint32_t selec
                                                             const struct __sk_buff* const skb,
                                                             const StatsKey* const key,
                                                             const bool egress,
-                                                            const unsigned kver) {
+                                                            const struct kver_uint kver) {
     if (selectedMap == SELECT_MAP_A) {
         update_stats_map_A(skb, key, egress, kver);
     } else {
@@ -445,7 +445,7 @@ static __always_inline inline void update_stats_with_config(const uint32_t selec
 static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, bool egress,
                                                       const bool enable_tracing,
-                                                      const unsigned kver) {
+                                                      const struct kver_uint kver) {
     uint32_t sock_uid = bpf_get_socket_uid(skb);
     uint64_t cookie = bpf_get_socket_cookie(skb);
     UidTagValue* utag = bpf_cookie_tag_map_lookup_elem(&cookie);
@@ -505,64 +505,64 @@ static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, boo
 // This program is optional, and enables tracing on Android U+, 5.8+ on user builds.
 DEFINE_BPF_PROG_EXT("cgroupskb/ingress/stats$trace_user", AID_ROOT, AID_SYSTEM,
-                    bpf_cgroup_ingress_trace_user, KVER(5, 8, 0), KVER_INF,
+                    bpf_cgroup_ingress_trace_user, KVER_5_8, KVER_INF,
                     BPFLOADER_IGNORED_ON_VERSION, BPFLOADER_MAX_VER, OPTIONAL,
                     "fs_bpf_netd_readonly", "",
                     IGNORE_ON_ENG, LOAD_ON_USER, IGNORE_ON_USERDEBUG)
 (struct __sk_buff* skb) {
-    return bpf_traffic_account(skb, INGRESS, TRACE_ON, KVER(5, 8, 0));
+    return bpf_traffic_account(skb, INGRESS, TRACE_ON, KVER_5_8);
 }
 // This program is required, and enables tracing on Android U+, 5.8+, userdebug/eng.
 DEFINE_BPF_PROG_EXT("cgroupskb/ingress/stats$trace", AID_ROOT, AID_SYSTEM,
-                    bpf_cgroup_ingress_trace, KVER(5, 8, 0), KVER_INF,
+                    bpf_cgroup_ingress_trace, KVER_5_8, KVER_INF,
                     BPFLOADER_IGNORED_ON_VERSION, BPFLOADER_MAX_VER, MANDATORY,
                     "fs_bpf_netd_readonly", "",
                     LOAD_ON_ENG, IGNORE_ON_USER, LOAD_ON_USERDEBUG)
 (struct __sk_buff* skb) {
-    return bpf_traffic_account(skb, INGRESS, TRACE_ON, KVER(5, 8, 0));
+    return bpf_traffic_account(skb, INGRESS, TRACE_ON, KVER_5_8);
 }
 DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/ingress/stats$4_19", AID_ROOT, AID_SYSTEM,
-                                bpf_cgroup_ingress_4_19, KVER(4, 19, 0), KVER_INF)
+                                bpf_cgroup_ingress_4_19, KVER_4_19, KVER_INF)
 (struct __sk_buff* skb) {
-    return bpf_traffic_account(skb, INGRESS, TRACE_OFF, KVER(4, 19, 0));
+    return bpf_traffic_account(skb, INGRESS, TRACE_OFF, KVER_4_19);
 }
 DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/ingress/stats$4_14", AID_ROOT, AID_SYSTEM,
-                                bpf_cgroup_ingress_4_14, KVER_NONE, KVER(4, 19, 0))
+                                bpf_cgroup_ingress_4_14, KVER_NONE, KVER_4_19)
 (struct __sk_buff* skb) {
     return bpf_traffic_account(skb, INGRESS, TRACE_OFF, KVER_NONE);
 }
 // This program is optional, and enables tracing on Android U+, 5.8+ on user builds.
 DEFINE_BPF_PROG_EXT("cgroupskb/egress/stats$trace_user", AID_ROOT, AID_SYSTEM,
-                    bpf_cgroup_egress_trace_user, KVER(5, 8, 0), KVER_INF,
+                    bpf_cgroup_egress_trace_user, KVER_5_8, KVER_INF,
                     BPFLOADER_IGNORED_ON_VERSION, BPFLOADER_MAX_VER, OPTIONAL,
                     "fs_bpf_netd_readonly", "",
                     LOAD_ON_ENG, IGNORE_ON_USER, LOAD_ON_USERDEBUG)
 (struct __sk_buff* skb) {
-    return bpf_traffic_account(skb, EGRESS, TRACE_ON, KVER(5, 8, 0));
+    return bpf_traffic_account(skb, EGRESS, TRACE_ON, KVER_5_8);
 }
 // This program is required, and enables tracing on Android U+, 5.8+, userdebug/eng.
 DEFINE_BPF_PROG_EXT("cgroupskb/egress/stats$trace", AID_ROOT, AID_SYSTEM,
-                    bpf_cgroup_egress_trace, KVER(5, 8, 0), KVER_INF,
+                    bpf_cgroup_egress_trace, KVER_5_8, KVER_INF,
                     BPFLOADER_IGNORED_ON_VERSION, BPFLOADER_MAX_VER, MANDATORY,
                     "fs_bpf_netd_readonly", "",
                     LOAD_ON_ENG, IGNORE_ON_USER, LOAD_ON_USERDEBUG)
 (struct __sk_buff* skb) {
-    return bpf_traffic_account(skb, EGRESS, TRACE_ON, KVER(5, 8, 0));
+    return bpf_traffic_account(skb, EGRESS, TRACE_ON, KVER_5_8);
 }
 DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/egress/stats$4_19", AID_ROOT, AID_SYSTEM,
-                                bpf_cgroup_egress_4_19, KVER(4, 19, 0), KVER_INF)
+                                bpf_cgroup_egress_4_19, KVER_4_19, KVER_INF)
 (struct __sk_buff* skb) {
-    return bpf_traffic_account(skb, EGRESS, TRACE_OFF, KVER(4, 19, 0));
+    return bpf_traffic_account(skb, EGRESS, TRACE_OFF, KVER_4_19);
 }
 DEFINE_NETD_BPF_PROG_KVER_RANGE("cgroupskb/egress/stats$4_14", AID_ROOT, AID_SYSTEM,
-                                bpf_cgroup_egress_4_14, KVER_NONE, KVER(4, 19, 0))
+                                bpf_cgroup_egress_4_14, KVER_NONE, KVER_4_19)
 (struct __sk_buff* skb) {
     return bpf_traffic_account(skb, EGRESS, TRACE_OFF, KVER_NONE);
 }
@@ -637,9 +637,7 @@ DEFINE_XTBPF_PROG("skfilter/denylist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_den
     return BPF_NOMATCH;
 }
-DEFINE_NETD_BPF_PROG_KVER("cgroupsock/inet/create", AID_ROOT, AID_ROOT, inet_socket_create,
-                          KVER(4, 14, 0))
-(struct bpf_sock* sk) {
+static __always_inline inline uint8_t get_app_permissions() {
     uint64_t gid_uid = bpf_get_current_uid_gid();
     /*
      * A given app is guaranteed to have the same app ID in all the profiles in
@@ -649,13 +647,15 @@ DEFINE_NETD_BPF_PROG_KVER("cgroupsock/inet/create", AID_ROOT, AID_ROOT, inet_soc
      */
     uint32_t appId = (gid_uid & 0xffffffff) % AID_USER_OFFSET; // == PER_USER_RANGE == 100000
     uint8_t* permissions = bpf_uid_permission_map_lookup_elem(&appId);
-    if (!permissions) {
-        // UID not in map. Default to just INTERNET permission.
-        return 1;
-    }
+    // if UID not in map, then default to just INTERNET permission.
+    return permissions ? *permissions : BPF_PERMISSION_INTERNET;
+}
+DEFINE_NETD_BPF_PROG_KVER("cgroupsock/inet/create", AID_ROOT, AID_ROOT, inet_socket_create,
+                          KVER_4_14)
+(struct bpf_sock* sk) {
     // A return value of 1 means allow, everything else means deny.
-    return (*permissions & BPF_PERMISSION_INTERNET) == BPF_PERMISSION_INTERNET;
+    return (get_app_permissions() & BPF_PERMISSION_INTERNET) ? 1 : 0;
 }
 LICENSE("Apache 2.0");
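
The refactor above only separates "look up this UID's permission bits, defaulting to INTERNET" from the cgroup hook that consumes them. A standalone sketch of the same lookup-with-default idea; the map stand-in, the permission bit value, and the get_app_permissions(app_id) signature are simplified illustrations, not the netd definitions:

    #include <stdint.h>
    #include <stdio.h>

    // Stand-in permission bit; the real BPF_PERMISSION_INTERNET value is not shown in this diff.
    #define PERMISSION_INTERNET 1u

    // Toy replacement for bpf_uid_permission_map_lookup_elem(): returns NULL when the
    // app id is not present, mirroring the "UID not in map" case.
    static const uint8_t* permission_lookup(uint32_t app_id) {
        static const uint8_t no_permissions = 0;
        return (app_id == 10001) ? &no_permissions : NULL;  // only app 10001 is restricted
    }

    static uint8_t get_app_permissions(uint32_t app_id) {
        const uint8_t* permissions = permission_lookup(app_id);
        // If the UID is not in the map, default to just INTERNET permission.
        return permissions ? *permissions : PERMISSION_INTERNET;
    }

    int main(void) {
        // 1 = allow socket creation, 0 = deny (mirrors the cgroup hook's return contract).
        printf("app 10001: %d\n", (get_app_permissions(10001) & PERMISSION_INTERNET) ? 1 : 0);  // 0
        printf("app 10002: %d\n", (get_app_permissions(10002) & PERMISSION_INTERNET) ? 1 : 0);  // 1
        return 0;
    }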

View File

@@ -125,7 +125,7 @@ DEFINE_BPF_MAP_GRW(tether_upstream6_map, HASH, TetherUpstream6Key, Tether6Value,
                    TETHERING_GID)
 static inline __always_inline int do_forward6(struct __sk_buff* skb, const bool is_ethernet,
-                                              const bool downstream, const unsigned kver) {
+                                              const bool downstream, const struct kver_uint kver) {
     // Must be meta-ethernet IPv6 frame
     if (skb->protocol != htons(ETH_P_IPV6)) return TC_ACT_PIPE;
@@ -324,26 +324,26 @@ DEFINE_BPF_PROG("schedcls/tether_upstream6_ether", TETHERING_UID, TETHERING_GID,
 //
 // Hence, these mandatory (must load successfully) implementations for 4.14+ kernels:
 DEFINE_BPF_PROG_KVER("schedcls/tether_downstream6_rawip$4_14", TETHERING_UID, TETHERING_GID,
-                     sched_cls_tether_downstream6_rawip_4_14, KVER(4, 14, 0))
+                     sched_cls_tether_downstream6_rawip_4_14, KVER_4_14)
 (struct __sk_buff* skb) {
-    return do_forward6(skb, RAWIP, DOWNSTREAM, KVER(4, 14, 0));
+    return do_forward6(skb, RAWIP, DOWNSTREAM, KVER_4_14);
 }
 DEFINE_BPF_PROG_KVER("schedcls/tether_upstream6_rawip$4_14", TETHERING_UID, TETHERING_GID,
-                     sched_cls_tether_upstream6_rawip_4_14, KVER(4, 14, 0))
+                     sched_cls_tether_upstream6_rawip_4_14, KVER_4_14)
 (struct __sk_buff* skb) {
-    return do_forward6(skb, RAWIP, UPSTREAM, KVER(4, 14, 0));
+    return do_forward6(skb, RAWIP, UPSTREAM, KVER_4_14);
 }
 // and define no-op stubs for pre-4.14 kernels.
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream6_rawip$stub", TETHERING_UID, TETHERING_GID,
-                           sched_cls_tether_downstream6_rawip_stub, KVER_NONE, KVER(4, 14, 0))
+                           sched_cls_tether_downstream6_rawip_stub, KVER_NONE, KVER_4_14)
 (struct __sk_buff* skb) {
     return TC_ACT_PIPE;
 }
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream6_rawip$stub", TETHERING_UID, TETHERING_GID,
-                           sched_cls_tether_upstream6_rawip_stub, KVER_NONE, KVER(4, 14, 0))
+                           sched_cls_tether_upstream6_rawip_stub, KVER_NONE, KVER_4_14)
 (struct __sk_buff* skb) {
     return TC_ACT_PIPE;
 }
@@ -358,7 +358,7 @@ static inline __always_inline int do_forward4_bottom(struct __sk_buff* skb,
         const int l2_header_size, void* data, const void* data_end,
         struct ethhdr* eth, struct iphdr* ip, const bool is_ethernet,
         const bool downstream, const bool updatetime, const bool is_tcp,
-        const unsigned kver) {
+        const struct kver_uint kver) {
     struct tcphdr* tcph = is_tcp ? (void*)(ip + 1) : NULL;
     struct udphdr* udph = is_tcp ? NULL : (void*)(ip + 1);
@@ -548,7 +548,7 @@ static inline __always_inline int do_forward4_bottom(struct __sk_buff* skb,
 }
 static inline __always_inline int do_forward4(struct __sk_buff* skb, const bool is_ethernet,
-        const bool downstream, const bool updatetime, const unsigned kver) {
+        const bool downstream, const bool updatetime, const struct kver_uint kver) {
     // Require ethernet dst mac address to be our unicast address.
     if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_PIPE;
@@ -646,27 +646,27 @@ static inline __always_inline int do_forward4(struct __sk_buff* skb, const bool
 // Full featured (required) implementations for 5.8+ kernels (these are S+ by definition)
 DEFINE_BPF_PROG_KVER("schedcls/tether_downstream4_rawip$5_8", TETHERING_UID, TETHERING_GID,
-                     sched_cls_tether_downstream4_rawip_5_8, KVER(5, 8, 0))
+                     sched_cls_tether_downstream4_rawip_5_8, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, RAWIP, DOWNSTREAM, UPDATETIME, KVER(5, 8, 0));
+    return do_forward4(skb, RAWIP, DOWNSTREAM, UPDATETIME, KVER_5_8);
 }
 DEFINE_BPF_PROG_KVER("schedcls/tether_upstream4_rawip$5_8", TETHERING_UID, TETHERING_GID,
-                     sched_cls_tether_upstream4_rawip_5_8, KVER(5, 8, 0))
+                     sched_cls_tether_upstream4_rawip_5_8, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, RAWIP, UPSTREAM, UPDATETIME, KVER(5, 8, 0));
+    return do_forward4(skb, RAWIP, UPSTREAM, UPDATETIME, KVER_5_8);
 }
 DEFINE_BPF_PROG_KVER("schedcls/tether_downstream4_ether$5_8", TETHERING_UID, TETHERING_GID,
-                     sched_cls_tether_downstream4_ether_5_8, KVER(5, 8, 0))
+                     sched_cls_tether_downstream4_ether_5_8, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, ETHER, DOWNSTREAM, UPDATETIME, KVER(5, 8, 0));
+    return do_forward4(skb, ETHER, DOWNSTREAM, UPDATETIME, KVER_5_8);
 }
 DEFINE_BPF_PROG_KVER("schedcls/tether_upstream4_ether$5_8", TETHERING_UID, TETHERING_GID,
-                     sched_cls_tether_upstream4_ether_5_8, KVER(5, 8, 0))
+                     sched_cls_tether_upstream4_ether_5_8, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, ETHER, UPSTREAM, UPDATETIME, KVER(5, 8, 0));
+    return do_forward4(skb, ETHER, UPSTREAM, UPDATETIME, KVER_5_8);
 }
 // Full featured (optional) implementations for 4.14-S, 4.19-S & 5.4-S kernels
@@ -675,33 +675,33 @@ DEFINE_BPF_PROG_KVER("schedcls/tether_upstream4_ether$5_8", TETHERING_UID, TETHE
 DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$opt",
                                     TETHERING_UID, TETHERING_GID,
                                     sched_cls_tether_downstream4_rawip_opt,
-                                    KVER(4, 14, 0), KVER(5, 8, 0))
+                                    KVER_4_14, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, RAWIP, DOWNSTREAM, UPDATETIME, KVER(4, 14, 0));
+    return do_forward4(skb, RAWIP, DOWNSTREAM, UPDATETIME, KVER_4_14);
 }
 DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$opt",
                                     TETHERING_UID, TETHERING_GID,
                                     sched_cls_tether_upstream4_rawip_opt,
-                                    KVER(4, 14, 0), KVER(5, 8, 0))
+                                    KVER_4_14, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, RAWIP, UPSTREAM, UPDATETIME, KVER(4, 14, 0));
+    return do_forward4(skb, RAWIP, UPSTREAM, UPDATETIME, KVER_4_14);
 }
 DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_ether$opt",
                                     TETHERING_UID, TETHERING_GID,
                                     sched_cls_tether_downstream4_ether_opt,
-                                    KVER(4, 14, 0), KVER(5, 8, 0))
+                                    KVER_4_14, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, ETHER, DOWNSTREAM, UPDATETIME, KVER(4, 14, 0));
+    return do_forward4(skb, ETHER, DOWNSTREAM, UPDATETIME, KVER_4_14);
 }
 DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_ether$opt",
                                     TETHERING_UID, TETHERING_GID,
                                     sched_cls_tether_upstream4_ether_opt,
-                                    KVER(4, 14, 0), KVER(5, 8, 0))
+                                    KVER_4_14, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, ETHER, UPSTREAM, UPDATETIME, KVER(4, 14, 0));
+    return do_forward4(skb, ETHER, UPSTREAM, UPDATETIME, KVER_4_14);
 }
 // Partial (TCP-only: will not update 'last_used' field) implementations for 4.14+ kernels.
@@ -719,15 +719,15 @@ DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_ether$opt",
 // RAWIP: Required for 5.4-R kernels -- which always support bpf_skb_change_head().
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$5_4", TETHERING_UID, TETHERING_GID,
-                           sched_cls_tether_downstream4_rawip_5_4, KVER(5, 4, 0), KVER(5, 8, 0))
+                           sched_cls_tether_downstream4_rawip_5_4, KVER_5_4, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, RAWIP, DOWNSTREAM, NO_UPDATETIME, KVER(5, 4, 0));
+    return do_forward4(skb, RAWIP, DOWNSTREAM, NO_UPDATETIME, KVER_5_4);
 }
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$5_4", TETHERING_UID, TETHERING_GID,
-                           sched_cls_tether_upstream4_rawip_5_4, KVER(5, 4, 0), KVER(5, 8, 0))
+                           sched_cls_tether_upstream4_rawip_5_4, KVER_5_4, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, RAWIP, UPSTREAM, NO_UPDATETIME, KVER(5, 4, 0));
+    return do_forward4(skb, RAWIP, UPSTREAM, NO_UPDATETIME, KVER_5_4);
 }
 // RAWIP: Optional for 4.14/4.19 (R) kernels -- which support bpf_skb_change_head().
@@ -736,31 +736,31 @@ DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$5_4", TETHERING_UID,
 DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$4_14",
                                     TETHERING_UID, TETHERING_GID,
                                     sched_cls_tether_downstream4_rawip_4_14,
-                                    KVER(4, 14, 0), KVER(5, 4, 0))
+                                    KVER_4_14, KVER_5_4)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, RAWIP, DOWNSTREAM, NO_UPDATETIME, KVER(4, 14, 0));
+    return do_forward4(skb, RAWIP, DOWNSTREAM, NO_UPDATETIME, KVER_4_14);
 }
 DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$4_14",
                                     TETHERING_UID, TETHERING_GID,
                                     sched_cls_tether_upstream4_rawip_4_14,
-                                    KVER(4, 14, 0), KVER(5, 4, 0))
+                                    KVER_4_14, KVER_5_4)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, RAWIP, UPSTREAM, NO_UPDATETIME, KVER(4, 14, 0));
+    return do_forward4(skb, RAWIP, UPSTREAM, NO_UPDATETIME, KVER_4_14);
 }
 // ETHER: Required for 4.14-Q/R, 4.19-Q/R & 5.4-R kernels.
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_ether$4_14", TETHERING_UID, TETHERING_GID,
-                           sched_cls_tether_downstream4_ether_4_14, KVER(4, 14, 0), KVER(5, 8, 0))
+                           sched_cls_tether_downstream4_ether_4_14, KVER_4_14, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, ETHER, DOWNSTREAM, NO_UPDATETIME, KVER(4, 14, 0));
+    return do_forward4(skb, ETHER, DOWNSTREAM, NO_UPDATETIME, KVER_4_14);
 }
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_ether$4_14", TETHERING_UID, TETHERING_GID,
-                           sched_cls_tether_upstream4_ether_4_14, KVER(4, 14, 0), KVER(5, 8, 0))
+                           sched_cls_tether_upstream4_ether_4_14, KVER_4_14, KVER_5_8)
 (struct __sk_buff* skb) {
-    return do_forward4(skb, ETHER, UPSTREAM, NO_UPDATETIME, KVER(4, 14, 0));
+    return do_forward4(skb, ETHER, UPSTREAM, NO_UPDATETIME, KVER_4_14);
 }
 // Placeholder (no-op) implementations for older Q kernels
@@ -768,13 +768,13 @@ DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_ether$4_14", TETHERING_UID
 // RAWIP: 4.9-P/Q, 4.14-P/Q & 4.19-Q kernels -- without bpf_skb_change_head() for tc programs
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_rawip$stub", TETHERING_UID, TETHERING_GID,
-                           sched_cls_tether_downstream4_rawip_stub, KVER_NONE, KVER(5, 4, 0))
+                           sched_cls_tether_downstream4_rawip_stub, KVER_NONE, KVER_5_4)
 (struct __sk_buff* skb) {
     return TC_ACT_PIPE;
 }
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$stub", TETHERING_UID, TETHERING_GID,
-                           sched_cls_tether_upstream4_rawip_stub, KVER_NONE, KVER(5, 4, 0))
+                           sched_cls_tether_upstream4_rawip_stub, KVER_NONE, KVER_5_4)
 (struct __sk_buff* skb) {
     return TC_ACT_PIPE;
 }
@@ -782,13 +782,13 @@ DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_rawip$stub", TETHERING_UID
 // ETHER: 4.9-P/Q kernel
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_downstream4_ether$stub", TETHERING_UID, TETHERING_GID,
-                           sched_cls_tether_downstream4_ether_stub, KVER_NONE, KVER(4, 14, 0))
+                           sched_cls_tether_downstream4_ether_stub, KVER_NONE, KVER_4_14)
 (struct __sk_buff* skb) {
     return TC_ACT_PIPE;
 }
 DEFINE_BPF_PROG_KVER_RANGE("schedcls/tether_upstream4_ether$stub", TETHERING_UID, TETHERING_GID,
-                           sched_cls_tether_upstream4_ether_stub, KVER_NONE, KVER(4, 14, 0))
+                           sched_cls_tether_upstream4_ether_stub, KVER_NONE, KVER_4_14)
 (struct __sk_buff* skb) {
     return TC_ACT_PIPE;
 }
@@ -840,7 +840,7 @@ static inline __always_inline int do_xdp_forward_rawip(struct xdp_md *ctx, const
 }
 #define DEFINE_XDP_PROG(str, func) \
-    DEFINE_BPF_PROG_KVER(str, TETHERING_UID, TETHERING_GID, func, KVER(5, 9, 0))(struct xdp_md *ctx)
+    DEFINE_BPF_PROG_KVER(str, TETHERING_UID, TETHERING_GID, func, KVER_5_9)(struct xdp_md *ctx)
 DEFINE_XDP_PROG("xdp/tether_downstream_ether",
                 xdp_tether_downstream_ether) {
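
The $5_8 / $5_4 / $stub variants above rely on the loader comparing the running kernel against each program's [min_kver, max_kver) window, encoded as (a << 24) + (b << 16) + c by the KVER() macro in the header change at the bottom of this commit. A small host-side sketch of that selection idea; the kver_encode() helper, the variant table, and the inclusive/exclusive bound handling are illustrative, not the actual bpfloader code:

    #include <stdint.h>
    #include <stdio.h>

    // Same packing as the KVER(a, b, c) macro added later in this commit.
    static uint32_t kver_encode(unsigned a, unsigned b, unsigned c) {
        return (a << 24) + (b << 16) + c;
    }

    struct prog_variant {
        const char* name;
        uint32_t min_kver;  // assumed inclusive
        uint32_t max_kver;  // assumed exclusive
    };

    // Illustrative table mirroring three of the tether_downstream4_rawip variants above.
    static const struct prog_variant variants[] = {
        { "tether_downstream4_rawip$stub", 0,                    kver_encode(5, 4, 0) },
        { "tether_downstream4_rawip$5_4",  kver_encode(5, 4, 0), kver_encode(5, 8, 0) },
        { "tether_downstream4_rawip$5_8",  kver_encode(5, 8, 0), 0xFFFFFFFFu },
    };

    int main(void) {
        uint32_t running = kver_encode(5, 10, 0);  // e.g. a device on a 5.10 kernel
        for (unsigned i = 0; i < sizeof(variants) / sizeof(variants[0]); i++) {
            if (running >= variants[i].min_kver && running < variants[i].max_kver)
                printf("would load: %s\n", variants[i].name);  // -> ...$5_8
        }
        return 0;
    }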

View File

@@ -49,7 +49,7 @@ DEFINE_BPF_MAP_GRW(tether_downstream6_map, HASH, TetherDownstream6Key, Tether6Va
 DEFINE_BPF_MAP_GRW(bitmap, ARRAY, int, uint64_t, 2, TETHERING_GID)
 DEFINE_BPF_PROG_KVER("xdp/drop_ipv4_udp_ether", TETHERING_UID, TETHERING_GID,
-                     xdp_test, KVER(5, 9, 0))
+                     xdp_test, KVER_5_9)
 (struct xdp_md *ctx) {
     void *data = (void *)(long)ctx->data;
     void *data_end = (void *)(long)ctx->data_end;

View File

@@ -105,9 +105,19 @@
  * implemented in the kernel sources.
  */
-#define KVER_NONE 0
-#define KVER(a, b, c) (((a) << 24) + ((b) << 16) + (c))
-#define KVER_INF 0xFFFFFFFFu
+struct kver_uint { unsigned int kver; };
+#define KVER_(v) ((struct kver_uint){ .kver = (v) })
+#define KVER(a, b, c) KVER_(((a) << 24) + ((b) << 16) + (c))
+#define KVER_NONE KVER_(0)
+#define KVER_4_14 KVER(4, 14, 0)
+#define KVER_4_19 KVER(4, 19, 0)
+#define KVER_5_4 KVER(5, 4, 0)
+#define KVER_5_8 KVER(5, 8, 0)
+#define KVER_5_9 KVER(5, 9, 0)
+#define KVER_5_15 KVER(5, 15, 0)
+#define KVER_INF KVER_(0xFFFFFFFFu)
+#define KVER_IS_AT_LEAST(kver, a, b, c) ((kver).kver >= KVER(a, b, c).kver)
 /*
  * BPFFS (ie. /sys/fs/bpf) labelling is as follows:
@@ -211,8 +221,8 @@ static void (*bpf_ringbuf_submit_unsafe)(const void* data, __u64 flags) = (void*
         .mode = (md), \
         .bpfloader_min_ver = (minloader), \
         .bpfloader_max_ver = (maxloader), \
-        .min_kver = (minkver), \
-        .max_kver = (maxkver), \
+        .min_kver = (minkver).kver, \
+        .max_kver = (maxkver).kver, \
         .selinux_context = (selinux), \
         .pin_subdir = (pindir), \
         .shared = (share).shared, \
@@ -232,7 +242,7 @@ static void (*bpf_ringbuf_submit_unsafe)(const void* data, __u64 flags) = (void*
                             selinux, pindir, share, min_loader, max_loader, \
                             ignore_eng, ignore_user, ignore_userdebug) \
     DEFINE_BPF_MAP_BASE(the_map, RINGBUF, 0, 0, size_bytes, usr, grp, md, \
-                        selinux, pindir, share, KVER(5, 8, 0), KVER_INF, \
+                        selinux, pindir, share, KVER_5_8, KVER_INF, \
                         min_loader, max_loader, ignore_eng, ignore_user, \
                         ignore_userdebug); \
     \
@@ -364,8 +374,8 @@ static long (*bpf_get_current_comm)(void* buf, uint32_t buf_size) = (void*) BPF_
     const struct bpf_prog_def SECTION("progs") the_prog##_def = { \
         .uid = (prog_uid), \
         .gid = (prog_gid), \
-        .min_kver = (min_kv), \
-        .max_kver = (max_kv), \
+        .min_kver = (min_kv).kver, \
+        .max_kver = (max_kv).kver, \
         .optional = (opt).optional, \
         .bpfloader_min_ver = (min_loader), \
         .bpfloader_max_ver = (max_loader), \
@@ -423,8 +433,8 @@ static long (*bpf_get_current_comm)(void* buf, uint32_t buf_size) = (void*) BPF_
 // programs with no kernel version requirements
 #define DEFINE_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
-    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, 0, KVER_INF, \
+    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, KVER_NONE, KVER_INF, \
                                    MANDATORY)
 #define DEFINE_OPTIONAL_BPF_PROG(SECTION_NAME, prog_uid, prog_gid, the_prog) \
-    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, 0, KVER_INF, \
+    DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, KVER_NONE, KVER_INF, \
                                    OPTIONAL)
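
What the struct wrapper buys, in one self-contained example: with a bare 'unsigned kver', comparisons like 'kver >= 5' compiled silently and were wrong by a factor of 2^24, whereas struct kver_uint makes any raw relational comparison a compile error, so callers are forced through KVER_IS_AT_LEAST(). The check_kver() function below is illustrative, not part of the commit; the macros are the ones added above:

    struct kver_uint { unsigned int kver; };
    #define KVER_(v) ((struct kver_uint){ .kver = (v) })
    #define KVER(a, b, c) KVER_(((a) << 24) + ((b) << 16) + (c))
    #define KVER_IS_AT_LEAST(kver, a, b, c) ((kver).kver >= KVER(a, b, c).kver)

    // Illustrative helper, not from the commit.
    static inline int check_kver(const struct kver_uint kver) {
        // return kver >= 5;              // compiled (and was wrong) with 'unsigned'; now a compile error
        // return kver >= KVER(5, 8, 0);  // also a compile error: C has no relational operators for structs
        return KVER_IS_AT_LEAST(kver, 5, 8, 0);  // the only way through, and it reads as intended
    }

    int main(void) {
        return check_kver(KVER(5, 10, 0)) ? 0 : 1;  // exits 0: 5.10 >= 5.8
    }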