Snap for 6578268 from 11b584739778d1b86235c5ba844652adc438c842 to rvc-release
Change-Id: Ie92c8ccd61e1a1d06c1f5ace09067fe398bcaaa1
@@ -25,7 +25,7 @@ java_defaults {
],
static_libs: [
"androidx.annotation_annotation",
"netd_aidl_interface-V3-java",
"netd_aidl_interface-java",
"netlink-client",
"networkstack-aidl-interfaces-java",
"android.hardware.tetheroffload.config-V1.0-java",

@@ -33,7 +33,6 @@ import android.net.LinkAddress;
import android.net.LinkProperties;
import android.net.MacAddress;
import android.net.RouteInfo;
import android.net.TetherOffloadRuleParcel;
import android.net.TetheredClient;
import android.net.TetheringManager;
import android.net.TetheringRequestParcel;
@@ -65,6 +64,8 @@ import androidx.annotation.Nullable;
import com.android.internal.util.MessageUtils;
import com.android.internal.util.State;
import com.android.internal.util.StateMachine;
import com.android.networkstack.tethering.BpfCoordinator;
import com.android.networkstack.tethering.BpfCoordinator.Ipv6ForwardingRule;
import com.android.networkstack.tethering.PrivateAddressCoordinator;

import java.io.IOException;
@@ -76,7 +77,6 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Objects;
import java.util.Random;
@@ -225,6 +225,8 @@ public class IpServer extends StateMachine {

private final SharedLog mLog;
private final INetd mNetd;
@NonNull
private final BpfCoordinator mBpfCoordinator;
private final Callback mCallback;
private final InterfaceController mInterfaceCtrl;
private final PrivateAddressCoordinator mPrivateAddressCoordinator;
@@ -269,43 +271,6 @@ public class IpServer extends StateMachine {
}
}

static class Ipv6ForwardingRule {
public final int upstreamIfindex;
public final int downstreamIfindex;
public final Inet6Address address;
public final MacAddress srcMac;
public final MacAddress dstMac;

Ipv6ForwardingRule(int upstreamIfindex, int downstreamIfIndex, Inet6Address address,
MacAddress srcMac, MacAddress dstMac) {
this.upstreamIfindex = upstreamIfindex;
this.downstreamIfindex = downstreamIfIndex;
this.address = address;
this.srcMac = srcMac;
this.dstMac = dstMac;
}

public Ipv6ForwardingRule onNewUpstream(int newUpstreamIfindex) {
return new Ipv6ForwardingRule(newUpstreamIfindex, downstreamIfindex, address, srcMac,
dstMac);
}

// Don't manipulate TetherOffloadRuleParcel directly because implementing onNewUpstream()
// would be error-prone due to generated stable AIDL classes not having a copy constructor.
public TetherOffloadRuleParcel toTetherOffloadRuleParcel() {
final TetherOffloadRuleParcel parcel = new TetherOffloadRuleParcel();
parcel.inputInterfaceIndex = upstreamIfindex;
parcel.outputInterfaceIndex = downstreamIfindex;
parcel.destination = address.getAddress();
parcel.prefixLength = 128;
parcel.srcL2Address = srcMac.toByteArray();
parcel.dstL2Address = dstMac.toByteArray();
return parcel;
}
}
private final LinkedHashMap<Inet6Address, Ipv6ForwardingRule> mIpv6ForwardingRules =
new LinkedHashMap<>();

private final IpNeighborMonitor mIpNeighborMonitor;

private LinkAddress mIpv4Address;
@@ -314,11 +279,13 @@ public class IpServer extends StateMachine {
// object. It helps to reduce the arguments of the constructor.
public IpServer(
String ifaceName, Looper looper, int interfaceType, SharedLog log,
INetd netd, Callback callback, boolean usingLegacyDhcp, boolean usingBpfOffload,
INetd netd, @NonNull BpfCoordinator coordinator, Callback callback,
boolean usingLegacyDhcp, boolean usingBpfOffload,
PrivateAddressCoordinator addressCoordinator, Dependencies deps) {
super(ifaceName, looper);
mLog = log.forSubComponent(ifaceName);
mNetd = netd;
mBpfCoordinator = coordinator;
mCallback = callback;
mInterfaceCtrl = new InterfaceController(ifaceName, mNetd, mLog);
mIfaceName = ifaceName;
@@ -749,6 +716,14 @@ public class IpServer extends StateMachine {
}

upstreamIfindex = mDeps.getIfindex(upstreamIface);

// Add the upstream index-to-name mapping for tether stats usage in the coordinator.
// Although this mapping could be added by both the Tethering and IpServer classes, adding
// it from IpServer guarantees that the mapping is in place before any forwarding rules are
// added. That matters because the two classes run different state machines, and it is hard
// to guarantee the link property update order between multiple state machines.
mBpfCoordinator.addUpstreamNameToLookupTable(upstreamIfindex, upstreamIface);
}

// If v6only is null, we pass in null to setRaParams(), which handles
@@ -864,43 +839,29 @@ public class IpServer extends StateMachine {
// TODO: Perhaps remove this protection check.
if (!mUsingBpfOffload) return;

try {
mNetd.tetherOffloadRuleAdd(rule.toTetherOffloadRuleParcel());
mIpv6ForwardingRules.put(rule.address, rule);
} catch (RemoteException | ServiceSpecificException e) {
mLog.e("Could not add IPv6 downstream rule: ", e);
}
mBpfCoordinator.tetherOffloadRuleAdd(this, rule);
}

private void removeIpv6ForwardingRule(Ipv6ForwardingRule rule, boolean removeFromMap) {
// Theoretically, we don't need this check because IP neighbor monitor doesn't start if BPF
// offload is disabled. Add this check just in case.
private void removeIpv6ForwardingRule(Ipv6ForwardingRule rule) {
// TODO: Perhaps remove this protection check.
// See the related comment in #addIpv6ForwardingRule.
if (!mUsingBpfOffload) return;

try {
mNetd.tetherOffloadRuleRemove(rule.toTetherOffloadRuleParcel());
if (removeFromMap) {
mIpv6ForwardingRules.remove(rule.address);
}
} catch (RemoteException | ServiceSpecificException e) {
mLog.e("Could not remove IPv6 downstream rule: ", e);
}
mBpfCoordinator.tetherOffloadRuleRemove(this, rule);
}

private void clearIpv6ForwardingRules() {
for (Ipv6ForwardingRule rule : mIpv6ForwardingRules.values()) {
removeIpv6ForwardingRule(rule, false /*removeFromMap*/);
}
mIpv6ForwardingRules.clear();
if (!mUsingBpfOffload) return;

mBpfCoordinator.tetherOffloadRuleClear(this);
}

// Convenience method to replace a rule with the same rule on a new upstream interface.
// Allows replacing the rules in one iteration pass without ConcurrentModificationExceptions.
// Relies on the fact that rules are in a map indexed by IP address.
private void updateIpv6ForwardingRule(Ipv6ForwardingRule rule, int newIfindex) {
addIpv6ForwardingRule(rule.onNewUpstream(newIfindex));
removeIpv6ForwardingRule(rule, false /*removeFromMap*/);
private void updateIpv6ForwardingRule(int newIfindex) {
// TODO: Perhaps remove this protection check.
// See the related comment in #addIpv6ForwardingRule.
if (!mUsingBpfOffload) return;

mBpfCoordinator.tetherOffloadRuleUpdate(this, newIfindex);
}

// Handles all updates to IPv6 forwarding rules. These can currently change only if the upstream
@@ -916,9 +877,7 @@ public class IpServer extends StateMachine {
// If the upstream interface has changed, remove all rules and re-add them with the new
// upstream interface.
if (prevUpstreamIfindex != upstreamIfindex) {
for (Ipv6ForwardingRule rule : mIpv6ForwardingRules.values()) {
updateIpv6ForwardingRule(rule, upstreamIfindex);
}
updateIpv6ForwardingRule(upstreamIfindex);
}

// If we're here to process a NeighborEvent, do so now.
@@ -938,7 +897,7 @@ public class IpServer extends StateMachine {
if (e.isValid()) {
addIpv6ForwardingRule(rule);
} else {
removeIpv6ForwardingRule(rule, true /*removeFromMap*/);
removeIpv6ForwardingRule(rule);
}
}

@@ -15,18 +15,93 @@
*/
package android.net.util;

import android.net.TetherStatsParcel;
import android.net.TetheringRequestParcel;

import androidx.annotation.NonNull;

import java.io.FileDescriptor;
import java.net.SocketException;
import java.util.Objects;

/**
* Native methods for tethering utilization.
* The classes and the methods for tethering utilization.
*
* {@hide}
*/
public class TetheringUtils {
/**
* The object which records offload Tx/Rx forwarded bytes/packets.
* TODO: Replace the inner class ForwardedStats of class OffloadHardwareInterface with
* this class as well.
*/
public static class ForwardedStats {
public final long rxBytes;
public final long rxPackets;
public final long txBytes;
public final long txPackets;

public ForwardedStats() {
rxBytes = 0;
rxPackets = 0;
txBytes = 0;
txPackets = 0;
}

public ForwardedStats(long rxBytes, long txBytes) {
this.rxBytes = rxBytes;
this.rxPackets = 0;
this.txBytes = txBytes;
this.txPackets = 0;
}

public ForwardedStats(long rxBytes, long rxPackets, long txBytes, long txPackets) {
this.rxBytes = rxBytes;
this.rxPackets = rxPackets;
this.txBytes = txBytes;
this.txPackets = txPackets;
}

public ForwardedStats(@NonNull TetherStatsParcel tetherStats) {
rxBytes = tetherStats.rxBytes;
rxPackets = tetherStats.rxPackets;
txBytes = tetherStats.txBytes;
txPackets = tetherStats.txPackets;
}

public ForwardedStats(@NonNull ForwardedStats other) {
rxBytes = other.rxBytes;
rxPackets = other.rxPackets;
txBytes = other.txBytes;
txPackets = other.txPackets;
}

/** Add Tx/Rx bytes/packets and return the result as a new object. */
@NonNull
public ForwardedStats add(@NonNull ForwardedStats other) {
return new ForwardedStats(rxBytes + other.rxBytes, rxPackets + other.rxPackets,
txBytes + other.txBytes, txPackets + other.txPackets);
}

/** Subtract Tx/Rx bytes/packets and return the result as a new object. */
@NonNull
public ForwardedStats subtract(@NonNull ForwardedStats other) {
// TODO: Perhaps throw an exception if any difference is negative, just in case.
final long rxBytesDiff = Math.max(rxBytes - other.rxBytes, 0);
final long rxPacketsDiff = Math.max(rxPackets - other.rxPackets, 0);
final long txBytesDiff = Math.max(txBytes - other.txBytes, 0);
final long txPacketsDiff = Math.max(txPackets - other.txPackets, 0);
return new ForwardedStats(rxBytesDiff, rxPacketsDiff, txBytesDiff, txPacketsDiff);
}

/** Returns the string representation of this object. */
@NonNull
public String toString() {
return String.format("ForwardedStats(rxb: %d, rxp: %d, txb: %d, txp: %d)", rxBytes,
rxPackets, txBytes, txPackets);
}
}
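For reference only (not part of this change): a minimal sketch of how the add/subtract helpers above are meant to compose per-interface deltas, using hypothetical byte and packet counts.

    // Illustrative usage of TetheringUtils.ForwardedStats; all values are made up.
    final ForwardedStats base = new ForwardedStats(1000L /* rxBytes */, 2000L /* txBytes */);
    final ForwardedStats curr = new ForwardedStats(1500L, 9L, 2600L, 12L);

    // Delta since the previous snapshot; negative differences are clamped to zero by subtract().
    final ForwardedStats diff = curr.subtract(base); // rxBytes = 500, txBytes = 600

    // Accumulate deltas across polls.
    ForwardedStats total = new ForwardedStats();
    total = total.add(diff);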

/**
* Configures a socket for receiving ICMPv6 router solicitations and sending advertisements.
* @param fd the socket's {@link FileDescriptor}.

@@ -0,0 +1,637 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package com.android.networkstack.tethering;

import static android.net.NetworkStats.DEFAULT_NETWORK_NO;
import static android.net.NetworkStats.METERED_NO;
import static android.net.NetworkStats.ROAMING_NO;
import static android.net.NetworkStats.SET_DEFAULT;
import static android.net.NetworkStats.TAG_NONE;
import static android.net.NetworkStats.UID_ALL;
import static android.net.NetworkStats.UID_TETHERING;
import static android.net.netstats.provider.NetworkStatsProvider.QUOTA_UNLIMITED;

import android.app.usage.NetworkStatsManager;
import android.net.INetd;
import android.net.MacAddress;
import android.net.NetworkStats;
import android.net.NetworkStats.Entry;
import android.net.TetherOffloadRuleParcel;
import android.net.TetherStatsParcel;
import android.net.ip.IpServer;
import android.net.netstats.provider.NetworkStatsProvider;
import android.net.util.SharedLog;
import android.net.util.TetheringUtils.ForwardedStats;
import android.os.Handler;
import android.os.RemoteException;
import android.os.ServiceSpecificException;
import android.text.TextUtils;
import android.util.Log;
import android.util.SparseArray;

import androidx.annotation.NonNull;
import androidx.annotation.Nullable;

import com.android.internal.annotations.VisibleForTesting;

import java.net.Inet6Address;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Objects;

/**
* This coordinator is responsible for providing BPF offload relevant functionality.
* - Get tethering stats.
* - Set data limit.
* - Set global alert.
* - Add/remove forwarding rules.
*
* @hide
*/
public class BpfCoordinator {
private static final String TAG = BpfCoordinator.class.getSimpleName();
@VisibleForTesting
static final int DEFAULT_PERFORM_POLL_INTERVAL_MS = 5000; // TODO: Make it customizable.

@VisibleForTesting
enum StatsType {
STATS_PER_IFACE,
STATS_PER_UID,
}

@NonNull
private final Handler mHandler;
@NonNull
private final INetd mNetd;
@NonNull
private final SharedLog mLog;
@NonNull
private final Dependencies mDeps;
@Nullable
private final BpfTetherStatsProvider mStatsProvider;

// Tracks whether BPF tethering is started or not. This is set by tethering before it
// starts the first IpServer and is cleared by tethering shortly before the last IpServer
// is stopped. Note that rule updates (especially deletions, but sometimes additions as
// well) may arrive when this is false. If they do, they must be communicated to netd.
// Changes in data limits may also arrive when this is false, and if they do, they must
// also be communicated to netd.
private boolean mPollingStarted = false;

// Tracks the remaining alert quota. Unlike the limit quota, which is per-interface, the
// alert quota is interface-independent and global for tether offload.
private long mRemainingAlertQuota = QUOTA_UNLIMITED;

// Maps upstream interface index to offloaded traffic statistics.
// Always contains the latest total bytes/packets received from the BPF maps for each
// interface since that upstream was started.
private final SparseArray<ForwardedStats> mStats = new SparseArray<>();

// Maps upstream interface names to interface quotas.
// Always contains the latest value received from the framework for each interface, regardless
// of whether offload is currently running (or is even supported) on that interface. Only
// includes interfaces that have a quota set. Note that this map stores the quota set by the
// service; because the service identifies an interface by name, the map is keyed by
// interface name.
private final HashMap<String, Long> mInterfaceQuotas = new HashMap<>();

// Maps upstream interface index to interface names.
// Stores all interface names since boot. Used to look up the interface name for tether stats
// received from netd, because netd identifies an interface by its index.
// TODO: Remove the unused interface name.
private final SparseArray<String> mInterfaceNames = new SparseArray<>();

// Map of downstream rule maps. Each of these maps represents the IPv6 forwarding rules for a
// given downstream. Each map:
// - Is owned by the IpServer that is responsible for that downstream.
// - Must only be modified by that IpServer.
// - Is created when the IpServer adds its first rule, and deleted when the IpServer deletes
// its last rule (or clears its rules).
// TODO: Perhaps seal the map and the rule operations that communicate with netd into a class.
// TODO: Does this need to be a LinkedHashMap or can it just be a HashMap? Also, could it be
// a ConcurrentHashMap, in order to avoid the copies in tetherOffloadRuleClear
// and tetherOffloadRuleUpdate?
// TODO: Perhaps use a one-dimensional map and access specific downstream rules via the
// downstream index. For that, IpServer must guarantee that it always has a valid IPv6
// downstream interface index when calling the function that clears all rules. IpServer may
// call the clear-rules function without a valid IPv6 downstream interface index even if it
// had one before. IpServer would need to call getInterfaceParams() in the constructor instead
// of when startIpv6() is called, and make mInterfaceParams final.
private final HashMap<IpServer, LinkedHashMap<Inet6Address, Ipv6ForwardingRule>>
mIpv6ForwardingRules = new LinkedHashMap<>();

// Runnable used to schedule the next stats polling.
private final Runnable mScheduledPollingTask = () -> {
updateForwardedStatsFromNetd();
maybeSchedulePollingStats();
};

@VisibleForTesting
public static class Dependencies {
int getPerformPollInterval() {
// TODO: Consider making this configurable.
return DEFAULT_PERFORM_POLL_INTERVAL_MS;
}
}

@VisibleForTesting
public BpfCoordinator(@NonNull Handler handler, @NonNull INetd netd,
@NonNull NetworkStatsManager nsm, @NonNull SharedLog log, @NonNull Dependencies deps) {
mHandler = handler;
mNetd = netd;
mLog = log.forSubComponent(TAG);
BpfTetherStatsProvider provider = new BpfTetherStatsProvider();
try {
nsm.registerNetworkStatsProvider(getClass().getSimpleName(), provider);
} catch (RuntimeException e) {
// TODO: Perhaps disallow BPF offload in this case, because the registration failure
// implies that no data limit could be applied on a metered upstream, if any.
Log.wtf(TAG, "Cannot register offload stats provider: " + e);
provider = null;
}
mStatsProvider = provider;
mDeps = deps;
}

/**
* Start BPF tethering offload stats polling when the first upstream is started.
* Note that this can only be called on the handler thread.
* TODO: Perhaps check BPF support before starting.
* TODO: Start the stats polling only if there is any client on the downstream.
*/
public void startPolling() {
if (mPollingStarted) return;

mPollingStarted = true;
maybeSchedulePollingStats();

mLog.i("Polling started");
}

/**
* Stop BPF tethering offload stats polling.
* The data limit cleanup and the tether stats maps cleanup are not implemented here.
* These cleanups rely on all IpServers calling #tetherOffloadRuleRemove. After the
* last rule is removed from the upstream, #tetherOffloadRuleRemove performs the cleanup.
* Note that this can only be called on the handler thread.
*/
public void stopPolling() {
if (!mPollingStarted) return;

// Stop scheduled polling tasks and poll the latest stats from BPF maps.
if (mHandler.hasCallbacks(mScheduledPollingTask)) {
mHandler.removeCallbacks(mScheduledPollingTask);
}
updateForwardedStatsFromNetd();
mPollingStarted = false;

mLog.i("Polling stopped");
}

/**
* Add a forwarding rule. After adding the first rule on a given upstream, the data limit
* must be set on that upstream.
* Note that this can only be called on the handler thread.
*/
public void tetherOffloadRuleAdd(
@NonNull final IpServer ipServer, @NonNull final Ipv6ForwardingRule rule) {
try {
// TODO: Perhaps avoid adding a duplicate rule.
mNetd.tetherOffloadRuleAdd(rule.toTetherOffloadRuleParcel());
} catch (RemoteException | ServiceSpecificException e) {
mLog.e("Could not add IPv6 forwarding rule: ", e);
return;
}

if (!mIpv6ForwardingRules.containsKey(ipServer)) {
mIpv6ForwardingRules.put(ipServer, new LinkedHashMap<Inet6Address,
Ipv6ForwardingRule>());
}
LinkedHashMap<Inet6Address, Ipv6ForwardingRule> rules = mIpv6ForwardingRules.get(ipServer);

// Set up the data limit on the given upstream if this is the first rule being added.
final int upstreamIfindex = rule.upstreamIfindex;
if (!isAnyRuleOnUpstream(upstreamIfindex)) {
// If setting a data limit fails, this upstream should probably not be used, because
// it may blow through the data limit that it was told to apply.
// TODO: Perhaps stop the coordinator.
boolean success = updateDataLimit(upstreamIfindex);
if (!success) {
final String iface = mInterfaceNames.get(upstreamIfindex);
mLog.e("Setting data limit for " + iface + " failed.");
}
}

// The new rule must be added to the map after calling #isAnyRuleOnUpstream, because that
// method checks whether this is the first rule for the given upstream.
rules.put(rule.address, rule);
}

/**
* Remove a forwarding rule. After removing the last rule on a given upstream, the data
* limit must be cleared, the last tether stats updated and the tether stats removed from
* the BPF maps.
* Note that this can only be called on the handler thread.
*/
public void tetherOffloadRuleRemove(
@NonNull final IpServer ipServer, @NonNull final Ipv6ForwardingRule rule) {
try {
// TODO: Perhaps avoid removing a non-existent rule.
mNetd.tetherOffloadRuleRemove(rule.toTetherOffloadRuleParcel());
} catch (RemoteException | ServiceSpecificException e) {
mLog.e("Could not remove IPv6 forwarding rule: ", e);
return;
}

LinkedHashMap<Inet6Address, Ipv6ForwardingRule> rules = mIpv6ForwardingRules.get(ipServer);
if (rules == null) return;

// The rule must be removed before calling #isAnyRuleOnUpstream, because that method
// checks whether the last rule has been removed for a given upstream. If no rule was
// removed, return early to avoid unnecessary work on a non-existent rule which may never
// have been added or has already been removed.
if (rules.remove(rule.address) == null) return;

// Remove the downstream entry if it has no more rules.
if (rules.isEmpty()) {
mIpv6ForwardingRules.remove(ipServer);
}

// Perform the cleanup if there are no more rules on the given upstream.
final int upstreamIfindex = rule.upstreamIfindex;
if (!isAnyRuleOnUpstream(upstreamIfindex)) {
try {
final TetherStatsParcel stats =
mNetd.tetherOffloadGetAndClearStats(upstreamIfindex);
// Update the last stats delta and delete the local cache for a given upstream.
updateQuotaAndStatsFromSnapshot(new TetherStatsParcel[] {stats});
mStats.remove(upstreamIfindex);
} catch (RemoteException | ServiceSpecificException e) {
Log.wtf(TAG, "Exception when cleaning up tether stats for upstream index "
+ upstreamIfindex + ": ", e);
}
}
}

/**
* Clear all forwarding rules for a given downstream.
* Note that this can only be called on the handler thread.
*/
public void tetherOffloadRuleClear(@NonNull final IpServer ipServer) {
final LinkedHashMap<Inet6Address, Ipv6ForwardingRule> rules = mIpv6ForwardingRules.get(
ipServer);
if (rules == null) return;

// Build a rule list first because the rule map may be changed during the iteration.
for (final Ipv6ForwardingRule rule : new ArrayList<Ipv6ForwardingRule>(rules.values())) {
tetherOffloadRuleRemove(ipServer, rule);
}
}

/**
* Update the existing forwarding rules to a new upstream for a given downstream.
* Note that this can only be called on the handler thread.
*/
public void tetherOffloadRuleUpdate(@NonNull final IpServer ipServer, int newUpstreamIfindex) {
final LinkedHashMap<Inet6Address, Ipv6ForwardingRule> rules = mIpv6ForwardingRules.get(
ipServer);
if (rules == null) return;

// Build a rule list first because the rule map may be changed during the iteration.
for (final Ipv6ForwardingRule rule : new ArrayList<Ipv6ForwardingRule>(rules.values())) {
// Remove the old rule before adding the new one because the map uses the same key for
// both rules. Reversing the order would unexpectedly remove the new rule.
// TODO: Add the new rule first, to shrink the window during which no rule is installed.
tetherOffloadRuleRemove(ipServer, rule);
tetherOffloadRuleAdd(ipServer, rule.onNewUpstream(newUpstreamIfindex));
}
}
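Taken together, the entry points above are what IpServer now calls instead of programming netd directly. A condensed sketch of the intended call pattern, with a hypothetical coordinator/ipServer/rule shown purely for illustration:

    // Neighbor became reachable on the downstream: install a rule.
    coordinator.tetherOffloadRuleAdd(ipServer, rule);

    // Neighbor failed (NUD_FAILED) or was deleted: remove the rule.
    coordinator.tetherOffloadRuleRemove(ipServer, rule);

    // The upstream interface changed: move all of this downstream's rules to the new upstream.
    coordinator.tetherOffloadRuleUpdate(ipServer, newUpstreamIfindex);

    // The downstream or its IPv6 upstream went away: drop all of its rules.
    coordinator.tetherOffloadRuleClear(ipServer);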

/**
* Add an upstream name to the lookup table. The lookup table is used to map the interface
* index in BPF tether stats back to an interface name, because netd only reports the
* interface index but the service expects the interface name in the NetworkStats object.
* Note that this can only be called on the handler thread.
*/
public void addUpstreamNameToLookupTable(int upstreamIfindex, @NonNull String upstreamIface) {
if (upstreamIfindex == 0 || TextUtils.isEmpty(upstreamIface)) return;

// The same interface index to name mapping may be added by different IpServer objects or
// re-added by reconnection on the same upstream interface. Ignore the duplicate one.
final String iface = mInterfaceNames.get(upstreamIfindex);
if (iface == null) {
mInterfaceNames.put(upstreamIfindex, upstreamIface);
} else if (!TextUtils.equals(iface, upstreamIface)) {
Log.wtf(TAG, "The upstream interface name " + upstreamIface
+ " is different from the existing interface name "
+ iface + " for index " + upstreamIfindex);
}
}

/** IPv6 forwarding rule class. */
public static class Ipv6ForwardingRule {
public final int upstreamIfindex;
public final int downstreamIfindex;

@NonNull
public final Inet6Address address;
@NonNull
public final MacAddress srcMac;
@NonNull
public final MacAddress dstMac;

public Ipv6ForwardingRule(int upstreamIfindex, int downstreamIfIndex,
@NonNull Inet6Address address, @NonNull MacAddress srcMac,
@NonNull MacAddress dstMac) {
this.upstreamIfindex = upstreamIfindex;
this.downstreamIfindex = downstreamIfIndex;
this.address = address;
this.srcMac = srcMac;
this.dstMac = dstMac;
}

/** Return a new rule object updated with the new upstream index. */
@NonNull
public Ipv6ForwardingRule onNewUpstream(int newUpstreamIfindex) {
return new Ipv6ForwardingRule(newUpstreamIfindex, downstreamIfindex, address, srcMac,
dstMac);
}

/**
* Don't manipulate TetherOffloadRuleParcel directly because implementing onNewUpstream()
* would be error-prone due to generated stable AIDL classes not having a copy constructor.
*/
@NonNull
public TetherOffloadRuleParcel toTetherOffloadRuleParcel() {
final TetherOffloadRuleParcel parcel = new TetherOffloadRuleParcel();
parcel.inputInterfaceIndex = upstreamIfindex;
parcel.outputInterfaceIndex = downstreamIfindex;
parcel.destination = address.getAddress();
parcel.prefixLength = 128;
parcel.srcL2Address = srcMac.toByteArray();
parcel.dstL2Address = dstMac.toByteArray();
return parcel;
}

@Override
public boolean equals(Object o) {
if (!(o instanceof Ipv6ForwardingRule)) return false;
Ipv6ForwardingRule that = (Ipv6ForwardingRule) o;
return this.upstreamIfindex == that.upstreamIfindex
&& this.downstreamIfindex == that.downstreamIfindex
&& Objects.equals(this.address, that.address)
&& Objects.equals(this.srcMac, that.srcMac)
&& Objects.equals(this.dstMac, that.dstMac);
}

@Override
public int hashCode() {
// TODO: if this is ever used in production code, don't pass ifindices
// to Objects.hash() to avoid autoboxing overhead.
return Objects.hash(upstreamIfindex, downstreamIfindex, address, srcMac, dstMac);
}
}

/**
* A BPF tethering stats provider to provide network statistics to the system.
* Note that this class' data may only be accessed on the handler thread.
*/
@VisibleForTesting
class BpfTetherStatsProvider extends NetworkStatsProvider {
// The offloaded traffic statistics per interface that have not been reported since the
// last call to pushTetherStats. Only interfaces that were ever tethering upstreams and
// have a pending tether stats delta are included in this NetworkStats object.
private NetworkStats mIfaceStats = new NetworkStats(0L, 0);

// The same stats as above, but counts network stats per uid.
private NetworkStats mUidStats = new NetworkStats(0L, 0);

@Override
public void onRequestStatsUpdate(int token) {
mHandler.post(() -> pushTetherStats());
}

@Override
public void onSetAlert(long quotaBytes) {
mHandler.post(() -> updateAlertQuota(quotaBytes));
}

@Override
public void onSetLimit(@NonNull String iface, long quotaBytes) {
if (quotaBytes < QUOTA_UNLIMITED) {
throw new IllegalArgumentException("invalid quota value " + quotaBytes);
}

mHandler.post(() -> {
final Long curIfaceQuota = mInterfaceQuotas.get(iface);

if (null == curIfaceQuota && QUOTA_UNLIMITED == quotaBytes) return;

if (quotaBytes == QUOTA_UNLIMITED) {
mInterfaceQuotas.remove(iface);
} else {
mInterfaceQuotas.put(iface, quotaBytes);
}
maybeUpdateDataLimit(iface);
});
}

@VisibleForTesting
void pushTetherStats() {
try {
// The token is not used for now. See b/153606961.
notifyStatsUpdated(0 /* token */, mIfaceStats, mUidStats);

// Clear the accumulated tether stats delta after it has been reported. Note that new
// empty objects are created because NetworkStats#clear is @hide.
mIfaceStats = new NetworkStats(0L, 0);
mUidStats = new NetworkStats(0L, 0);
} catch (RuntimeException e) {
mLog.e("Cannot report network stats: ", e);
}
}

private void accumulateDiff(@NonNull NetworkStats ifaceDiff,
@NonNull NetworkStats uidDiff) {
mIfaceStats = mIfaceStats.add(ifaceDiff);
mUidStats = mUidStats.add(uidDiff);
}
}

private int getInterfaceIndexFromRules(@NonNull String ifName) {
for (LinkedHashMap<Inet6Address, Ipv6ForwardingRule> rules : mIpv6ForwardingRules
.values()) {
for (Ipv6ForwardingRule rule : rules.values()) {
final int upstreamIfindex = rule.upstreamIfindex;
if (TextUtils.equals(ifName, mInterfaceNames.get(upstreamIfindex))) {
return upstreamIfindex;
}
}
}
return 0;
}

private long getQuotaBytes(@NonNull String iface) {
final Long limit = mInterfaceQuotas.get(iface);
final long quotaBytes = (limit != null) ? limit : QUOTA_UNLIMITED;

return quotaBytes;
}

private boolean sendDataLimitToNetd(int ifIndex, long quotaBytes) {
if (ifIndex == 0) {
Log.wtf(TAG, "Invalid interface index.");
return false;
}

try {
mNetd.tetherOffloadSetInterfaceQuota(ifIndex, quotaBytes);
} catch (RemoteException | ServiceSpecificException e) {
mLog.e("Exception when updating quota " + quotaBytes + ": ", e);
return false;
}

return true;
}

// Handles a data limit update from the service that this stats provider is registered with.
private void maybeUpdateDataLimit(@NonNull String iface) {
// Set the data limit only on an upstream that has at least one rule. If we can't get an
// interface index for the given interface name, it means either that there is no rule for
// that upstream or that the interface name is not an upstream monitored by the
// coordinator.
final int ifIndex = getInterfaceIndexFromRules(iface);
if (ifIndex == 0) return;

final long quotaBytes = getQuotaBytes(iface);
sendDataLimitToNetd(ifIndex, quotaBytes);
}

// Handles the data limit update while adding forwarding rules.
private boolean updateDataLimit(int ifIndex) {
final String iface = mInterfaceNames.get(ifIndex);
if (iface == null) {
mLog.e("Fail to get the interface name for index " + ifIndex);
return false;
}
final long quotaBytes = getQuotaBytes(iface);
return sendDataLimitToNetd(ifIndex, quotaBytes);
}

private boolean isAnyRuleOnUpstream(int upstreamIfindex) {
for (LinkedHashMap<Inet6Address, Ipv6ForwardingRule> rules : mIpv6ForwardingRules
.values()) {
for (Ipv6ForwardingRule rule : rules.values()) {
if (upstreamIfindex == rule.upstreamIfindex) return true;
}
}
return false;
}

@NonNull
private NetworkStats buildNetworkStats(@NonNull StatsType type, int ifIndex,
@NonNull final ForwardedStats diff) {
NetworkStats stats = new NetworkStats(0L, 0);
final String iface = mInterfaceNames.get(ifIndex);
if (iface == null) {
// TODO: Use Log.wtf once the coordinator owns full control of tether stats from netd.
// For now, netd may report empty stats for an upstream that is not monitored by
// the coordinator. Silently ignore it.
return stats;
}
final int uid = (type == StatsType.STATS_PER_UID) ? UID_TETHERING : UID_ALL;
// Note that the arguments 'metered', 'roaming' and 'defaultNetwork' are not recorded in
// the network stats snapshot. See NetworkStatsRecorder#recordSnapshotLocked.
return stats.addEntry(new Entry(iface, uid, SET_DEFAULT, TAG_NONE, METERED_NO,
ROAMING_NO, DEFAULT_NETWORK_NO, diff.rxBytes, diff.rxPackets,
diff.txBytes, diff.txPackets, 0L /* operations */));
}

private void updateAlertQuota(long newQuota) {
if (newQuota < QUOTA_UNLIMITED) {
throw new IllegalArgumentException("invalid quota value " + newQuota);
}
if (mRemainingAlertQuota == newQuota) return;

mRemainingAlertQuota = newQuota;
if (mRemainingAlertQuota == 0) {
mLog.i("onAlertReached");
if (mStatsProvider != null) mStatsProvider.notifyAlertReached();
}
}

private void updateQuotaAndStatsFromSnapshot(
@NonNull final TetherStatsParcel[] tetherStatsList) {
long usedAlertQuota = 0;
for (TetherStatsParcel tetherStats : tetherStatsList) {
final Integer ifIndex = tetherStats.ifIndex;
final ForwardedStats curr = new ForwardedStats(tetherStats);
final ForwardedStats base = mStats.get(ifIndex);
final ForwardedStats diff = (base != null) ? curr.subtract(base) : curr;
usedAlertQuota += diff.rxBytes + diff.txBytes;

// Update the local cache for counting the tether stats delta.
mStats.put(ifIndex, curr);

// Push the accumulated tether stats delta to the stats provider for the service to
// query.
if (mStatsProvider != null) {
try {
mStatsProvider.accumulateDiff(
buildNetworkStats(StatsType.STATS_PER_IFACE, ifIndex, diff),
buildNetworkStats(StatsType.STATS_PER_UID, ifIndex, diff));
} catch (ArrayIndexOutOfBoundsException e) {
Log.wtf(TAG, "Fail to update the accumulated stats delta for interface index "
+ ifIndex + " : ", e);
}
}
}

if (mRemainingAlertQuota > 0 && usedAlertQuota > 0) {
// Trim to zero if it overshoots.
final long newQuota = Math.max(mRemainingAlertQuota - usedAlertQuota, 0);
updateAlertQuota(newQuota);
}

// TODO: Count the used limit quota for notifying that the data limit has been reached.
}
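The quota bookkeeping above reduces to simple saturating arithmetic; a self-contained sketch with hypothetical numbers, for illustration only:

    // Totals reported by netd are cumulative per upstream, so the per-poll delta is
    // curr - base, clamped at zero (see ForwardedStats#subtract).
    long baseBytes = 1_000_000L;   // total at the previous poll
    long currBytes = 1_600_000L;   // total at this poll
    long diffBytes = Math.max(currBytes - baseBytes, 0);   // 600_000 bytes used since last poll

    // The global alert quota is decremented by the bytes used and trimmed at zero;
    // reaching zero is what triggers notifyAlertReached() above.
    long remainingAlertQuota = 500_000L;
    remainingAlertQuota = Math.max(remainingAlertQuota - diffBytes, 0);   // 0 -> alert fires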

private void updateForwardedStatsFromNetd() {
final TetherStatsParcel[] tetherStatsList;
try {
// The reported tether stats are total data usage for all currently-active upstream
// interfaces since tethering start.
tetherStatsList = mNetd.tetherOffloadGetStats();
} catch (RemoteException | ServiceSpecificException e) {
mLog.e("Problem fetching tethering stats: ", e);
return;
}
updateQuotaAndStatsFromSnapshot(tetherStatsList);
}

private void maybeSchedulePollingStats() {
if (!mPollingStarted) return;

if (mHandler.hasCallbacks(mScheduledPollingTask)) {
mHandler.removeCallbacks(mScheduledPollingTask);
}

mHandler.postDelayed(mScheduledPollingTask, mDeps.getPerformPollInterval());
}
}
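For orientation, the hunks that follow wire the new coordinator into Tethering and IpServer. The calls below are condensed from the diff itself rather than new API:

    // Tethering constructor: obtain the coordinator through the dependency hook.
    mBpfCoordinator = mDeps.getBpfCoordinator(
            mHandler, mNetd, mLog, new BpfCoordinator.Dependencies());

    // Hand the coordinator to each IpServer so its forwarding rules go through it.
    new IpServer(iface, mLooper, interfaceType, mLog, mNetd, mBpfCoordinator,
            makeControlCallback(), mConfig.enableLegacyDhcpServer,
            mConfig.enableBpfOffload, mPrivateAddressCoordinator,
            mDeps.getIpServerDependencies());

    // Start and stop stats polling around upstream selection.
    mBpfCoordinator.startPolling();
    mBpfCoordinator.stopPolling();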
@@ -232,6 +232,7 @@ public class Tethering {
private final TetheringThreadExecutor mExecutor;
private final TetheringNotificationUpdater mNotificationUpdater;
private final UserManager mUserManager;
private final BpfCoordinator mBpfCoordinator;
private final PrivateAddressCoordinator mPrivateAddressCoordinator;
private int mActiveDataSubId = INVALID_SUBSCRIPTION_ID;
// All the usage of mTetheringEventCallback should run in the same thread.
@@ -284,6 +285,8 @@ public class Tethering {
mUpstreamNetworkMonitor = mDeps.getUpstreamNetworkMonitor(mContext, mTetherMasterSM, mLog,
TetherMasterSM.EVENT_UPSTREAM_CALLBACK);
mForwardedDownstreams = new LinkedHashSet<>();
mBpfCoordinator = mDeps.getBpfCoordinator(
mHandler, mNetd, mLog, new BpfCoordinator.Dependencies());

IntentFilter filter = new IntentFilter();
filter.addAction(ACTION_CARRIER_CONFIG_CHANGED);
@@ -1704,6 +1707,9 @@ public class Tethering {
chooseUpstreamType(true);
mTryCell = false;
}

// TODO: Check whether the upstream interface is managed by BPF offload.
mBpfCoordinator.startPolling();
}

@Override
@@ -1716,6 +1722,7 @@ public class Tethering {
mTetherUpstream = null;
reportUpstreamChanged(null);
}
mBpfCoordinator.stopPolling();
}

private boolean updateUpstreamWanted() {
@@ -2341,7 +2348,7 @@ public class Tethering {

mLog.log("adding TetheringInterfaceStateMachine for: " + iface);
final TetherState tetherState = new TetherState(
new IpServer(iface, mLooper, interfaceType, mLog, mNetd,
new IpServer(iface, mLooper, interfaceType, mLog, mNetd, mBpfCoordinator,
makeControlCallback(), mConfig.enableLegacyDhcpServer,
mConfig.enableBpfOffload, mPrivateAddressCoordinator,
mDeps.getIpServerDependencies()));

@@ -40,6 +40,17 @@ import java.util.ArrayList;
* @hide
*/
public abstract class TetheringDependencies {
/**
* Get a reference to the BpfCoordinator to be used by tethering.
*/
public @NonNull BpfCoordinator getBpfCoordinator(
@NonNull Handler handler, @NonNull INetd netd, @NonNull SharedLog log,
@NonNull BpfCoordinator.Dependencies deps) {
final NetworkStatsManager statsManager =
(NetworkStatsManager) getContext().getSystemService(Context.NETWORK_STATS_SERVICE);
return new BpfCoordinator(handler, netd, statsManager, log, deps);
}

/**
* Get a reference to the offload hardware interface to be used by tethering.
*/

@@ -24,8 +24,10 @@ import android.app.Notification.Action;
import android.app.NotificationChannel;
import android.app.NotificationManager;
import android.app.PendingIntent;
import android.content.ComponentName;
import android.content.Context;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.content.res.Configuration;
import android.content.res.Resources;
import android.net.NetworkCapabilities;
@@ -252,6 +254,14 @@ public class TetheringNotificationUpdater {
mNotificationManager.cancel(null /* tag */, id);
}

@VisibleForTesting
static String getSettingsPackageName(@NonNull final PackageManager pm) {
final Intent settingsIntent = new Intent(Settings.ACTION_SETTINGS);
final ComponentName settingsComponent = settingsIntent.resolveActivity(pm);
return settingsComponent != null
? settingsComponent.getPackageName() : "com.android.settings";
}

@VisibleForTesting
void notifyTetheringDisabledByRestriction() {
final Resources res = getResourcesForSubId(mContext, mActiveDataSubId);
@@ -262,8 +272,9 @@ public class TetheringNotificationUpdater {
final PendingIntent pi = PendingIntent.getActivity(
mContext.createContextAsUser(UserHandle.CURRENT, 0 /* flags */),
0 /* requestCode */,
new Intent(Settings.ACTION_TETHER_SETTINGS),
Intent.FLAG_ACTIVITY_NEW_TASK,
new Intent(Settings.ACTION_TETHER_SETTINGS)
.setPackage(getSettingsPackageName(mContext.getPackageManager())),
Intent.FLAG_ACTIVITY_NEW_TASK | PendingIntent.FLAG_IMMUTABLE,
null /* options */);

showNotification(R.drawable.stat_sys_tether_general, title, message,
@@ -284,7 +295,7 @@ public class TetheringNotificationUpdater {
mContext.createContextAsUser(UserHandle.CURRENT, 0 /* flags */),
0 /* requestCode */,
intent,
0 /* flags */);
PendingIntent.FLAG_IMMUTABLE);
final Action action = new Action.Builder(NO_ICON_ID, disableButton, pi).build();

showNotification(R.drawable.stat_sys_tether_general, title, message,
@@ -305,8 +316,9 @@ public class TetheringNotificationUpdater {
final PendingIntent pi = PendingIntent.getActivity(
mContext.createContextAsUser(UserHandle.CURRENT, 0 /* flags */),
0 /* requestCode */,
new Intent(Settings.ACTION_TETHER_SETTINGS),
Intent.FLAG_ACTIVITY_NEW_TASK,
new Intent(Settings.ACTION_TETHER_SETTINGS)
.setPackage(getSettingsPackageName(mContext.getPackageManager())),
Intent.FLAG_ACTIVITY_NEW_TASK | PendingIntent.FLAG_IMMUTABLE,
null /* options */);

showNotification(R.drawable.stat_sys_tether_general, title, message,

@@ -54,12 +54,14 @@ import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.when;

import android.app.usage.NetworkStatsManager;
import android.net.INetd;
import android.net.InetAddresses;
import android.net.InterfaceConfigurationParcel;
@@ -69,6 +71,7 @@ import android.net.LinkProperties;
import android.net.MacAddress;
import android.net.RouteInfo;
import android.net.TetherOffloadRuleParcel;
import android.net.TetherStatsParcel;
import android.net.dhcp.DhcpServingParamsParcel;
import android.net.dhcp.IDhcpEventCallbacks;
import android.net.dhcp.IDhcpServer;
@@ -80,13 +83,17 @@ import android.net.util.InterfaceParams;
import android.net.util.InterfaceSet;
import android.net.util.PrefixUtils;
import android.net.util.SharedLog;
import android.os.Handler;
import android.os.RemoteException;
import android.os.test.TestLooper;
import android.text.TextUtils;

import androidx.annotation.NonNull;
import androidx.test.filters.SmallTest;
import androidx.test.runner.AndroidJUnit4;

import com.android.networkstack.tethering.BpfCoordinator;
import com.android.networkstack.tethering.BpfCoordinator.Ipv6ForwardingRule;
import com.android.networkstack.tethering.PrivateAddressCoordinator;

import org.junit.Before;
@@ -100,6 +107,7 @@ import org.mockito.Mock;
import org.mockito.MockitoAnnotations;

import java.net.Inet4Address;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.util.Arrays;
import java.util.List;
@@ -133,6 +141,7 @@ public class IpServerTest {
@Mock private IpNeighborMonitor mIpNeighborMonitor;
@Mock private IpServer.Dependencies mDependencies;
@Mock private PrivateAddressCoordinator mAddressCoordinator;
@Mock private NetworkStatsManager mStatsManager;

@Captor private ArgumentCaptor<DhcpServingParamsParcel> mDhcpParamsCaptor;

@@ -142,6 +151,7 @@ public class IpServerTest {
private IpServer mIpServer;
private InterfaceConfigurationParcel mInterfaceConfiguration;
private NeighborEventConsumer mNeighborEventConsumer;
private BpfCoordinator mBpfCoordinator;

private void initStateMachine(int interfaceType) throws Exception {
initStateMachine(interfaceType, false /* usingLegacyDhcp */, DEFAULT_USING_BPF_OFFLOAD);
@@ -179,7 +189,7 @@ public class IpServerTest {
neighborCaptor.capture());

mIpServer = new IpServer(
IFACE_NAME, mLooper.getLooper(), interfaceType, mSharedLog, mNetd,
IFACE_NAME, mLooper.getLooper(), interfaceType, mSharedLog, mNetd, mBpfCoordinator,
mCallback, usingLegacyDhcp, usingBpfOffload, mAddressCoordinator, mDependencies);
mIpServer.start();
mNeighborEventConsumer = neighborCaptor.getValue();
@@ -215,6 +225,10 @@ public class IpServerTest {
MockitoAnnotations.initMocks(this);
when(mSharedLog.forSubComponent(anyString())).thenReturn(mSharedLog);
when(mAddressCoordinator.requestDownstreamAddress(any())).thenReturn(mTestAddress);

BpfCoordinator bc = new BpfCoordinator(new Handler(mLooper.getLooper()), mNetd,
mStatsManager, mSharedLog, new BpfCoordinator.Dependencies());
mBpfCoordinator = spy(bc);
}

@Test
@@ -222,8 +236,8 @@ public class IpServerTest {
when(mDependencies.getIpNeighborMonitor(any(), any(), any()))
.thenReturn(mIpNeighborMonitor);
mIpServer = new IpServer(IFACE_NAME, mLooper.getLooper(), TETHERING_BLUETOOTH, mSharedLog,
mNetd, mCallback, false /* usingLegacyDhcp */, DEFAULT_USING_BPF_OFFLOAD,
mAddressCoordinator, mDependencies);
mNetd, mBpfCoordinator, mCallback, false /* usingLegacyDhcp */,
DEFAULT_USING_BPF_OFFLOAD, mAddressCoordinator, mDependencies);
mIpServer.start();
mLooper.dispatchAll();
verify(mCallback).updateInterfaceState(
@@ -619,6 +633,10 @@ public class IpServerTest {
* (actual: "android.net.TetherOffloadRuleParcel@8c827b0" or some such), but at least it does
* work.
*
* TODO: consider making the error message more readable by adding a method that catches the
* AssertionFailedError and throws a new assertion with more details. See
* NetworkMonitorTest#verifyNetworkTested.
*
* See ConnectivityServiceTest#assertRoutesAdded for an alternative approach which solves the
* TooManyActualInvocations problem described above by forcing the caller of the custom assert
* method to specify all expected invocations in one call. This is useful when the stable
@@ -658,6 +676,27 @@ public class IpServerTest {
return argThat(new TetherOffloadRuleParcelMatcher(upstreamIfindex, dst, dstMac));
}

private static Ipv6ForwardingRule makeForwardingRule(
|
||||
int upstreamIfindex, @NonNull InetAddress dst, @NonNull MacAddress dstMac) {
|
||||
return new Ipv6ForwardingRule(upstreamIfindex, TEST_IFACE_PARAMS.index,
|
||||
(Inet6Address) dst, TEST_IFACE_PARAMS.macAddr, dstMac);
|
||||
}
|
||||
|
||||
private TetherStatsParcel buildEmptyTetherStatsParcel(int ifIndex) {
|
||||
TetherStatsParcel parcel = new TetherStatsParcel();
|
||||
parcel.ifIndex = ifIndex;
|
||||
return parcel;
|
||||
}
|
||||
|
||||
private void resetNetdAndBpfCoordinator() throws Exception {
|
||||
reset(mNetd, mBpfCoordinator);
|
||||
when(mNetd.tetherOffloadGetStats()).thenReturn(new TetherStatsParcel[0]);
|
||||
when(mNetd.tetherOffloadGetAndClearStats(UPSTREAM_IFINDEX))
|
||||
.thenReturn(buildEmptyTetherStatsParcel(UPSTREAM_IFINDEX));
|
||||
when(mNetd.tetherOffloadGetAndClearStats(UPSTREAM_IFINDEX2))
|
||||
.thenReturn(buildEmptyTetherStatsParcel(UPSTREAM_IFINDEX2));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void addRemoveipv6ForwardingRules() throws Exception {
|
||||
initTetheredStateMachine(TETHERING_WIFI, UPSTREAM_IFACE, false /* usingLegacyDhcp */,
|
||||
@@ -675,75 +714,100 @@ public class IpServerTest {
|
||||
final MacAddress macA = MacAddress.fromString("00:00:00:00:00:0a");
|
||||
final MacAddress macB = MacAddress.fromString("11:22:33:00:00:0b");
|
||||
|
||||
reset(mNetd);
|
||||
resetNetdAndBpfCoordinator();
|
||||
verifyNoMoreInteractions(mBpfCoordinator, mNetd);
|
||||
|
||||
// TODO: Perhaps verify the interaction of tetherOffloadSetInterfaceQuota and
|
||||
// tetherOffloadGetAndClearStats in netd while the rules are changed.
|
||||
|
||||
// Events on other interfaces are ignored.
|
||||
recvNewNeigh(notMyIfindex, neighA, NUD_REACHABLE, macA);
|
||||
verifyNoMoreInteractions(mNetd);
|
||||
verifyNoMoreInteractions(mBpfCoordinator, mNetd);
|
||||
|
||||
// Events on this interface are received and sent to netd.
|
||||
recvNewNeigh(myIfindex, neighA, NUD_REACHABLE, macA);
|
||||
verify(mBpfCoordinator).tetherOffloadRuleAdd(
|
||||
mIpServer, makeForwardingRule(UPSTREAM_IFINDEX, neighA, macA));
|
||||
verify(mNetd).tetherOffloadRuleAdd(matches(UPSTREAM_IFINDEX, neighA, macA));
|
||||
reset(mNetd);
|
||||
resetNetdAndBpfCoordinator();
|
||||
|
||||
recvNewNeigh(myIfindex, neighB, NUD_REACHABLE, macB);
|
||||
verify(mBpfCoordinator).tetherOffloadRuleAdd(
|
||||
mIpServer, makeForwardingRule(UPSTREAM_IFINDEX, neighB, macB));
|
||||
verify(mNetd).tetherOffloadRuleAdd(matches(UPSTREAM_IFINDEX, neighB, macB));
|
||||
reset(mNetd);
|
||||
resetNetdAndBpfCoordinator();
|
||||
|
||||
// Link-local and multicast neighbors are ignored.
|
||||
recvNewNeigh(myIfindex, neighLL, NUD_REACHABLE, macA);
|
||||
verifyNoMoreInteractions(mNetd);
|
||||
verifyNoMoreInteractions(mBpfCoordinator, mNetd);
|
||||
recvNewNeigh(myIfindex, neighMC, NUD_REACHABLE, macA);
|
||||
verifyNoMoreInteractions(mNetd);
|
||||
verifyNoMoreInteractions(mBpfCoordinator, mNetd);
|
||||
|
||||
// A neighbor that is no longer valid causes the rule to be removed.
|
||||
// NUD_FAILED events do not have a MAC address.
|
||||
recvNewNeigh(myIfindex, neighA, NUD_FAILED, null);
|
||||
verify(mBpfCoordinator).tetherOffloadRuleRemove(
|
||||
mIpServer, makeForwardingRule(UPSTREAM_IFINDEX, neighA, macNull));
|
||||
verify(mNetd).tetherOffloadRuleRemove(matches(UPSTREAM_IFINDEX, neighA, macNull));
|
||||
reset(mNetd);
|
||||
resetNetdAndBpfCoordinator();
|
||||
|
||||
// A neighbor that is deleted causes the rule to be removed.
|
||||
recvDelNeigh(myIfindex, neighB, NUD_STALE, macB);
|
||||
verify(mBpfCoordinator).tetherOffloadRuleRemove(
|
||||
mIpServer, makeForwardingRule(UPSTREAM_IFINDEX, neighB, macNull));
|
||||
verify(mNetd).tetherOffloadRuleRemove(matches(UPSTREAM_IFINDEX, neighB, macNull));
|
||||
reset(mNetd);
|
||||
resetNetdAndBpfCoordinator();
|
||||
|
||||
// Upstream changes result in deleting and re-adding the rules.
|
||||
// Upstream changes result in updating the rules.
|
||||
recvNewNeigh(myIfindex, neighA, NUD_REACHABLE, macA);
|
||||
recvNewNeigh(myIfindex, neighB, NUD_REACHABLE, macB);
|
||||
reset(mNetd);
|
||||
resetNetdAndBpfCoordinator();
|
||||
|
||||
InOrder inOrder = inOrder(mNetd);
|
||||
LinkProperties lp = new LinkProperties();
|
||||
lp.setInterfaceName(UPSTREAM_IFACE2);
|
||||
dispatchTetherConnectionChanged(UPSTREAM_IFACE2, lp, -1);
|
||||
inOrder.verify(mNetd).tetherOffloadRuleAdd(matches(UPSTREAM_IFINDEX2, neighA, macA));
|
||||
verify(mBpfCoordinator).tetherOffloadRuleUpdate(mIpServer, UPSTREAM_IFINDEX2);
|
||||
inOrder.verify(mNetd).tetherOffloadRuleRemove(matches(UPSTREAM_IFINDEX, neighA, macA));
|
||||
inOrder.verify(mNetd).tetherOffloadRuleAdd(matches(UPSTREAM_IFINDEX2, neighB, macB));
|
||||
inOrder.verify(mNetd).tetherOffloadRuleAdd(matches(UPSTREAM_IFINDEX2, neighA, macA));
|
||||
inOrder.verify(mNetd).tetherOffloadRuleRemove(matches(UPSTREAM_IFINDEX, neighB, macB));
|
||||
reset(mNetd);
|
||||
inOrder.verify(mNetd).tetherOffloadRuleAdd(matches(UPSTREAM_IFINDEX2, neighB, macB));
|
||||
resetNetdAndBpfCoordinator();
|
||||
|
||||
// When the upstream is lost, rules are removed.
|
||||
dispatchTetherConnectionChanged(null, null, 0);
|
||||
// Clear function is called two times by:
|
||||
// - processMessage CMD_TETHER_CONNECTION_CHANGED for the upstream is lost.
|
||||
// - processMessage CMD_IPV6_TETHER_UPDATE for the IPv6 upstream is lost.
|
||||
// See dispatchTetherConnectionChanged.
|
||||
verify(mBpfCoordinator, times(2)).tetherOffloadRuleClear(mIpServer);
|
||||
verify(mNetd).tetherOffloadRuleRemove(matches(UPSTREAM_IFINDEX2, neighA, macA));
|
||||
verify(mNetd).tetherOffloadRuleRemove(matches(UPSTREAM_IFINDEX2, neighB, macB));
|
||||
reset(mNetd);
|
||||
resetNetdAndBpfCoordinator();
|
||||
|
||||
// If the upstream is IPv4-only, no rules are added.
|
||||
dispatchTetherConnectionChanged(UPSTREAM_IFACE);
reset(mNetd);
resetNetdAndBpfCoordinator();
recvNewNeigh(myIfindex, neighA, NUD_REACHABLE, macA);
verifyNoMoreInteractions(mNetd);
// The clear function is called by #updateIpv6ForwardingRules because the IPv6 upstream is lost.
verify(mBpfCoordinator).tetherOffloadRuleClear(mIpServer);
verifyNoMoreInteractions(mBpfCoordinator, mNetd);

// Rules can be added again once upstream IPv6 connectivity is available.
lp.setInterfaceName(UPSTREAM_IFACE);
dispatchTetherConnectionChanged(UPSTREAM_IFACE, lp, -1);
recvNewNeigh(myIfindex, neighB, NUD_REACHABLE, macB);
verify(mBpfCoordinator).tetherOffloadRuleAdd(
mIpServer, makeForwardingRule(UPSTREAM_IFINDEX, neighB, macB));
verify(mNetd).tetherOffloadRuleAdd(matches(UPSTREAM_IFINDEX, neighB, macB));
verify(mBpfCoordinator, never()).tetherOffloadRuleAdd(
mIpServer, makeForwardingRule(UPSTREAM_IFINDEX, neighA, macA));
verify(mNetd, never()).tetherOffloadRuleAdd(matches(UPSTREAM_IFINDEX, neighA, macA));

// If upstream IPv6 connectivity is lost, rules are removed.
reset(mNetd);
resetNetdAndBpfCoordinator();
dispatchTetherConnectionChanged(UPSTREAM_IFACE, null, 0);
verify(mBpfCoordinator).tetherOffloadRuleClear(mIpServer);
verify(mNetd).tetherOffloadRuleRemove(matches(UPSTREAM_IFINDEX, neighB, macB));

// When the interface goes down, rules are removed.
@@ -751,15 +815,20 @@ public class IpServerTest {
dispatchTetherConnectionChanged(UPSTREAM_IFACE, lp, -1);
recvNewNeigh(myIfindex, neighA, NUD_REACHABLE, macA);
recvNewNeigh(myIfindex, neighB, NUD_REACHABLE, macB);
verify(mBpfCoordinator).tetherOffloadRuleAdd(
mIpServer, makeForwardingRule(UPSTREAM_IFINDEX, neighA, macA));
verify(mNetd).tetherOffloadRuleAdd(matches(UPSTREAM_IFINDEX, neighA, macA));
verify(mBpfCoordinator).tetherOffloadRuleAdd(
mIpServer, makeForwardingRule(UPSTREAM_IFINDEX, neighB, macB));
verify(mNetd).tetherOffloadRuleAdd(matches(UPSTREAM_IFINDEX, neighB, macB));
reset(mNetd);
resetNetdAndBpfCoordinator();

mIpServer.stop();
mLooper.dispatchAll();
verify(mBpfCoordinator).tetherOffloadRuleClear(mIpServer);
verify(mNetd).tetherOffloadRuleRemove(matches(UPSTREAM_IFINDEX, neighA, macA));
verify(mNetd).tetherOffloadRuleRemove(matches(UPSTREAM_IFINDEX, neighB, macB));
reset(mNetd);
resetNetdAndBpfCoordinator();
}

@Test
@@ -769,35 +838,46 @@ public class IpServerTest {
final MacAddress macA = MacAddress.fromString("00:00:00:00:00:0a");
final MacAddress macNull = MacAddress.fromString("00:00:00:00:00:00");
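// Removal events (NUD_FAILED and deleted neighbors) carry no MAC address, so rule removals
// are matched against the all-zero MAC above.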

reset(mNetd);

// Expect that rules can only be added/removed when the BPF offload config is enabled.
// Note that the usingBpfOffload false case is not a realistic test case. Because IP
// Note that the BPF offload disabled case is not a realistic test case. Because IP
// neighbor monitor doesn't start if BPF offload is disabled, there should be no
// neighbor event listening. This is used for testing the protection check just in case.
// TODO: Perhaps remove this test once we don't need this check anymore.
for (boolean usingBpfOffload : new boolean[]{true, false}) {
initTetheredStateMachine(TETHERING_WIFI, UPSTREAM_IFACE, false /* usingLegacyDhcp */,
usingBpfOffload);
// TODO: Perhaps remove the BPF offload disabled case test once this check isn't needed
// anymore.

// A neighbor is added.
recvNewNeigh(myIfindex, neigh, NUD_REACHABLE, macA);
if (usingBpfOffload) {
verify(mNetd).tetherOffloadRuleAdd(matches(UPSTREAM_IFINDEX, neigh, macA));
} else {
verify(mNetd, never()).tetherOffloadRuleAdd(any());
}
reset(mNetd);
// [1] Enable BPF offload.
// A neighbor that is added or deleted causes the rule to be added or removed.
initTetheredStateMachine(TETHERING_WIFI, UPSTREAM_IFACE, false /* usingLegacyDhcp */,
true /* usingBpfOffload */);
resetNetdAndBpfCoordinator();

// A neighbor is deleted.
recvDelNeigh(myIfindex, neigh, NUD_STALE, macA);
if (usingBpfOffload) {
verify(mNetd).tetherOffloadRuleRemove(matches(UPSTREAM_IFINDEX, neigh, macNull));
} else {
verify(mNetd, never()).tetherOffloadRuleRemove(any());
}
reset(mNetd);
}
recvNewNeigh(myIfindex, neigh, NUD_REACHABLE, macA);
verify(mBpfCoordinator).tetherOffloadRuleAdd(
mIpServer, makeForwardingRule(UPSTREAM_IFINDEX, neigh, macA));
verify(mNetd).tetherOffloadRuleAdd(matches(UPSTREAM_IFINDEX, neigh, macA));
resetNetdAndBpfCoordinator();

recvDelNeigh(myIfindex, neigh, NUD_STALE, macA);
verify(mBpfCoordinator).tetherOffloadRuleRemove(
mIpServer, makeForwardingRule(UPSTREAM_IFINDEX, neigh, macNull));
verify(mNetd).tetherOffloadRuleRemove(matches(UPSTREAM_IFINDEX, neigh, macNull));
resetNetdAndBpfCoordinator();

// [2] Disable BPF offload.
// A neighbor that is added or deleted doesn't cause the rule to be added or removed.
initTetheredStateMachine(TETHERING_WIFI, UPSTREAM_IFACE, false /* usingLegacyDhcp */,
false /* usingBpfOffload */);
resetNetdAndBpfCoordinator();

recvNewNeigh(myIfindex, neigh, NUD_REACHABLE, macA);
verify(mBpfCoordinator, never()).tetherOffloadRuleAdd(any(), any());
verify(mNetd, never()).tetherOffloadRuleAdd(any());
resetNetdAndBpfCoordinator();

recvDelNeigh(myIfindex, neigh, NUD_STALE, macA);
verify(mBpfCoordinator, never()).tetherOffloadRuleRemove(any(), any());
verify(mNetd, never()).tetherOffloadRuleRemove(any());
resetNetdAndBpfCoordinator();
}

@Test

@@ -0,0 +1,246 @@
/*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package com.android.networkstack.tethering;

import static android.net.NetworkStats.DEFAULT_NETWORK_NO;
import static android.net.NetworkStats.METERED_NO;
import static android.net.NetworkStats.ROAMING_NO;
import static android.net.NetworkStats.SET_DEFAULT;
import static android.net.NetworkStats.TAG_NONE;
import static android.net.NetworkStats.UID_ALL;
import static android.net.NetworkStats.UID_TETHERING;
import static android.net.netstats.provider.NetworkStatsProvider.QUOTA_UNLIMITED;

import static com.android.networkstack.tethering.BpfCoordinator
.DEFAULT_PERFORM_POLL_INTERVAL_MS;
import static com.android.networkstack.tethering.BpfCoordinator.StatsType;
import static com.android.networkstack.tethering.BpfCoordinator.StatsType.STATS_PER_IFACE;
import static com.android.networkstack.tethering.BpfCoordinator.StatsType.STATS_PER_UID;

import static junit.framework.Assert.assertNotNull;

import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.clearInvocations;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import android.annotation.NonNull;
import android.app.usage.NetworkStatsManager;
import android.net.INetd;
import android.net.NetworkStats;
import android.net.TetherStatsParcel;
import android.net.util.SharedLog;
import android.os.Handler;
import android.os.test.TestLooper;

import androidx.test.filters.SmallTest;
import androidx.test.runner.AndroidJUnit4;

import com.android.testutils.TestableNetworkStatsProviderCbBinder;

import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;

import java.util.ArrayList;

@RunWith(AndroidJUnit4.class)
@SmallTest
public class BpfCoordinatorTest {
@Mock private NetworkStatsManager mStatsManager;
@Mock private INetd mNetd;
// Late init since methods must be called by the thread that created this object.
private TestableNetworkStatsProviderCbBinder mTetherStatsProviderCb;
private BpfCoordinator.BpfTetherStatsProvider mTetherStatsProvider;
private final ArgumentCaptor<ArrayList> mStringArrayCaptor =
ArgumentCaptor.forClass(ArrayList.class);
private final TestLooper mTestLooper = new TestLooper();
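// The Dependencies override below returns the default poll interval, so tests can advance
// mTestLooper by exactly one poll interval (see setTetherOffloadStatsList) to drive each
// scheduled stats poll.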
private BpfCoordinator.Dependencies mDeps =
new BpfCoordinator.Dependencies() {
@Override
int getPerformPollInterval() {
return DEFAULT_PERFORM_POLL_INTERVAL_MS;
}
};

@Before public void setUp() {
MockitoAnnotations.initMocks(this);
}

private void waitForIdle() {
mTestLooper.dispatchAll();
}

private void setupFunctioningNetdInterface() throws Exception {
when(mNetd.tetherOffloadGetStats()).thenReturn(new TetherStatsParcel[0]);
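// An empty stats list makes the mocked netd behave like a functioning offload interface
// that has not forwarded any traffic yet.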
}

@NonNull
private BpfCoordinator makeBpfCoordinator() throws Exception {
BpfCoordinator coordinator = new BpfCoordinator(
new Handler(mTestLooper.getLooper()), mNetd, mStatsManager, new SharedLog("test"),
mDeps);
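// Constructing the coordinator registers a BpfTetherStatsProvider with NetworkStatsManager;
// capture it so the tests can push stats and set alerts on it directly.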
final ArgumentCaptor<BpfCoordinator.BpfTetherStatsProvider>
tetherStatsProviderCaptor =
ArgumentCaptor.forClass(BpfCoordinator.BpfTetherStatsProvider.class);
verify(mStatsManager).registerNetworkStatsProvider(anyString(),
tetherStatsProviderCaptor.capture());
mTetherStatsProvider = tetherStatsProviderCaptor.getValue();
assertNotNull(mTetherStatsProvider);
mTetherStatsProviderCb = new TestableNetworkStatsProviderCbBinder();
mTetherStatsProvider.setProviderCallbackBinder(mTetherStatsProviderCb);
return coordinator;
}

@NonNull
private static NetworkStats.Entry buildTestEntry(@NonNull StatsType how,
@NonNull String iface, long rxBytes, long rxPackets, long txBytes, long txPackets) {
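// Per-interface entries are attributed to UID_ALL; per-UID entries are attributed to
// UID_TETHERING.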
return new NetworkStats.Entry(iface, how == STATS_PER_IFACE ? UID_ALL : UID_TETHERING,
SET_DEFAULT, TAG_NONE, METERED_NO, ROAMING_NO, DEFAULT_NETWORK_NO, rxBytes,
rxPackets, txBytes, txPackets, 0L);
}

@NonNull
private static TetherStatsParcel buildTestTetherStatsParcel(@NonNull Integer ifIndex,
long rxBytes, long rxPackets, long txBytes, long txPackets) {
final TetherStatsParcel parcel = new TetherStatsParcel();
parcel.ifIndex = ifIndex;
parcel.rxBytes = rxBytes;
parcel.rxPackets = rxPackets;
parcel.txBytes = txBytes;
parcel.txPackets = txPackets;
return parcel;
}

private void setTetherOffloadStatsList(TetherStatsParcel[] tetherStatsList) throws Exception {
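// Install the new stats in the mocked netd, then advance the looper by one poll interval
// so the scheduled polling task picks them up.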
when(mNetd.tetherOffloadGetStats()).thenReturn(tetherStatsList);
mTestLooper.moveTimeForward(DEFAULT_PERFORM_POLL_INTERVAL_MS);
waitForIdle();
}

@Test
public void testGetForwardedStats() throws Exception {
setupFunctioningNetdInterface();

final BpfCoordinator coordinator = makeBpfCoordinator();
coordinator.startPolling();
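// startPolling() schedules periodic tetherOffloadGetStats() polls; each moveTimeForward()
// below triggers one poll.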

final String wlanIface = "wlan0";
final Integer wlanIfIndex = 100;
final String mobileIface = "rmnet_data0";
final Integer mobileIfIndex = 101;

// Add the interface names to the lookup table. In a realistic case, the upstream interface
// name is added by IpServer when it receives a new IPv6 upstream update event.
coordinator.addUpstreamNameToLookupTable(wlanIfIndex, wlanIface);
coordinator.addUpstreamNameToLookupTable(mobileIfIndex, mobileIface);

// [1] Both interface stats are changed.
// Set up the tether stats of the wlan and mobile interfaces. Note that the looper time is
// moved forward to make sure the new tether stats have been picked up by the polling
// update thread.
setTetherOffloadStatsList(new TetherStatsParcel[] {
buildTestTetherStatsParcel(wlanIfIndex, 1000, 100, 2000, 200),
buildTestTetherStatsParcel(mobileIfIndex, 3000, 300, 4000, 400)});

final NetworkStats expectedIfaceStats = new NetworkStats(0L, 2)
.addEntry(buildTestEntry(STATS_PER_IFACE, wlanIface, 1000, 100, 2000, 200))
.addEntry(buildTestEntry(STATS_PER_IFACE, mobileIface, 3000, 300, 4000, 400));

final NetworkStats expectedUidStats = new NetworkStats(0L, 2)
.addEntry(buildTestEntry(STATS_PER_UID, wlanIface, 1000, 100, 2000, 200))
.addEntry(buildTestEntry(STATS_PER_UID, mobileIface, 3000, 300, 4000, 400));

// Force a stats push to verify the stats reported.
// TODO: Perhaps make #expectNotifyStatsUpdated use a test TetherStatsParcel object for
// verifying the notification.
mTetherStatsProvider.pushTetherStats();
mTetherStatsProviderCb.expectNotifyStatsUpdated(expectedIfaceStats, expectedUidStats);

// [2] Only one interface's stats have changed.
// The tether stats of the mobile interface have accumulated while the tether stats of the
// wlan interface are unchanged.
setTetherOffloadStatsList(new TetherStatsParcel[] {
buildTestTetherStatsParcel(wlanIfIndex, 1000, 100, 2000, 200),
buildTestTetherStatsParcel(mobileIfIndex, 3010, 320, 4030, 440)});
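// Expected mobile deltas: rxBytes 3010 - 3000 = 10, rxPackets 320 - 300 = 20,
// txBytes 4030 - 4000 = 30, txPackets 440 - 400 = 40; the wlan deltas are all zero.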

final NetworkStats expectedIfaceStatsDiff = new NetworkStats(0L, 2)
.addEntry(buildTestEntry(STATS_PER_IFACE, wlanIface, 0, 0, 0, 0))
.addEntry(buildTestEntry(STATS_PER_IFACE, mobileIface, 10, 20, 30, 40));

final NetworkStats expectedUidStatsDiff = new NetworkStats(0L, 2)
.addEntry(buildTestEntry(STATS_PER_UID, wlanIface, 0, 0, 0, 0))
.addEntry(buildTestEntry(STATS_PER_UID, mobileIface, 10, 20, 30, 40));

// Force a stats push to verify that only the diff of the stats is reported.
mTetherStatsProvider.pushTetherStats();
mTetherStatsProviderCb.expectNotifyStatsUpdated(expectedIfaceStatsDiff,
expectedUidStatsDiff);

// [3] Stop the coordinator.
// Shut down the coordinator and clear the invocation history, especially the
// tetherOffloadGetStats() calls.
coordinator.stopPolling();
clearInvocations(mNetd);

// Verify that the polling update thread has stopped.
mTestLooper.moveTimeForward(DEFAULT_PERFORM_POLL_INTERVAL_MS);
waitForIdle();
verify(mNetd, never()).tetherOffloadGetStats();
}

@Test
public void testOnSetAlert() throws Exception {
setupFunctioningNetdInterface();

final BpfCoordinator coordinator = makeBpfCoordinator();
coordinator.startPolling();

final String mobileIface = "rmnet_data0";
final Integer mobileIfIndex = 100;
coordinator.addUpstreamNameToLookupTable(mobileIfIndex, mobileIface);

// Verify that setting the quota to 0 immediately triggers a callback.
mTetherStatsProvider.onSetAlert(0);
waitForIdle();
mTetherStatsProviderCb.expectNotifyAlertReached();

// Verify that notifyAlertReached is not fired if the quota has not been reached yet.
when(mNetd.tetherOffloadGetStats()).thenReturn(
new TetherStatsParcel[] {buildTestTetherStatsParcel(mobileIfIndex, 0, 0, 0, 0)});
mTetherStatsProvider.onSetAlert(100);
mTestLooper.moveTimeForward(DEFAULT_PERFORM_POLL_INTERVAL_MS);
waitForIdle();
mTetherStatsProviderCb.assertNoCallback();

// Verify that notifyAlertReached is fired when the quota is reached.
when(mNetd.tetherOffloadGetStats()).thenReturn(
new TetherStatsParcel[] {buildTestTetherStatsParcel(mobileIfIndex, 50, 0, 50, 0)});
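// 50 rx bytes + 50 tx bytes = 100 bytes forwarded since the alert was set, which meets the
// quota of 100, so the alert is expected to fire on the next poll.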
mTestLooper.moveTimeForward(DEFAULT_PERFORM_POLL_INTERVAL_MS);
waitForIdle();
mTetherStatsProviderCb.expectNotifyAlertReached();

// Verify that setting the quota to QUOTA_UNLIMITED does not trigger any callback.
mTetherStatsProvider.onSetAlert(QUOTA_UNLIMITED);
mTestLooper.moveTimeForward(DEFAULT_PERFORM_POLL_INTERVAL_MS);
waitForIdle();
mTetherStatsProviderCb.assertNoCallback();
}
}
@@ -19,6 +19,10 @@ package com.android.networkstack.tethering
import android.app.Notification
import android.app.NotificationManager
import android.content.Context
import android.content.pm.ActivityInfo
import android.content.pm.ApplicationInfo
import android.content.pm.PackageManager
import android.content.pm.ResolveInfo
import android.content.res.Resources
import android.net.ConnectivityManager.TETHERING_WIFI
import android.os.Handler
@@ -51,6 +55,7 @@ import org.mockito.ArgumentMatchers.anyInt
import org.mockito.ArgumentMatchers.eq
import org.mockito.Mock
import org.mockito.Mockito.doReturn
import org.mockito.Mockito.mock
import org.mockito.Mockito.never
import org.mockito.Mockito.reset
import org.mockito.Mockito.times
@@ -351,4 +356,26 @@ class TetheringNotificationUpdaterTest {
notificationUpdater.onUpstreamCapabilitiesChanged(ROAMING_CAPABILITIES)
verifyNotificationCancelled(listOf(NO_UPSTREAM_NOTIFICATION_ID, ROAMING_NOTIFICATION_ID))
}

@Test
fun testGetSettingsPackageName() {
val defaultSettingsPackageName = "com.android.settings"
val testSettingsPackageName = "com.android.test.settings"
val pm = mock(PackageManager::class.java)
doReturn(null).`when`(pm).resolveActivity(any(), anyInt())
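// With no activity resolving the settings intent, the default Settings package name is
// expected.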
assertEquals(defaultSettingsPackageName,
TetheringNotificationUpdater.getSettingsPackageName(pm))

val resolveInfo = ResolveInfo().apply {
activityInfo = ActivityInfo().apply {
name = "test"
applicationInfo = ApplicationInfo().apply {
packageName = testSettingsPackageName
}
}
}
doReturn(resolveInfo).`when`(pm).resolveActivity(any(), anyInt())
assertEquals(testSettingsPackageName,
TetheringNotificationUpdater.getSettingsPackageName(pm))
}
}

@@ -203,6 +203,7 @@ public class TetheringTest {
@Mock private ConnectivityManager mCm;
@Mock private EthernetManager mEm;
@Mock private TetheringNotificationUpdater mNotificationUpdater;
@Mock private BpfCoordinator mBpfCoordinator;

private final MockIpServerDependencies mIpServerDependencies =
spy(new MockIpServerDependencies());
@@ -336,6 +337,12 @@ public class TetheringTest {
mIpv6CoordinatorNotifyList = null;
}

@Override
public BpfCoordinator getBpfCoordinator(Handler handler, INetd netd,
SharedLog log, BpfCoordinator.Dependencies deps) {
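// Inject the mock BpfCoordinator so the code under test uses it.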
return mBpfCoordinator;
}

@Override
public OffloadHardwareInterface getOffloadHardwareInterface(Handler h, SharedLog log) {
return mOffloadHardwareInterface;