Merge "Clear IPv4 offload rules when tethering stopped"
@@ -33,6 +33,8 @@ import com.android.networkstack.tethering.Tether4Key;
 import com.android.networkstack.tethering.Tether4Value;
 import com.android.networkstack.tethering.TetherStatsValue;
 
+import java.util.function.BiConsumer;
+
 /**
  * Bpf coordinator class for API shims.
  */
@@ -160,6 +162,12 @@ public class BpfCoordinatorShimImpl
         return true;
     }
 
+    @Override
+    public void tetherOffloadRuleForEach(boolean downstream,
+            @NonNull BiConsumer<Tether4Key, Tether4Value> action) {
+        /* no op */
+    }
+
     @Override
     public boolean attachProgram(String iface, boolean downstream) {
         /* no op */
@@ -47,6 +47,7 @@ import com.android.networkstack.tethering.TetherUpstream6Key;
 
 import java.io.FileDescriptor;
 import java.io.IOException;
+import java.util.function.BiConsumer;
 
 /**
  * Bpf coordinator class for API shims.
@@ -380,10 +381,7 @@ public class BpfCoordinatorShimImpl
 
         try {
             if (downstream) {
-                if (!mBpfDownstream4Map.deleteEntry(key)) {
-                    mLog.e("Could not delete entry (key: " + key + ")");
-                    return false;
-                }
+                if (!mBpfDownstream4Map.deleteEntry(key)) return false;  // Rule did not exist
 
                 // Decrease the rule count while a deleting rule is not using a given upstream
                 // interface anymore.
@@ -401,18 +399,31 @@ public class BpfCoordinatorShimImpl
                     mRule4CountOnUpstream.put(upstreamIfindex, count);
                 }
             } else {
-                mBpfUpstream4Map.deleteEntry(key);
+                if (!mBpfUpstream4Map.deleteEntry(key)) return false;  // Rule did not exist
             }
         } catch (ErrnoException e) {
-            // Silent if the rule did not exist.
-            if (e.errno != OsConstants.ENOENT) {
-                mLog.e("Could not delete entry: ", e);
-                return false;
-            }
+            mLog.e("Could not delete entry (key: " + key + ")", e);
+            return false;
         }
         return true;
     }
 
+    @Override
+    public void tetherOffloadRuleForEach(boolean downstream,
+            @NonNull BiConsumer<Tether4Key, Tether4Value> action) {
+        if (!isInitialized()) return;
+
+        try {
+            if (downstream) {
+                mBpfDownstream4Map.forEach(action);
+            } else {
+                mBpfUpstream4Map.forEach(action);
+            }
+        } catch (ErrnoException e) {
+            mLog.e("Could not iterate map: ", e);
+        }
+    }
+
     @Override
     public boolean attachProgram(String iface, boolean downstream) {
         if (!isInitialized()) return false;
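Aside: the api31 change above reverses the shim's error handling for rule removal. A missing rule is no longer special-cased via ENOENT; instead, deleteEntry()'s boolean result is surfaced, so removal becomes idempotent and the caller learns whether the map was actually modified. A JDK-only sketch of that idempotent-delete contract (the class and names are hypothetical, not part of the commit):

    import java.util.HashMap;
    import java.util.Map;

    final class IdempotentDeleteDemo {
        private final Map<String, String> mRules = new HashMap<>();

        /** Returns true iff the map was modified; a missing key is not an error. */
        boolean remove(String key) {
            return mRules.remove(key) != null;  // absent key: report false, no logging
        }

        public static void main(String[] args) {
            IdempotentDeleteDemo demo = new IdempotentDeleteDemo();
            demo.mRules.put("rule", "value");
            System.out.println(demo.remove("rule"));  // true: the map was modified
            System.out.println(demo.remove("rule"));  // false: rule did not exist
        }
    }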
@@ -28,6 +28,8 @@ import com.android.networkstack.tethering.Tether4Key;
 import com.android.networkstack.tethering.Tether4Value;
 import com.android.networkstack.tethering.TetherStatsValue;
 
+import java.util.function.BiConsumer;
+
 /**
  * Bpf coordinator class for API shims.
  */
@@ -145,9 +147,24 @@ public abstract class BpfCoordinatorShim {
 
     /**
      * Deletes a tethering IPv4 offload rule from the appropriate BPF map.
      *
      * @param downstream true if downstream, false if upstream.
      * @param key the key to delete.
      * @return true iff the map was modified, false if the key did not exist or there was an error.
      */
     public abstract boolean tetherOffloadRuleRemove(boolean downstream, @NonNull Tether4Key key);
 
+    /**
+     * Iterate through the map and handle each key -> value retrieved based on the given BiConsumer.
+     *
+     * @param downstream true if downstream, false if upstream.
+     * @param action represents the action for each key -> value. Entry deletion is not
+     *               allowed; use #tetherOffloadRuleRemove instead.
+     */
+    @Nullable
+    public abstract void tetherOffloadRuleForEach(boolean downstream,
+            @NonNull BiConsumer<Tether4Key, Tether4Value> action);
+
+    /**
+     * Whether there is currently any IPv4 rule on the specified upstream.
+     */
@@ -596,6 +596,7 @@ public class IpServer extends StateMachine {
         // into calls to InterfaceController, shared with startIPv4().
         mInterfaceCtrl.clearIPv4Address();
         mPrivateAddressCoordinator.releaseDownstream(this);
+        mBpfCoordinator.tetherOffloadClientClear(this);
         mIpv4Address = null;
         mStaticIpv4ServerAddr = null;
         mStaticIpv4ClientAddr = null;
@@ -949,7 +950,6 @@
             if (e.isValid()) {
                 mBpfCoordinator.tetherOffloadClientAdd(this, clientInfo);
             } else {
-                // TODO: Delete all related offload rules which are using this client.
                 mBpfCoordinator.tetherOffloadClientRemove(this, clientInfo);
             }
         }
@@ -1283,6 +1283,16 @@
             super.exit();
         }
 
+        // Note that IPv4 offload rules cleanup is implemented in BpfCoordinator while upstream
+        // state is null or changed, because IPv4 and IPv6 tethering have different code flow
+        // and behaviour. While the upstream is switching from an offload-supported interface
+        // to an offload non-supported interface, event CMD_TETHER_CONNECTION_CHANGED calls
+        // #cleanupUpstreamInterface but not #cleanupUpstream, because the new UpstreamIfaceSet
+        // is not null. This case won't happen in IPv6 tethering because IPv6 tethering upstream
+        // state is reported by IPv6TetheringCoordinator. #cleanupUpstream is also called when
+        // unwinding after a failure to add NAT. In that case, the IPv4 offload rules are removed
+        // by #stopIPv4 in the state machine. If any case turns up which is not covered by the
+        // previous cases, probably consider clearing rules in #cleanupUpstream as well.
         private void cleanupUpstream() {
             if (mUpstreamIfaceSet == null) return;
 
@@ -34,7 +34,6 @@ import static com.android.networkstack.tethering.TetheringConfiguration.DEFAULT_
 
 import android.app.usage.NetworkStatsManager;
 import android.net.INetd;
-import android.net.LinkProperties;
 import android.net.MacAddress;
 import android.net.NetworkStats;
 import android.net.NetworkStats.Entry;
@@ -51,6 +50,7 @@ import android.os.Handler;
 import android.os.SystemClock;
 import android.system.ErrnoException;
 import android.text.TextUtils;
+import android.util.ArraySet;
 import android.util.Log;
 import android.util.SparseArray;
 
@@ -69,6 +69,7 @@ import java.net.Inet6Address;
 import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -228,6 +229,10 @@ public class BpfCoordinator {
     // BpfCoordinatorTest needs predictable iteration order.
     private final Set<Integer> mDeviceMapSet = new LinkedHashSet<>();
 
+    // Tracks the last IPv4 upstream index. Support single upstream only.
+    // TODO: Support multi-upstream interfaces.
+    private int mLastIPv4UpstreamIfindex = 0;
+
     // Runnable that used by scheduling next polling of stats.
     private final Runnable mScheduledPollingTask = () -> {
         updateForwardedStats();
@@ -576,6 +581,7 @@
     /**
      * Clear all forwarding rules for a given downstream.
      * Note that this can be only called on handler thread.
+     * TODO: rename to tetherOffloadRuleClear6 because it handles IPv6 rules only.
      */
     public void tetherOffloadRuleClear(@NonNull final IpServer ipServer) {
         if (!isUsingBpf()) return;
@@ -647,6 +653,7 @@
 
     /**
      * Add downstream client.
+     * Note that this can be only called on handler thread.
      */
     public void tetherOffloadClientAdd(@NonNull final IpServer ipServer,
             @NonNull final ClientInfo client) {
@@ -661,54 +668,180 @@
     }
 
     /**
-     * Remove downstream client.
+     * Remove a downstream client and its rules if any.
      * Note that this can be only called on handler thread.
      */
     public void tetherOffloadClientRemove(@NonNull final IpServer ipServer,
             @NonNull final ClientInfo client) {
         if (!isUsingBpf()) return;
 
         // No clients on the downstream, return early.
         HashMap<Inet4Address, ClientInfo> clients = mTetherClients.get(ipServer);
         if (clients == null) return;
 
-        // If no rule is removed, return early. Avoid unnecessary work on a non-existent rule
-        // which may have never been added or removed already.
+        // If no client is removed, return early.
         if (clients.remove(client.clientAddress) == null) return;
 
-        // Remove the downstream entry if it has no more rule.
+        // Remove the client's rules. Removing the client implies that its rules are not used
+        // anymore.
+        tetherOffloadRuleClear(client);
+
+        // Remove the downstream entry if it has no more clients.
         if (clients.isEmpty()) {
             mTetherClients.remove(ipServer);
         }
     }
 
     /**
-     * Call when UpstreamNetworkState may be changed.
-     * If upstream has ipv4 for tethering, update this new UpstreamNetworkState to map. The
-     * upstream interface index and its address mapping is prepared for building IPv4
-     * offload rule.
-     *
-     * TODO: Delete the unused upstream interface mapping.
-     * TODO: Support ether ip upstream interface.
+     * Clear all downstream clients and their rules if any.
+     * Note that this can be only called on handler thread.
      */
-    public void addUpstreamIfindexToMap(LinkProperties lp) {
-        if (!mPollingStarted) return;
+    public void tetherOffloadClientClear(@NonNull final IpServer ipServer) {
+        if (!isUsingBpf()) return;
+
+        final HashMap<Inet4Address, ClientInfo> clients = mTetherClients.get(ipServer);
+        if (clients == null) return;
+
+        // Need to build a client list because the client map may be changed in the iteration.
+        for (final ClientInfo c : new ArrayList<ClientInfo>(clients.values())) {
+            tetherOffloadClientRemove(ipServer, c);
+        }
+    }
 
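Aside: tetherOffloadClientClear copies the client collection into a new ArrayList before iterating, because each tetherOffloadClientRemove call mutates the same underlying map. A JDK-only illustration of why that snapshot matters (hypothetical names, not the commit's code):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.Map;

    final class SnapshotIterationDemo {
        public static void main(String[] args) {
            Map<String, String> clients = new HashMap<>(Map.of("c1", "a", "c2", "b"));
            // Removing entries while iterating the live key set would typically throw
            // ConcurrentModificationException; iterating a snapshot is safe.
            for (String key : new ArrayList<>(clients.keySet())) {
                clients.remove(key);  // stands in for tetherOffloadClientRemove(ipServer, c)
            }
            System.out.println(clients);  // {}
        }
    }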
+    /**
+     * Clear all forwarding IPv4 rules for a given client.
+     * Note that this can be only called on handler thread.
+     */
+    private void tetherOffloadRuleClear(@NonNull final ClientInfo clientInfo) {
+        // TODO: consider removing the rules in #tetherOffloadRuleForEach once BpfMap#forEach
+        // can guarantee that deleting some passed-in rules in the BPF map iteration can still
+        // walk through every entry.
+        final Inet4Address clientAddr = clientInfo.clientAddress;
+        final Set<Integer> upstreamIndiceSet = new ArraySet<Integer>();
+        final Set<Tether4Key> deleteUpstreamRuleKeys = new ArraySet<Tether4Key>();
+        final Set<Tether4Key> deleteDownstreamRuleKeys = new ArraySet<Tether4Key>();
+
+        // Find the rules which are related to the given client.
+        mBpfCoordinatorShim.tetherOffloadRuleForEach(UPSTREAM, (k, v) -> {
+            if (Arrays.equals(k.src4, clientAddr.getAddress())) {
+                deleteUpstreamRuleKeys.add(k);
+            }
+        });
+        mBpfCoordinatorShim.tetherOffloadRuleForEach(DOWNSTREAM, (k, v) -> {
+            if (Arrays.equals(v.dst46, toIpv4MappedAddressBytes(clientAddr))) {
+                deleteDownstreamRuleKeys.add(k);
+                upstreamIndiceSet.add((int) k.iif);
+            }
+        });
+
+        // The rules should be paired on the upstream and downstream maps because they are added
+        // by conntrack events which have bidirectional information.
+        // TODO: Consider figuring out a way to fix this. Probably delete all rules as a fallback.
+        if (deleteUpstreamRuleKeys.size() != deleteDownstreamRuleKeys.size()) {
+            Log.wtf(TAG, "The deleting rule numbers are different on upstream4 and downstream4 ("
+                    + "upstream: " + deleteUpstreamRuleKeys.size() + ", "
+                    + "downstream: " + deleteDownstreamRuleKeys.size() + ").");
+            return;
+        }
+
+        // Delete the rules which are related to the given client.
+        for (final Tether4Key k : deleteUpstreamRuleKeys) {
+            mBpfCoordinatorShim.tetherOffloadRuleRemove(UPSTREAM, k);
+        }
+        for (final Tether4Key k : deleteDownstreamRuleKeys) {
+            mBpfCoordinatorShim.tetherOffloadRuleRemove(DOWNSTREAM, k);
+        }
+
+        // Clean up each upstream interface via a set, which avoids duplicated work on the same
+        // upstream interface. Cleaning up the same interface twice (or more) here may raise
+        // an exception because all related information was removed in the first deletion.
+        for (final int upstreamIndex : upstreamIndiceSet) {
+            maybeClearLimit(upstreamIndex);
+        }
+    }
 
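Aside: because the new tetherOffloadRuleForEach contract forbids deleting entries during iteration, tetherOffloadRuleClear first collects the matching keys and only then deletes them. The same collect-then-delete pattern in plain JDK terms (hypothetical names):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.function.BiConsumer;

    final class CollectThenDeleteDemo {
        // Analogous to tetherOffloadRuleForEach: expose entries, allow no structural changes.
        static void forEachRule(Map<Integer, String> map, BiConsumer<Integer, String> action) {
            map.forEach(action);  // deleting inside `action` could break the iteration
        }

        public static void main(String[] args) {
            Map<Integer, String> rules = new HashMap<>(Map.of(1, "a", 2, "b", 3, "a"));
            List<Integer> toDelete = new ArrayList<>();
            forEachRule(rules, (k, v) -> { if ("a".equals(v)) toDelete.add(k); });  // collect
            for (Integer k : toDelete) rules.remove(k);                             // then delete
            System.out.println(rules);  // {2=b}
        }
    }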
+    /**
+     * Clear all forwarding IPv4 rules for a given downstream. Needed because the client may
+     * still be connected on the downstream but the existing rules are not required anymore.
+     * Ex: upstream changed.
+     */
+    private void tetherOffloadRule4Clear(@NonNull final IpServer ipServer) {
+        if (!isUsingBpf()) return;
+
+        final HashMap<Inet4Address, ClientInfo> clients = mTetherClients.get(ipServer);
+        if (clients == null) return;
+
+        // Each value is as unique as its key, because the key is currently built from the
+        // client address of its ClientInfo. See #tetherOffloadClientAdd.
+        for (final ClientInfo client : clients.values()) {
+            tetherOffloadRuleClear(client);
+        }
+    }
 
+    private boolean isValidUpstreamIpv4Address(@NonNull final InetAddress addr) {
+        if (!(addr instanceof Inet4Address)) return false;
+        Inet4Address v4 = (Inet4Address) addr;
+        if (v4.isAnyLocalAddress() || v4.isLinkLocalAddress()
+                || v4.isLoopbackAddress() || v4.isMulticastAddress()) {
+            return false;
+        }
+        return true;
+    }
 
+    /**
+     * Call when UpstreamNetworkState may be changed.
+     * If the upstream has IPv4 for tethering, update this new UpstreamNetworkState
+     * to BpfCoordinator for building the upstream interface index mapping. Otherwise,
+     * clear all the existing rules if any.
+     *
+     * Note that this can be only called on handler thread.
+     */
+    public void updateUpstreamNetworkState(UpstreamNetworkState ns) {
+        if (!isUsingBpf()) return;
+
+        int upstreamIndex = 0;
+
         // This will not work on a network that is using 464xlat because hasIpv4Address will not be
         // true.
         // TODO: need to consider 464xlat.
-        if (lp == null || !lp.hasIpv4Address()) return;
+        if (ns != null && ns.linkProperties != null && ns.linkProperties.hasIpv4Address()) {
+            // TODO: support ether ip upstream interface.
+            final InterfaceParams params = mDeps.getInterfaceParams(
+                    ns.linkProperties.getInterfaceName());
+            if (params != null && !params.hasMacAddress /* raw ip upstream only */) {
+                upstreamIndex = params.index;
+            }
+        }
+        if (mLastIPv4UpstreamIfindex == upstreamIndex) return;
 
-        // Support raw ip upstream interface only.
-        final InterfaceParams params = mDeps.getInterfaceParams(lp.getInterfaceName());
-        if (params == null || params.hasMacAddress) return;
+        // Clear existing rules if the upstream interface has changed. The existing rules should
+        // be cleared before the upstream index mapping is cleared. This prevents IpServer or a
+        // conntrack event from using a no-longer-existing upstream interface index to build a
+        // removal key while removing the rules. We can't notify each IpServer to clear its
+        // rules, as IPv6TetheringCoordinator#updateUpstreamNetworkState does, because the
+        // IpServer may not handle the upstream-changed notification before the upstream index
+        // mapping changes.
+        if (mLastIPv4UpstreamIfindex != 0) {
+            // Clear all forwarding IPv4 rules for all downstreams.
+            for (final IpServer ipserver : mTetherClients.keySet()) {
+                tetherOffloadRule4Clear(ipserver);
+            }
+        }
 
-        Collection<InetAddress> addresses = lp.getAddresses();
-        for (InetAddress addr: addresses) {
-            if (addr instanceof Inet4Address) {
-                Inet4Address i4addr = (Inet4Address) addr;
-                if (!i4addr.isAnyLocalAddress() && !i4addr.isLinkLocalAddress()
-                        && !i4addr.isLoopbackAddress() && !i4addr.isMulticastAddress()) {
-                    mIpv4UpstreamIndices.put(i4addr, params.index);
-                }
+        // Don't update mLastIPv4UpstreamIfindex before clearing the existing rules, if any. It
+        // is needed to tell whether the out-of-date rules must be cleaned.
+        mLastIPv4UpstreamIfindex = upstreamIndex;
+
+        // If link properties are valid, build the upstream information mapping. Otherwise, clear
+        // the upstream interface index mapping, to ensure that any conntrack events that arrive
+        // after the upstream is lost do not incorrectly add rules pointing at the upstream.
+        if (upstreamIndex == 0) {
+            mIpv4UpstreamIndices.clear();
+            return;
+        }
+        Collection<InetAddress> addresses = ns.linkProperties.getAddresses();
+        for (final InetAddress addr: addresses) {
+            if (isValidUpstreamIpv4Address(addr)) {
+                mIpv4UpstreamIndices.put((Inet4Address) addr, upstreamIndex);
             }
         }
     }
@@ -793,6 +926,24 @@ public class BpfCoordinator {
         dumpDevmap(pw);
         pw.decreaseIndent();
 
+        pw.println("Client Information:");
+        pw.increaseIndent();
+        if (mTetherClients.isEmpty()) {
+            pw.println("<empty>");
+        } else {
+            pw.println(mTetherClients.toString());
+        }
+        pw.decreaseIndent();
+
+        pw.println("IPv4 Upstream Indices:");
+        pw.increaseIndent();
+        if (mIpv4UpstreamIndices.isEmpty()) {
+            pw.println("<empty>");
+        } else {
+            pw.println(mIpv4UpstreamIndices.toString());
+        }
+        pw.decreaseIndent();
+
         pw.println();
         pw.println("Forwarding counters:");
         pw.increaseIndent();
@@ -971,14 +1122,14 @@ public class BpfCoordinator {
             return;
         }
         if (map.isEmpty()) {
-            pw.println("No interface index");
+            pw.println("<empty>");
            return;
         }
         pw.println("ifindex (iface) -> ifindex (iface)");
         pw.increaseIndent();
         map.forEach((k, v) -> {
             // Only get upstream interface name. Just do the best to make the index readable.
-            // TODO: get downstream interface name because the index is either upstrema or
+            // TODO: get downstream interface name because the index is either upstream or
             // downstream interface in dev map.
             pw.println(String.format("%d (%s) -> %d (%s)", k.ifIndex, getIfName(k.ifIndex),
                     v.ifIndex, getIfName(v.ifIndex)));
@@ -1248,6 +1399,19 @@ public class BpfCoordinator {
         return null;
     }
 
+    @NonNull
+    private byte[] toIpv4MappedAddressBytes(Inet4Address ia4) {
+        final byte[] addr4 = ia4.getAddress();
+        final byte[] addr6 = new byte[16];
+        addr6[10] = (byte) 0xff;
+        addr6[11] = (byte) 0xff;
+        addr6[12] = addr4[0];
+        addr6[13] = addr4[1];
+        addr6[14] = addr4[2];
+        addr6[15] = addr4[3];
+        return addr6;
+    }
+
     // Support raw ip only.
     // TODO: add ether ip support.
     // TODO: parse CTA_PROTOINFO of conntrack event in ConntrackMonitor. For TCP, only add rules
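Aside: toIpv4MappedAddressBytes builds an IPv4-mapped IPv6 address (::ffff:a.b.c.d, RFC 4291 section 2.5.5.2), which is how these 16-byte value fields carry an IPv4 address. A standalone JDK check of the byte layout (the demo class is hypothetical; the method body mirrors the one above):

    import java.net.Inet4Address;
    import java.net.InetAddress;

    final class MappedAddressDemo {
        static byte[] toIpv4MappedAddressBytes(Inet4Address ia4) {
            final byte[] addr4 = ia4.getAddress();
            final byte[] addr6 = new byte[16];         // bytes 0-9 remain zero
            addr6[10] = (byte) 0xff;                   // 0xffff marks the ::ffff:0:0/96 prefix
            addr6[11] = (byte) 0xff;
            System.arraycopy(addr4, 0, addr6, 12, 4);  // IPv4 address in the low 32 bits
            return addr6;
        }

        public static void main(String[] args) throws Exception {
            Inet4Address v4 = (Inet4Address) InetAddress.getByName("192.0.2.1");
            // InetAddress.getByAddress() unwraps v4-mapped bytes back to an Inet4Address.
            System.out.println(InetAddress.getByAddress(toIpv4MappedAddressBytes(v4)));  // /192.0.2.1
        }
    }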
@@ -1292,19 +1456,6 @@
                     0 /* lastUsed, filled by bpf prog only */);
         }
 
-        @NonNull
-        private byte[] toIpv4MappedAddressBytes(Inet4Address ia4) {
-            final byte[] addr4 = ia4.getAddress();
-            final byte[] addr6 = new byte[16];
-            addr6[10] = (byte) 0xff;
-            addr6[11] = (byte) 0xff;
-            addr6[12] = addr4[0];
-            addr6[13] = addr4[1];
-            addr6[14] = addr4[2];
-            addr6[15] = addr4[3];
-            return addr6;
-        }
-
         public void accept(ConntrackEvent e) {
             final ClientInfo tetherClient = getClientInfo(e.tupleOrig.srcIp);
             if (tetherClient == null) return;
@@ -1318,8 +1469,23 @@
 
             if (e.msgType == (NetlinkConstants.NFNL_SUBSYS_CTNETLINK << 8
                     | NetlinkConstants.IPCTNL_MSG_CT_DELETE)) {
-                mBpfCoordinatorShim.tetherOffloadRuleRemove(UPSTREAM, upstream4Key);
-                mBpfCoordinatorShim.tetherOffloadRuleRemove(DOWNSTREAM, downstream4Key);
+                final boolean deletedUpstream = mBpfCoordinatorShim.tetherOffloadRuleRemove(
+                        UPSTREAM, upstream4Key);
+                final boolean deletedDownstream = mBpfCoordinatorShim.tetherOffloadRuleRemove(
+                        DOWNSTREAM, downstream4Key);
+
+                if (!deletedUpstream && !deletedDownstream) {
+                    // The rules may have already been removed by a lost client or upstream.
+                    return;
+                }
+
+                if (deletedUpstream != deletedDownstream) {
+                    Log.wtf(TAG, "The bidirectional rules should be removed concurrently ("
+                            + "upstream: " + deletedUpstream
+                            + ", downstream: " + deletedDownstream + ")");
+                    return;
+                }
+
                 maybeClearLimit(upstreamIndex);
                 return;
             }
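Aside: the consumer now distinguishes three outcomes when conntrack reports a dead flow: both rules removed (proceed to quota cleanup), neither removed (a client or upstream change already cleaned up), and one-sided removal (an invariant violation, since the rules are installed in pairs). A JDK-only sketch of that decision (hypothetical names, plain maps standing in for the BPF maps):

    import java.util.HashMap;
    import java.util.Map;

    final class PairedRuleRemovalDemo {
        static final Map<String, String> UPSTREAM4 = new HashMap<>();
        static final Map<String, String> DOWNSTREAM4 = new HashMap<>();

        static void onConntrackDelete(String key) {
            final boolean deletedUpstream = UPSTREAM4.remove(key) != null;
            final boolean deletedDownstream = DOWNSTREAM4.remove(key) != null;
            if (!deletedUpstream && !deletedDownstream) return;  // already cleaned up elsewhere
            if (deletedUpstream != deletedDownstream) {          // broken pairing invariant
                System.err.println("bidirectional rules out of sync for " + key);
                return;
            }
            System.out.println("pair removed for " + key + "; data limit may be released");
        }

        public static void main(String[] args) {
            UPSTREAM4.put("flow1", "fwd");
            DOWNSTREAM4.put("flow1", "rev");
            onConntrackDelete("flow1");  // both removed
            onConntrackDelete("flow1");  // neither existed: silently ignored
            UPSTREAM4.put("flow2", "fwd");
            onConntrackDelete("flow2");  // one-sided: reported
        }
    }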
@@ -1720,13 +1720,7 @@
     protected void handleNewUpstreamNetworkState(UpstreamNetworkState ns) {
         mIPv6TetheringCoordinator.updateUpstreamNetworkState(ns);
         mOffload.updateUpstreamNetworkState(ns);
-
-        // TODO: Delete all related offload rules which are using this upstream.
-        if (ns != null) {
-            // Add upstream index to the map. The upstream interface index is required while
-            // the conntrack event builds the offload rules.
-            mBpfCoordinator.addUpstreamIfindexToMap(ns.linkProperties);
-        }
+        mBpfCoordinator.updateUpstreamNetworkState(ns);
     }
 
     private void handleInterfaceServingStateActive(int mode, IpServer who) {
@@ -584,6 +584,7 @@ public class IpServerTest {
         inOrder.verify(mNetd).networkRemoveInterface(INetd.LOCAL_NET_ID, IFACE_NAME);
         inOrder.verify(mNetd).interfaceSetCfg(argThat(cfg -> IFACE_NAME.equals(cfg.ifName)));
         inOrder.verify(mAddressCoordinator).releaseDownstream(any());
+        inOrder.verify(mBpfCoordinator).tetherOffloadClientClear(mIpServer);
         inOrder.verify(mBpfCoordinator).stopMonitoring(mIpServer);
         inOrder.verify(mCallback).updateInterfaceState(
                 mIpServer, STATE_AVAILABLE, TETHER_ERROR_NO_ERROR);
@@ -70,6 +70,8 @@ import android.net.InetAddresses;
 import android.net.LinkAddress;
 import android.net.LinkProperties;
 import android.net.MacAddress;
+import android.net.Network;
+import android.net.NetworkCapabilities;
 import android.net.NetworkStats;
 import android.net.TetherOffloadRuleParcel;
 import android.net.TetherStatsParcel;
@@ -127,6 +129,8 @@ public class BpfCoordinatorTest {
     @Rule
     public final DevSdkIgnoreRule mIgnoreRule = new DevSdkIgnoreRule();
 
+    private static final int TEST_NET_ID = 24;
+
     private static final int UPSTREAM_IFINDEX = 1001;
     private static final int DOWNSTREAM_IFINDEX = 1002;
 
@@ -1365,7 +1369,10 @@ public class BpfCoordinatorTest {
         final LinkProperties lp = new LinkProperties();
         lp.setInterfaceName(UPSTREAM_IFACE);
         lp.addLinkAddress(new LinkAddress(PUBLIC_ADDR, 32 /* prefix length */));
-        coordinator.addUpstreamIfindexToMap(lp);
+        final NetworkCapabilities capabilities = new NetworkCapabilities()
+                .addTransportType(NetworkCapabilities.TRANSPORT_CELLULAR);
+        coordinator.updateUpstreamNetworkState(new UpstreamNetworkState(lp, capabilities,
+                new Network(TEST_NET_ID)));
     }
 
     private void setDownstreamAndClientInformationTo(final BpfCoordinator coordinator) {
@@ -1379,8 +1386,11 @@
         // was started.
         coordinator.startPolling();
 
-        // Needed because tetherOffloadRuleRemove of api31.BpfCoordinatorShimImpl only decreases
-        // the count while the entry is deleted. In the other words, deleteEntry returns true.
+        // Needed for two reasons: (1) BpfConntrackEventConsumer#accept only performs cleanup
+        // when both upstream and downstream rules are removed. (2) tetherOffloadRuleRemove of
+        // api31.BpfCoordinatorShimImpl only decreases the count while the entry is deleted.
+        // In other words, deleteEntry returns true.
+        doReturn(true).when(mBpfUpstream4Map).deleteEntry(any());
         doReturn(true).when(mBpfDownstream4Map).deleteEntry(any());
 
         // Needed because BpfCoordinator#addUpstreamIfindexToMap queries interface parameter for