Move network stats to FileRotator pattern.

Split existing network stats into two separate classes: a recorder
which generates historical data based on periodic counter snapshots,
and a collection of historical data with persistence logic.

Recorder keeps a pending history in memory until outstanding data
crosses a specific threshold.  Persisting is handled through a given
FileRotator.  This pattern significantly reduces disk churn and
memory overhead.  Separate UID data from UID tag data, enabling a
shorter rotation cycle.  Migrate existing stats into new structure.

Remove "xt" stats until iptables hooks are ready.  Avoid consuming
Entry values when recording into NetworkStatsHistory.  Assign
operation counts to default route interface.

Introduce "Rewriter" interface in FileRotator with methods to enable
rewriteAll().  Introduce IndentingPrintWriter to handle indenting in
dump() methods.

Bug: 5386531
Change-Id: Ibe086230a17999a197206ca62d45f266225fdff1
This commit is contained in:
Jeff Sharkey
2012-01-11 18:38:16 -08:00
parent e32bcef8e1
commit bfe82685e7
6 changed files with 1217 additions and 1033 deletions

View File

@@ -102,6 +102,15 @@ public class NetworkStats implements Parcelable {
this.operations = operations; this.operations = operations;
} }
public boolean isNegative() {
return rxBytes < 0 || rxPackets < 0 || txBytes < 0 || txPackets < 0 || operations < 0;
}
public boolean isEmpty() {
return rxBytes == 0 && rxPackets == 0 && txBytes == 0 && txPackets == 0
&& operations == 0;
}
@Override @Override
public String toString() { public String toString() {
final StringBuilder builder = new StringBuilder(); final StringBuilder builder = new StringBuilder();
@@ -343,6 +352,7 @@ public class NetworkStats implements Parcelable {
* on matching {@link #uid} and {@link #tag} rows. Ignores {@link #iface}, * on matching {@link #uid} and {@link #tag} rows. Ignores {@link #iface},
* since operation counts are at data layer. * since operation counts are at data layer.
*/ */
@Deprecated
public void spliceOperationsFrom(NetworkStats stats) { public void spliceOperationsFrom(NetworkStats stats) {
for (int i = 0; i < size; i++) { for (int i = 0; i < size; i++) {
final int j = stats.findIndex(IFACE_ALL, uid[i], set[i], tag[i]); final int j = stats.findIndex(IFACE_ALL, uid[i], set[i], tag[i]);
@@ -397,7 +407,7 @@ public class NetworkStats implements Parcelable {
* Return total of all fields represented by this snapshot object. * Return total of all fields represented by this snapshot object.
*/ */
public Entry getTotal(Entry recycle) { public Entry getTotal(Entry recycle) {
return getTotal(recycle, null, UID_ALL); return getTotal(recycle, null, UID_ALL, false);
} }
/** /**
@@ -405,7 +415,7 @@ public class NetworkStats implements Parcelable {
* the requested {@link #uid}. * the requested {@link #uid}.
*/ */
public Entry getTotal(Entry recycle, int limitUid) { public Entry getTotal(Entry recycle, int limitUid) {
return getTotal(recycle, null, limitUid); return getTotal(recycle, null, limitUid, false);
} }
/** /**
@@ -413,7 +423,11 @@ public class NetworkStats implements Parcelable {
* the requested {@link #iface}. * the requested {@link #iface}.
*/ */
public Entry getTotal(Entry recycle, HashSet<String> limitIface) { public Entry getTotal(Entry recycle, HashSet<String> limitIface) {
return getTotal(recycle, limitIface, UID_ALL); return getTotal(recycle, limitIface, UID_ALL, false);
}
public Entry getTotalIncludingTags(Entry recycle) {
return getTotal(recycle, null, UID_ALL, true);
} }
/** /**
@@ -423,7 +437,8 @@ public class NetworkStats implements Parcelable {
* @param limitIface Set of {@link #iface} to include in total; or {@code * @param limitIface Set of {@link #iface} to include in total; or {@code
* null} to include all ifaces. * null} to include all ifaces.
*/ */
private Entry getTotal(Entry recycle, HashSet<String> limitIface, int limitUid) { private Entry getTotal(
Entry recycle, HashSet<String> limitIface, int limitUid, boolean includeTags) {
final Entry entry = recycle != null ? recycle : new Entry(); final Entry entry = recycle != null ? recycle : new Entry();
entry.iface = IFACE_ALL; entry.iface = IFACE_ALL;
@@ -442,7 +457,7 @@ public class NetworkStats implements Parcelable {
if (matchesUid && matchesIface) { if (matchesUid && matchesIface) {
// skip specific tags, since already counted in TAG_NONE // skip specific tags, since already counted in TAG_NONE
if (tag[i] != TAG_NONE) continue; if (tag[i] != TAG_NONE && !includeTags) continue;
entry.rxBytes += rxBytes[i]; entry.rxBytes += rxBytes[i];
entry.rxPackets += rxPackets[i]; entry.rxPackets += rxPackets[i];
@@ -460,7 +475,7 @@ public class NetworkStats implements Parcelable {
* time, and that none of them have disappeared. * time, and that none of them have disappeared.
*/ */
public NetworkStats subtract(NetworkStats right) { public NetworkStats subtract(NetworkStats right) {
return subtract(this, right, null); return subtract(this, right, null, null);
} }
/** /**
@@ -471,12 +486,12 @@ public class NetworkStats implements Parcelable {
* If counters have rolled backwards, they are clamped to {@code 0} and * If counters have rolled backwards, they are clamped to {@code 0} and
* reported to the given {@link NonMonotonicObserver}. * reported to the given {@link NonMonotonicObserver}.
*/ */
public static NetworkStats subtract( public static <C> NetworkStats subtract(
NetworkStats left, NetworkStats right, NonMonotonicObserver observer) { NetworkStats left, NetworkStats right, NonMonotonicObserver<C> observer, C cookie) {
long deltaRealtime = left.elapsedRealtime - right.elapsedRealtime; long deltaRealtime = left.elapsedRealtime - right.elapsedRealtime;
if (deltaRealtime < 0) { if (deltaRealtime < 0) {
if (observer != null) { if (observer != null) {
observer.foundNonMonotonic(left, -1, right, -1); observer.foundNonMonotonic(left, -1, right, -1, cookie);
} }
deltaRealtime = 0; deltaRealtime = 0;
} }
@@ -510,7 +525,7 @@ public class NetworkStats implements Parcelable {
if (entry.rxBytes < 0 || entry.rxPackets < 0 || entry.txBytes < 0 if (entry.rxBytes < 0 || entry.rxPackets < 0 || entry.txBytes < 0
|| entry.txPackets < 0 || entry.operations < 0) { || entry.txPackets < 0 || entry.operations < 0) {
if (observer != null) { if (observer != null) {
observer.foundNonMonotonic(left, i, right, j); observer.foundNonMonotonic(left, i, right, j, cookie);
} }
entry.rxBytes = Math.max(entry.rxBytes, 0); entry.rxBytes = Math.max(entry.rxBytes, 0);
entry.rxPackets = Math.max(entry.rxPackets, 0); entry.rxPackets = Math.max(entry.rxPackets, 0);
@@ -663,8 +678,8 @@ public class NetworkStats implements Parcelable {
} }
}; };
public interface NonMonotonicObserver { public interface NonMonotonicObserver<C> {
public void foundNonMonotonic( public void foundNonMonotonic(
NetworkStats left, int leftIndex, NetworkStats right, int rightIndex); NetworkStats left, int leftIndex, NetworkStats right, int rightIndex, C cookie);
} }
} }

View File

@@ -26,16 +26,18 @@ import static android.net.NetworkStatsHistory.DataStreamUtils.writeVarLongArray;
import static android.net.NetworkStatsHistory.Entry.UNKNOWN; import static android.net.NetworkStatsHistory.Entry.UNKNOWN;
import static android.net.NetworkStatsHistory.ParcelUtils.readLongArray; import static android.net.NetworkStatsHistory.ParcelUtils.readLongArray;
import static android.net.NetworkStatsHistory.ParcelUtils.writeLongArray; import static android.net.NetworkStatsHistory.ParcelUtils.writeLongArray;
import static com.android.internal.util.ArrayUtils.total;
import android.os.Parcel; import android.os.Parcel;
import android.os.Parcelable; import android.os.Parcelable;
import android.util.MathUtils; import android.util.MathUtils;
import com.android.internal.util.IndentingPrintWriter;
import java.io.CharArrayWriter; import java.io.CharArrayWriter;
import java.io.DataInputStream; import java.io.DataInputStream;
import java.io.DataOutputStream; import java.io.DataOutputStream;
import java.io.IOException; import java.io.IOException;
import java.io.PrintWriter;
import java.net.ProtocolException; import java.net.ProtocolException;
import java.util.Arrays; import java.util.Arrays;
import java.util.Random; import java.util.Random;
@@ -74,6 +76,7 @@ public class NetworkStatsHistory implements Parcelable {
private long[] txBytes; private long[] txBytes;
private long[] txPackets; private long[] txPackets;
private long[] operations; private long[] operations;
private long totalBytes;
public static class Entry { public static class Entry {
public static final long UNKNOWN = -1; public static final long UNKNOWN = -1;
@@ -106,6 +109,12 @@ public class NetworkStatsHistory implements Parcelable {
if ((fields & FIELD_TX_PACKETS) != 0) txPackets = new long[initialSize]; if ((fields & FIELD_TX_PACKETS) != 0) txPackets = new long[initialSize];
if ((fields & FIELD_OPERATIONS) != 0) operations = new long[initialSize]; if ((fields & FIELD_OPERATIONS) != 0) operations = new long[initialSize];
bucketCount = 0; bucketCount = 0;
totalBytes = 0;
}
public NetworkStatsHistory(NetworkStatsHistory existing, long bucketDuration) {
this(bucketDuration, existing.estimateResizeBuckets(bucketDuration));
recordEntireHistory(existing);
} }
public NetworkStatsHistory(Parcel in) { public NetworkStatsHistory(Parcel in) {
@@ -118,6 +127,7 @@ public class NetworkStatsHistory implements Parcelable {
txPackets = readLongArray(in); txPackets = readLongArray(in);
operations = readLongArray(in); operations = readLongArray(in);
bucketCount = bucketStart.length; bucketCount = bucketStart.length;
totalBytes = in.readLong();
} }
/** {@inheritDoc} */ /** {@inheritDoc} */
@@ -130,6 +140,7 @@ public class NetworkStatsHistory implements Parcelable {
writeLongArray(out, txBytes, bucketCount); writeLongArray(out, txBytes, bucketCount);
writeLongArray(out, txPackets, bucketCount); writeLongArray(out, txPackets, bucketCount);
writeLongArray(out, operations, bucketCount); writeLongArray(out, operations, bucketCount);
out.writeLong(totalBytes);
} }
public NetworkStatsHistory(DataInputStream in) throws IOException { public NetworkStatsHistory(DataInputStream in) throws IOException {
@@ -144,6 +155,7 @@ public class NetworkStatsHistory implements Parcelable {
txPackets = new long[bucketStart.length]; txPackets = new long[bucketStart.length];
operations = new long[bucketStart.length]; operations = new long[bucketStart.length];
bucketCount = bucketStart.length; bucketCount = bucketStart.length;
totalBytes = total(rxBytes) + total(txBytes);
break; break;
} }
case VERSION_ADD_PACKETS: case VERSION_ADD_PACKETS:
@@ -158,6 +170,7 @@ public class NetworkStatsHistory implements Parcelable {
txPackets = readVarLongArray(in); txPackets = readVarLongArray(in);
operations = readVarLongArray(in); operations = readVarLongArray(in);
bucketCount = bucketStart.length; bucketCount = bucketStart.length;
totalBytes = total(rxBytes) + total(txBytes);
break; break;
} }
default: { default: {
@@ -207,6 +220,13 @@ public class NetworkStatsHistory implements Parcelable {
} }
} }
/**
* Return total bytes represented by this history.
*/
public long getTotalBytes() {
return totalBytes;
}
/** /**
* Return index of bucket that contains or is immediately before the * Return index of bucket that contains or is immediately before the
* requested time. * requested time.
@@ -266,13 +286,16 @@ public class NetworkStatsHistory implements Parcelable {
* distribute across internal buckets, creating new buckets as needed. * distribute across internal buckets, creating new buckets as needed.
*/ */
public void recordData(long start, long end, NetworkStats.Entry entry) { public void recordData(long start, long end, NetworkStats.Entry entry) {
if (entry.rxBytes < 0 || entry.rxPackets < 0 || entry.txBytes < 0 || entry.txPackets < 0 long rxBytes = entry.rxBytes;
|| entry.operations < 0) { long rxPackets = entry.rxPackets;
long txBytes = entry.txBytes;
long txPackets = entry.txPackets;
long operations = entry.operations;
if (entry.isNegative()) {
throw new IllegalArgumentException("tried recording negative data"); throw new IllegalArgumentException("tried recording negative data");
} }
if (entry.rxBytes == 0 && entry.rxPackets == 0 && entry.txBytes == 0 && entry.txPackets == 0 if (entry.isEmpty()) {
&& entry.operations == 0) {
// nothing to record; skip
return; return;
} }
@@ -295,21 +318,23 @@ public class NetworkStatsHistory implements Parcelable {
if (overlap <= 0) continue; if (overlap <= 0) continue;
// integer math each time is faster than floating point // integer math each time is faster than floating point
final long fracRxBytes = entry.rxBytes * overlap / duration; final long fracRxBytes = rxBytes * overlap / duration;
final long fracRxPackets = entry.rxPackets * overlap / duration; final long fracRxPackets = rxPackets * overlap / duration;
final long fracTxBytes = entry.txBytes * overlap / duration; final long fracTxBytes = txBytes * overlap / duration;
final long fracTxPackets = entry.txPackets * overlap / duration; final long fracTxPackets = txPackets * overlap / duration;
final long fracOperations = entry.operations * overlap / duration; final long fracOperations = operations * overlap / duration;
addLong(activeTime, i, overlap); addLong(activeTime, i, overlap);
addLong(rxBytes, i, fracRxBytes); entry.rxBytes -= fracRxBytes; addLong(this.rxBytes, i, fracRxBytes); rxBytes -= fracRxBytes;
addLong(rxPackets, i, fracRxPackets); entry.rxPackets -= fracRxPackets; addLong(this.rxPackets, i, fracRxPackets); rxPackets -= fracRxPackets;
addLong(txBytes, i, fracTxBytes); entry.txBytes -= fracTxBytes; addLong(this.txBytes, i, fracTxBytes); txBytes -= fracTxBytes;
addLong(txPackets, i, fracTxPackets); entry.txPackets -= fracTxPackets; addLong(this.txPackets, i, fracTxPackets); txPackets -= fracTxPackets;
addLong(operations, i, fracOperations); entry.operations -= fracOperations; addLong(this.operations, i, fracOperations); operations -= fracOperations;
duration -= overlap; duration -= overlap;
} }
totalBytes += entry.rxBytes + entry.txBytes;
} }
/** /**
@@ -394,6 +419,7 @@ public class NetworkStatsHistory implements Parcelable {
/** /**
* Remove buckets older than requested cutoff. * Remove buckets older than requested cutoff.
*/ */
@Deprecated
public void removeBucketsBefore(long cutoff) { public void removeBucketsBefore(long cutoff) {
int i; int i;
for (i = 0; i < bucketCount; i++) { for (i = 0; i < bucketCount; i++) {
@@ -415,6 +441,8 @@ public class NetworkStatsHistory implements Parcelable {
if (txPackets != null) txPackets = Arrays.copyOfRange(txPackets, i, length); if (txPackets != null) txPackets = Arrays.copyOfRange(txPackets, i, length);
if (operations != null) operations = Arrays.copyOfRange(operations, i, length); if (operations != null) operations = Arrays.copyOfRange(operations, i, length);
bucketCount -= i; bucketCount -= i;
// TODO: subtract removed values from totalBytes
} }
} }
@@ -527,18 +555,16 @@ public class NetworkStatsHistory implements Parcelable {
return (long) (start + (r.nextFloat() * (end - start))); return (long) (start + (r.nextFloat() * (end - start)));
} }
public void dump(String prefix, PrintWriter pw, boolean fullHistory) { public void dump(IndentingPrintWriter pw, boolean fullHistory) {
pw.print(prefix);
pw.print("NetworkStatsHistory: bucketDuration="); pw.println(bucketDuration); pw.print("NetworkStatsHistory: bucketDuration="); pw.println(bucketDuration);
pw.increaseIndent();
final int start = fullHistory ? 0 : Math.max(0, bucketCount - 32); final int start = fullHistory ? 0 : Math.max(0, bucketCount - 32);
if (start > 0) { if (start > 0) {
pw.print(prefix);
pw.print("(omitting "); pw.print(start); pw.println(" buckets)"); pw.print("(omitting "); pw.print(start); pw.println(" buckets)");
} }
for (int i = start; i < bucketCount; i++) { for (int i = start; i < bucketCount; i++) {
pw.print(prefix);
pw.print("bucketStart="); pw.print(bucketStart[i]); pw.print("bucketStart="); pw.print(bucketStart[i]);
if (activeTime != null) { pw.print(" activeTime="); pw.print(activeTime[i]); } if (activeTime != null) { pw.print(" activeTime="); pw.print(activeTime[i]); }
if (rxBytes != null) { pw.print(" rxBytes="); pw.print(rxBytes[i]); } if (rxBytes != null) { pw.print(" rxBytes="); pw.print(rxBytes[i]); }
@@ -548,12 +574,14 @@ public class NetworkStatsHistory implements Parcelable {
if (operations != null) { pw.print(" operations="); pw.print(operations[i]); } if (operations != null) { pw.print(" operations="); pw.print(operations[i]); }
pw.println(); pw.println();
} }
pw.decreaseIndent();
} }
@Override @Override
public String toString() { public String toString() {
final CharArrayWriter writer = new CharArrayWriter(); final CharArrayWriter writer = new CharArrayWriter();
dump("", new PrintWriter(writer), false); dump(new IndentingPrintWriter(writer, " "), false);
return writer.toString(); return writer.toString();
} }
@@ -579,6 +607,10 @@ public class NetworkStatsHistory implements Parcelable {
if (array != null) array[i] += value; if (array != null) array[i] += value;
} }
public int estimateResizeBuckets(long newBucketDuration) {
return (int) (size() * getBucketDuration() / newBucketDuration);
}
/** /**
* Utility methods for interacting with {@link DataInputStream} and * Utility methods for interacting with {@link DataInputStream} and
* {@link DataOutputStream}, mostly dealing with writing partial arrays. * {@link DataOutputStream}, mostly dealing with writing partial arrays.

View File

@@ -195,7 +195,7 @@ public class TrafficStats {
// subtract starting values and return delta // subtract starting values and return delta
final NetworkStats profilingStop = getDataLayerSnapshotForUid(context); final NetworkStats profilingStop = getDataLayerSnapshotForUid(context);
final NetworkStats profilingDelta = NetworkStats.subtract( final NetworkStats profilingDelta = NetworkStats.subtract(
profilingStop, sActiveProfilingStart, null); profilingStop, sActiveProfilingStart, null, null);
sActiveProfilingStart = null; sActiveProfilingStart = null;
return profilingDelta; return profilingDelta;
} }

View File

@@ -0,0 +1,510 @@
/*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.server.net;
import static android.net.NetworkStats.IFACE_ALL;
import static android.net.NetworkStats.SET_ALL;
import static android.net.NetworkStats.SET_DEFAULT;
import static android.net.NetworkStats.TAG_NONE;
import static android.net.NetworkStats.UID_ALL;
import static android.net.TrafficStats.UID_REMOVED;
import android.net.NetworkIdentity;
import android.net.NetworkStats;
import android.net.NetworkStatsHistory;
import android.net.NetworkTemplate;
import android.net.TrafficStats;
import android.text.format.DateUtils;
import com.android.internal.os.AtomicFile;
import com.android.internal.util.FileRotator;
import com.android.internal.util.IndentingPrintWriter;
import com.android.internal.util.Objects;
import com.google.android.collect.Lists;
import com.google.android.collect.Maps;
import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.net.ProtocolException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import libcore.io.IoUtils;
/**
 * Collection of {@link NetworkStatsHistory}, stored based on combined key of
 * {@link NetworkIdentitySet}, UID, set, and tag. Knows how to persist itself
 * to a unified stream format via {@link #write(DataOutputStream)}, and how to
 * rebuild itself from both that format and several legacy on-disk formats.
 */
public class NetworkStatsCollection implements FileRotator.Reader {
    private static final String TAG = "NetworkStatsCollection";

    /** File header magic number: "ANET" */
    private static final int FILE_MAGIC = 0x414E4554;

    // Version of the legacy per-network file format.
    private static final int VERSION_NETWORK_INIT = 1;

    // Versions of the legacy per-UID file format; later versions added the
    // identity, tag, and set dimensions to each recorded history.
    private static final int VERSION_UID_INIT = 1;
    private static final int VERSION_UID_WITH_IDENT = 2;
    private static final int VERSION_UID_WITH_TAG = 3;
    private static final int VERSION_UID_WITH_SET = 4;

    // Version of the current unified format written by write().
    private static final int VERSION_UNIFIED_INIT = 16;

    // All recorded histories, keyed by (ident, uid, set, tag).
    private HashMap<Key, NetworkStatsHistory> mStats = Maps.newHashMap();

    // Duration of each history bucket, in milliseconds.
    private long mBucketDuration;

    // Observed time range across all recorded histories; initialized to the
    // sentinel values checked by isEmpty().
    private long mStartMillis;
    private long mEndMillis;
    // Running total of rx+tx bytes across all recorded histories.
    private long mTotalBytes;
    // True when data has been recorded since the last clearDirty().
    private boolean mDirty;

    public NetworkStatsCollection(long bucketDuration) {
        mBucketDuration = bucketDuration;
        reset();
    }

    /** Clear all recorded histories and reset bookkeeping. */
    public void reset() {
        mStats.clear();
        // sentinel values; isEmpty() tests for exactly this state
        mStartMillis = Long.MAX_VALUE;
        mEndMillis = Long.MIN_VALUE;
        mTotalBytes = 0;
        mDirty = false;
    }

    /** Return earliest timestamp recorded, or {@code Long.MAX_VALUE} when empty. */
    public long getStartMillis() {
        return mStartMillis;
    }

    /** Return latest timestamp recorded, or {@code Long.MIN_VALUE} when empty. */
    public long getEndMillis() {
        return mEndMillis;
    }

    /** Return total rx+tx bytes recorded into this collection. */
    public long getTotalBytes() {
        return mTotalBytes;
    }

    /** Return true if data has been recorded since the last {@link #clearDirty()}. */
    public boolean isDirty() {
        return mDirty;
    }

    public void clearDirty() {
        mDirty = false;
    }

    /** Return true when no data has ever been recorded into this collection. */
    public boolean isEmpty() {
        return mStartMillis == Long.MAX_VALUE && mEndMillis == Long.MIN_VALUE;
    }

    /**
     * Combine all {@link NetworkStatsHistory} in this collection which match
     * the requested parameters.
     */
    public NetworkStatsHistory getHistory(
            NetworkTemplate template, int uid, int set, int tag, int fields) {
        final NetworkStatsHistory combined = new NetworkStatsHistory(
                mBucketDuration, estimateBuckets(), fields);
        for (Map.Entry<Key, NetworkStatsHistory> entry : mStats.entrySet()) {
            final Key key = entry.getKey();
            // SET_ALL acts as a wildcard over the set dimension
            final boolean setMatches = set == SET_ALL || key.set == set;
            if (key.uid == uid && setMatches && key.tag == tag
                    && templateMatches(template, key.ident)) {
                combined.recordEntireHistory(entry.getValue());
            }
        }
        return combined;
    }

    /**
     * Summarize all {@link NetworkStatsHistory} in this collection which match
     * the requested parameters.
     */
    public NetworkStats getSummary(NetworkTemplate template, long start, long end) {
        final long now = System.currentTimeMillis();

        final NetworkStats stats = new NetworkStats(end - start, 24);
        final NetworkStats.Entry entry = new NetworkStats.Entry();
        NetworkStatsHistory.Entry historyEntry = null;

        for (Map.Entry<Key, NetworkStatsHistory> mapEntry : mStats.entrySet()) {
            final Key key = mapEntry.getKey();
            if (templateMatches(template, key.ident)) {
                final NetworkStatsHistory history = mapEntry.getValue();
                // recycle historyEntry across iterations to avoid allocations
                historyEntry = history.getValues(start, end, now, historyEntry);

                entry.iface = IFACE_ALL;
                entry.uid = key.uid;
                entry.set = key.set;
                entry.tag = key.tag;
                entry.rxBytes = historyEntry.rxBytes;
                entry.rxPackets = historyEntry.rxPackets;
                entry.txBytes = historyEntry.txBytes;
                entry.txPackets = historyEntry.txPackets;
                entry.operations = historyEntry.operations;

                if (!entry.isEmpty()) {
                    stats.combineValues(entry);
                }
            }
        }

        return stats;
    }

    /**
     * Record given {@link NetworkStats.Entry} into this collection.
     */
    public void recordData(NetworkIdentitySet ident, int uid, int set, int tag, long start,
            long end, NetworkStats.Entry entry) {
        noteRecordedHistory(start, end, entry.rxBytes + entry.txBytes);
        findOrCreateHistory(ident, uid, set, tag).recordData(start, end, entry);
    }

    /**
     * Record given {@link NetworkStatsHistory} into this collection, merging
     * into any existing history under the same key.
     */
    private void recordHistory(Key key, NetworkStatsHistory history) {
        if (history.size() == 0) return;
        noteRecordedHistory(history.getStart(), history.getEnd(), history.getTotalBytes());

        final NetworkStatsHistory existing = mStats.get(key);
        if (existing != null) {
            existing.recordEntireHistory(history);
        } else {
            mStats.put(key, history);
        }
    }

    /**
     * Record all {@link NetworkStatsHistory} contained in the given collection
     * into this collection.
     */
    public void recordCollection(NetworkStatsCollection another) {
        for (Map.Entry<Key, NetworkStatsHistory> entry : another.mStats.entrySet()) {
            recordHistory(entry.getKey(), entry.getValue());
        }
    }

    private NetworkStatsHistory findOrCreateHistory(
            NetworkIdentitySet ident, int uid, int set, int tag) {
        final Key key = new Key(ident, uid, set, tag);
        final NetworkStatsHistory existing = mStats.get(key);

        // update when no existing, or when bucket duration changed
        NetworkStatsHistory updated = null;
        if (existing == null) {
            updated = new NetworkStatsHistory(mBucketDuration, 10);
        } else if (existing.getBucketDuration() != mBucketDuration) {
            // resample the existing history into the current bucket duration
            updated = new NetworkStatsHistory(existing, mBucketDuration);
        }

        if (updated != null) {
            mStats.put(key, updated);
            return updated;
        } else {
            return existing;
        }
    }

    /** {@inheritDoc} */
    public void read(InputStream in) throws IOException {
        read(new DataInputStream(in));
    }

    /**
     * Read a collection in the unified stream format written by
     * {@link #write(DataOutputStream)}, merging it into this collection.
     *
     * @throws ProtocolException when magic number or version is unexpected.
     */
    public void read(DataInputStream in) throws IOException {
        // verify file magic header intact
        final int magic = in.readInt();
        if (magic != FILE_MAGIC) {
            throw new ProtocolException("unexpected magic: " + magic);
        }

        final int version = in.readInt();
        switch (version) {
            case VERSION_UNIFIED_INIT: {
                // uid := size *(NetworkIdentitySet size *(uid set tag NetworkStatsHistory))
                final int identSize = in.readInt();
                for (int i = 0; i < identSize; i++) {
                    final NetworkIdentitySet ident = new NetworkIdentitySet(in);

                    final int size = in.readInt();
                    for (int j = 0; j < size; j++) {
                        final int uid = in.readInt();
                        final int set = in.readInt();
                        final int tag = in.readInt();

                        final Key key = new Key(ident, uid, set, tag);
                        final NetworkStatsHistory history = new NetworkStatsHistory(in);
                        recordHistory(key, history);
                    }
                }
                break;
            }
            default: {
                throw new ProtocolException("unexpected version: " + version);
            }
        }
    }

    /**
     * Write this collection in the unified stream format readable by
     * {@link #read(DataInputStream)}.
     */
    public void write(DataOutputStream out) throws IOException {
        // cluster key lists grouped by ident
        final HashMap<NetworkIdentitySet, ArrayList<Key>> keysByIdent = Maps.newHashMap();
        for (Key key : mStats.keySet()) {
            ArrayList<Key> keys = keysByIdent.get(key.ident);
            if (keys == null) {
                keys = Lists.newArrayList();
                keysByIdent.put(key.ident, keys);
            }
            keys.add(key);
        }

        out.writeInt(FILE_MAGIC);
        out.writeInt(VERSION_UNIFIED_INIT);

        out.writeInt(keysByIdent.size());
        for (NetworkIdentitySet ident : keysByIdent.keySet()) {
            final ArrayList<Key> keys = keysByIdent.get(ident);
            ident.writeToStream(out);

            out.writeInt(keys.size());
            for (Key key : keys) {
                final NetworkStatsHistory history = mStats.get(key);
                out.writeInt(key.uid);
                out.writeInt(key.set);
                out.writeInt(key.tag);
                history.writeToStream(out);
            }
        }

        out.flush();
    }

    /**
     * Migrate a legacy per-network stats file into this collection. Histories
     * are recorded against {@link NetworkStats#UID_ALL}. A missing file is
     * silently ignored.
     */
    @Deprecated
    public void readLegacyNetwork(File file) throws IOException {
        final AtomicFile inputFile = new AtomicFile(file);

        DataInputStream in = null;
        try {
            in = new DataInputStream(new BufferedInputStream(inputFile.openRead()));

            // verify file magic header intact
            final int magic = in.readInt();
            if (magic != FILE_MAGIC) {
                throw new ProtocolException("unexpected magic: " + magic);
            }

            final int version = in.readInt();
            switch (version) {
                case VERSION_NETWORK_INIT: {
                    // network := size *(NetworkIdentitySet NetworkStatsHistory)
                    final int size = in.readInt();
                    for (int i = 0; i < size; i++) {
                        final NetworkIdentitySet ident = new NetworkIdentitySet(in);
                        final NetworkStatsHistory history = new NetworkStatsHistory(in);

                        final Key key = new Key(ident, UID_ALL, SET_ALL, TAG_NONE);
                        recordHistory(key, history);
                    }
                    break;
                }
                default: {
                    throw new ProtocolException("unexpected version: " + version);
                }
            }
        } catch (FileNotFoundException e) {
            // missing stats is okay, probably first boot
        } finally {
            IoUtils.closeQuietly(in);
        }
    }

    /**
     * Migrate a legacy per-UID stats file into this collection. A missing file
     * is silently ignored.
     *
     * @param onlyTags when true, record only tagged (non-{@link NetworkStats#TAG_NONE})
     *            histories; when false, record only untagged histories.
     */
    @Deprecated
    public void readLegacyUid(File file, boolean onlyTags) throws IOException {
        final AtomicFile inputFile = new AtomicFile(file);

        DataInputStream in = null;
        try {
            in = new DataInputStream(new BufferedInputStream(inputFile.openRead()));

            // verify file magic header intact
            final int magic = in.readInt();
            if (magic != FILE_MAGIC) {
                throw new ProtocolException("unexpected magic: " + magic);
            }

            final int version = in.readInt();
            switch (version) {
                case VERSION_UID_INIT: {
                    // uid := size *(UID NetworkStatsHistory)

                    // drop this data version, since we don't have a good
                    // mapping into NetworkIdentitySet.
                    break;
                }
                case VERSION_UID_WITH_IDENT: {
                    // uid := size *(NetworkIdentitySet size *(UID NetworkStatsHistory))

                    // drop this data version, since this version only existed
                    // for a short time.
                    break;
                }
                case VERSION_UID_WITH_TAG:
                case VERSION_UID_WITH_SET: {
                    // uid := size *(NetworkIdentitySet size *(uid set tag NetworkStatsHistory))
                    final int identSize = in.readInt();
                    for (int i = 0; i < identSize; i++) {
                        final NetworkIdentitySet ident = new NetworkIdentitySet(in);

                        final int size = in.readInt();
                        for (int j = 0; j < size; j++) {
                            final int uid = in.readInt();
                            // VERSION_UID_WITH_TAG predates the set dimension
                            final int set = (version >= VERSION_UID_WITH_SET) ? in.readInt()
                                    : SET_DEFAULT;
                            final int tag = in.readInt();

                            final Key key = new Key(ident, uid, set, tag);
                            final NetworkStatsHistory history = new NetworkStatsHistory(in);

                            if ((tag == TAG_NONE) != onlyTags) {
                                recordHistory(key, history);
                            }
                        }
                    }
                    break;
                }
                default: {
                    throw new ProtocolException("unexpected version: " + version);
                }
            }
        } catch (FileNotFoundException e) {
            // missing stats is okay, probably first boot
        } finally {
            IoUtils.closeQuietly(in);
        }
    }

    /**
     * Remove any {@link NetworkStatsHistory} attributed to the requested UID,
     * moving any {@link NetworkStats#TAG_NONE} series to
     * {@link TrafficStats#UID_REMOVED}.
     */
    public void removeUid(int uid) {
        // snapshot keys to avoid concurrent modification while removing below
        final ArrayList<Key> knownKeys = Lists.newArrayList();
        knownKeys.addAll(mStats.keySet());

        // migrate all UID stats into special "removed" bucket
        for (Key key : knownKeys) {
            if (key.uid == uid) {
                // only migrate combined TAG_NONE history
                if (key.tag == TAG_NONE) {
                    final NetworkStatsHistory uidHistory = mStats.get(key);
                    final NetworkStatsHistory removedHistory = findOrCreateHistory(
                            key.ident, UID_REMOVED, SET_DEFAULT, TAG_NONE);
                    removedHistory.recordEntireHistory(uidHistory);
                }
                mStats.remove(key);
                mDirty = true;
            }
        }
    }

    /** Update time range, byte total, and dirty flag after recording history. */
    private void noteRecordedHistory(long startMillis, long endMillis, long totalBytes) {
        if (startMillis < mStartMillis) mStartMillis = startMillis;
        if (endMillis > mEndMillis) mEndMillis = endMillis;
        mTotalBytes += totalBytes;
        mDirty = true;
    }

    /** Estimate buckets needed to span the observed range, capped at five weeks. */
    private int estimateBuckets() {
        return (int) (Math.min(mEndMillis - mStartMillis, DateUtils.WEEK_IN_MILLIS * 5)
                / mBucketDuration);
    }

    /** Dump all histories, sorted by key, for debugging purposes. */
    public void dump(IndentingPrintWriter pw) {
        final ArrayList<Key> keys = Lists.newArrayList();
        keys.addAll(mStats.keySet());
        // sorted for stable dump output
        Collections.sort(keys);

        for (Key key : keys) {
            pw.print("ident="); pw.print(key.ident.toString());
            pw.print(" uid="); pw.print(key.uid);
            pw.print(" set="); pw.print(NetworkStats.setToString(key.set));
            pw.print(" tag="); pw.println(NetworkStats.tagToString(key.tag));

            final NetworkStatsHistory history = mStats.get(key);
            pw.increaseIndent();
            history.dump(pw, true);
            pw.decreaseIndent();
        }
    }

    /**
     * Test if given {@link NetworkTemplate} matches any {@link NetworkIdentity}
     * in the given {@link NetworkIdentitySet}.
     */
    private static boolean templateMatches(NetworkTemplate template, NetworkIdentitySet identSet) {
        for (NetworkIdentity ident : identSet) {
            if (template.matches(ident)) {
                return true;
            }
        }
        return false;
    }

    /** Immutable lookup key combining (ident, uid, set, tag). */
    private static class Key implements Comparable<Key> {
        public final NetworkIdentitySet ident;
        public final int uid;
        public final int set;
        public final int tag;

        // precomputed, since keys are immutable and hashed on every lookup
        private final int hashCode;

        public Key(NetworkIdentitySet ident, int uid, int set, int tag) {
            this.ident = ident;
            this.uid = uid;
            this.set = set;
            this.tag = tag;
            hashCode = Objects.hashCode(ident, uid, set, tag);
        }

        @Override
        public int hashCode() {
            return hashCode;
        }

        @Override
        public boolean equals(Object obj) {
            if (obj instanceof Key) {
                final Key key = (Key) obj;
                return uid == key.uid && set == key.set && tag == key.tag
                        && Objects.equal(ident, key.ident);
            }
            return false;
        }

        /** {@inheritDoc} */
        public int compareTo(Key another) {
            // NOTE(review): orders by uid only, so it is inconsistent with
            // equals(); acceptable for the sorted dump() above, but this key
            // should not be used in sorted collections like TreeMap.
            return Integer.compare(uid, another.uid);
        }
    }
}

View File

@@ -0,0 +1,341 @@
/*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.server.net;
import static android.net.NetworkStats.TAG_NONE;
import static com.android.internal.util.Preconditions.checkNotNull;
import android.net.NetworkStats;
import android.net.NetworkStats.NonMonotonicObserver;
import android.net.NetworkStatsHistory;
import android.net.NetworkTemplate;
import android.net.TrafficStats;
import android.util.Log;
import android.util.Slog;
import com.android.internal.util.FileRotator;
import com.android.internal.util.IndentingPrintWriter;
import com.google.android.collect.Sets;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.lang.ref.WeakReference;
import java.util.HashSet;
import java.util.Map;
/**
* Logic to record deltas between periodic {@link NetworkStats} snapshots into
* {@link NetworkStatsHistory} that belong to {@link NetworkStatsCollection}.
* Keeps pending changes in memory until they pass a specific threshold, in
* bytes. Uses {@link FileRotator} for persistence logic.
* <p>
* Not inherently thread safe.
*/
public class NetworkStatsRecorder {
    private static final String TAG = "NetworkStatsRecorder";
    private static final boolean LOGD = true;

    private final FileRotator mRotator;
    private final NonMonotonicObserver<String> mObserver;
    private final String mCookie;

    private final long mBucketDuration;
    private final long mPersistThresholdBytes;
    // When true, record only tagged entries; when false, only TAG_NONE entries.
    private final boolean mOnlyTags;

    // Last counter snapshot; deltas are computed against this. Null until the
    // first bootstrap snapshot has been recorded.
    private NetworkStats mLastSnapshot;

    // Deltas not yet persisted through mRotator.
    private final NetworkStatsCollection mPending;
    // Deltas accumulated since boot; never persisted, reset only explicitly.
    private final NetworkStatsCollection mSinceBoot;

    private final CombiningRewriter mPendingRewriter;

    // Lazily-loaded view of the complete on-disk history plus pending data.
    // Null until getOrLoadCompleteLocked() is first called; held weakly so
    // memory pressure can drop it.
    private WeakReference<NetworkStatsCollection> mComplete;

    /**
     * @param persistThresholdBytes pending delta size that triggers a persist
     *            in {@link #maybePersistLocked(long)}.
     * @param onlyTags when true record only tagged traffic, otherwise record
     *            only {@link NetworkStats#TAG_NONE} traffic.
     */
    public NetworkStatsRecorder(FileRotator rotator, NonMonotonicObserver<String> observer,
            String cookie, long bucketDuration, long persistThresholdBytes, boolean onlyTags) {
        mRotator = checkNotNull(rotator, "missing FileRotator");
        mObserver = checkNotNull(observer, "missing NonMonotonicObserver");
        mCookie = cookie;

        mBucketDuration = bucketDuration;
        mPersistThresholdBytes = persistThresholdBytes;
        mOnlyTags = onlyTags;

        mPending = new NetworkStatsCollection(bucketDuration);
        mSinceBoot = new NetworkStatsCollection(bucketDuration);

        mPendingRewriter = new CombiningRewriter(mPending);
    }

    /** Discard all in-memory state; on-disk history is left untouched. */
    public void resetLocked() {
        mLastSnapshot = null;
        mPending.reset();
        mSinceBoot.reset();
        // mComplete is created lazily by getOrLoadCompleteLocked(); guard
        // against reset() being called before the first load (was an NPE).
        if (mComplete != null) {
            mComplete.clear();
        }
    }

    /** Return totals recorded since boot that match the given template. */
    public NetworkStats.Entry getTotalSinceBootLocked(NetworkTemplate template) {
        return mSinceBoot.getSummary(template, Long.MIN_VALUE, Long.MAX_VALUE).getTotal(null);
    }

    /**
     * Load complete history represented by {@link FileRotator}. Caches
     * internally as a {@link WeakReference}, and updated with future
     * {@link #recordSnapshotLocked(NetworkStats, Map, long)} snapshots as long
     * as reference is valid.
     */
    public NetworkStatsCollection getOrLoadCompleteLocked() {
        NetworkStatsCollection complete = mComplete != null ? mComplete.get() : null;
        if (complete == null) {
            if (LOGD) Slog.d(TAG, "getOrLoadCompleteLocked() reading from disk for " + mCookie);
            try {
                complete = new NetworkStatsCollection(mBucketDuration);
                mRotator.readMatching(complete, Long.MIN_VALUE, Long.MAX_VALUE);
                // Overlay pending deltas so the view is current.
                complete.recordCollection(mPending);
                mComplete = new WeakReference<NetworkStatsCollection>(complete);
            } catch (IOException e) {
                Log.wtf(TAG, "problem completely reading network stats", e);
            }
        }
        return complete;
    }

    /**
     * Record any delta that occurred since last {@link NetworkStats} snapshot,
     * using the given {@link Map} to identify network interfaces. First
     * snapshot is considered bootstrap, and is not counted as delta.
     */
    public void recordSnapshotLocked(NetworkStats snapshot,
            Map<String, NetworkIdentitySet> ifaceIdent, long currentTimeMillis) {
        final HashSet<String> unknownIfaces = Sets.newHashSet();

        // assume first snapshot is bootstrap and don't record
        if (mLastSnapshot == null) {
            mLastSnapshot = snapshot;
            return;
        }

        final NetworkStatsCollection complete = mComplete != null ? mComplete.get() : null;

        final NetworkStats delta = NetworkStats.subtract(
                snapshot, mLastSnapshot, mObserver, mCookie);
        final long end = currentTimeMillis;
        final long start = end - delta.getElapsedRealtime();

        NetworkStats.Entry entry = null;
        for (int i = 0; i < delta.size(); i++) {
            entry = delta.getValues(i, entry);
            final NetworkIdentitySet ident = ifaceIdent.get(entry.iface);
            if (ident == null) {
                unknownIfaces.add(entry.iface);
                continue;
            }

            // skip when no delta occurred
            if (entry.isEmpty()) continue;

            // only record tag data when requested
            if ((entry.tag == TAG_NONE) != mOnlyTags) {
                mPending.recordData(ident, entry.uid, entry.set, entry.tag, start, end, entry);

                // also record against boot stats when present
                if (mSinceBoot != null) {
                    mSinceBoot.recordData(ident, entry.uid, entry.set, entry.tag, start, end, entry);
                }

                // also record against complete dataset when present
                if (complete != null) {
                    complete.recordData(ident, entry.uid, entry.set, entry.tag, start, end, entry);
                }
            }
        }

        mLastSnapshot = snapshot;

        if (LOGD && unknownIfaces.size() > 0) {
            Slog.w(TAG, "unknown interfaces " + unknownIfaces + ", ignoring those stats");
        }
    }

    /**
     * Consider persisting any pending deltas, if they are beyond
     * {@link #mPersistThresholdBytes}.
     */
    public void maybePersistLocked(long currentTimeMillis) {
        final long pendingBytes = mPending.getTotalBytes();
        if (pendingBytes >= mPersistThresholdBytes) {
            forcePersistLocked(currentTimeMillis);
        } else {
            // below threshold; still give rotation a chance to run
            mRotator.maybeRotate(currentTimeMillis);
        }
    }

    /**
     * Force persisting any pending deltas.
     */
    public void forcePersistLocked(long currentTimeMillis) {
        if (mPending.isDirty()) {
            if (LOGD) Slog.d(TAG, "forcePersistLocked() writing for " + mCookie);
            try {
                mRotator.rewriteActive(mPendingRewriter, currentTimeMillis);
                mRotator.maybeRotate(currentTimeMillis);
                mPending.reset();
            } catch (IOException e) {
                Log.wtf(TAG, "problem persisting pending stats", e);
            }
        }
    }

    /**
     * Remove the given UID from all {@link FileRotator} history, migrating it
     * to {@link TrafficStats#UID_REMOVED}.
     */
    public void removeUidLocked(int uid) {
        try {
            // process all existing data to migrate uid
            mRotator.rewriteAll(new RemoveUidRewriter(mBucketDuration, uid));
        } catch (IOException e) {
            Log.wtf(TAG, "problem removing UID " + uid, e);
        }

        // clear UID from current stats snapshot
        if (mLastSnapshot != null) {
            mLastSnapshot = mLastSnapshot.withoutUid(uid);
        }
    }

    /**
     * Rewriter that will combine current {@link NetworkStatsCollection} values
     * with anything read from disk, and write combined set to disk. Clears the
     * original {@link NetworkStatsCollection} when finished writing.
     */
    private static class CombiningRewriter implements FileRotator.Rewriter {
        private final NetworkStatsCollection mCollection;

        public CombiningRewriter(NetworkStatsCollection collection) {
            mCollection = checkNotNull(collection, "missing NetworkStatsCollection");
        }

        /** {@inheritDoc} */
        public void reset() {
            // ignored; the combined collection carries across files
        }

        /** {@inheritDoc} */
        public void read(InputStream in) throws IOException {
            mCollection.read(in);
        }

        /** {@inheritDoc} */
        public boolean shouldWrite() {
            return true;
        }

        /** {@inheritDoc} */
        public void write(OutputStream out) throws IOException {
            mCollection.write(new DataOutputStream(out));
            mCollection.reset();
        }
    }

    /**
     * Rewriter that will remove any {@link NetworkStatsHistory} attributed to
     * the requested UID, only writing data back when modified.
     */
    public static class RemoveUidRewriter implements FileRotator.Rewriter {
        private final NetworkStatsCollection mTemp;
        private final int mUid;

        public RemoveUidRewriter(long bucketDuration, int uid) {
            mTemp = new NetworkStatsCollection(bucketDuration);
            mUid = uid;
        }

        /** {@inheritDoc} */
        public void reset() {
            mTemp.reset();
        }

        /** {@inheritDoc} */
        public void read(InputStream in) throws IOException {
            mTemp.read(in);
            // clear dirty before removal so shouldWrite() reflects only the
            // removal itself, avoiding pointless rewrites of untouched files
            mTemp.clearDirty();
            mTemp.removeUid(mUid);
        }

        /** {@inheritDoc} */
        public boolean shouldWrite() {
            return mTemp.isDirty();
        }

        /** {@inheritDoc} */
        public void write(OutputStream out) throws IOException {
            mTemp.write(new DataOutputStream(out));
        }
    }

    /**
     * Import network-level stats from the given legacy file, replacing any
     * existing {@link FileRotator} data to avoid double counting.
     */
    public void importLegacyNetworkLocked(File file) throws IOException {
        // legacy file still exists; start empty to avoid double importing
        mRotator.deleteAll();

        final NetworkStatsCollection collection = new NetworkStatsCollection(mBucketDuration);
        collection.readLegacyNetwork(file);
        maybeWriteImportedCollection(collection);
    }

    /**
     * Import UID-level stats from the given legacy file, replacing any
     * existing {@link FileRotator} data to avoid double counting.
     */
    public void importLegacyUidLocked(File file) throws IOException {
        // legacy file still exists; start empty to avoid double importing
        mRotator.deleteAll();

        final NetworkStatsCollection collection = new NetworkStatsCollection(mBucketDuration);
        collection.readLegacyUid(file, mOnlyTags);
        maybeWriteImportedCollection(collection);
    }

    /**
     * Write an imported legacy collection through the {@link FileRotator}:
     * create the active file at the collection's start time, then use its end
     * time to possibly trigger rotation. No-op for empty collections.
     */
    private void maybeWriteImportedCollection(NetworkStatsCollection collection)
            throws IOException {
        if (!collection.isEmpty()) {
            mRotator.rewriteActive(new CombiningRewriter(collection), collection.getStartMillis());
            mRotator.maybeRotate(collection.getEndMillis());
        }
    }

    /** Dump pending totals plus either complete or since-boot history. */
    public void dumpLocked(IndentingPrintWriter pw, boolean fullHistory) {
        pw.print("Pending bytes: "); pw.println(mPending.getTotalBytes());
        if (fullHistory) {
            pw.println("Complete history:");
            getOrLoadCompleteLocked().dump(pw);
        } else {
            pw.println("History since boot:");
            mSinceBoot.dump(pw);
        }
    }
}

File diff suppressed because it is too large Load Diff