update-payload-extractor: Import master update_payload

Change-Id: I94798f08b2e02677390024abcd43d45af7f585ee
This commit is contained in:
Luca Stefani
2020-07-15 13:22:21 +02:00
parent 961ad2b7fc
commit cc06167604
12 changed files with 943 additions and 938 deletions

View File

@@ -17,6 +17,8 @@
"""Library for processing, verifying and applying Chrome OS update payloads."""
# Just raise the interface classes to the root namespace.
from __future__ import absolute_import
from update_payload.checker import CHECKS_TO_DISABLE
from update_payload.error import PayloadError
from update_payload.payload import Payload

View File

@@ -24,12 +24,12 @@ payload. The interface for invoking the applier is as follows:
"""
from __future__ import absolute_import
from __future__ import print_function
import array
import bz2
import hashlib
import itertools
# Not everywhere we can have the lzma library so we ignore it if we didn't have
# it because it is not going to be used. For example, 'cros flash' uses
# devserver code which eventually loads this file, but the lzma library is not
@@ -45,7 +45,6 @@ except ImportError:
except ImportError:
pass
import os
import shutil
import subprocess
import sys
import tempfile
@@ -53,7 +52,6 @@ import tempfile
from update_payload import common
from update_payload.error import PayloadError
#
# Helper functions.
#
@@ -72,7 +70,7 @@ def _VerifySha256(file_obj, expected_hash, name, length=-1):
"""
hasher = hashlib.sha256()
block_length = 1024 * 1024
max_length = length if length >= 0 else sys.maxint
max_length = length if length >= 0 else sys.maxsize
while max_length > 0:
read_length = min(max_length, block_length)
@@ -108,20 +106,16 @@ def _ReadExtents(file_obj, extents, block_size, max_length=-1):
Returns:
A character array containing the concatenated read data.
"""
data = array.array('c')
data = array.array('B')
if max_length < 0:
max_length = sys.maxint
max_length = sys.maxsize
for ex in extents:
if max_length == 0:
break
read_length = min(max_length, ex.num_blocks * block_size)
# Fill with zeros or read from file, depending on the type of extent.
if ex.start_block == common.PSEUDO_EXTENT_MARKER:
data.extend(itertools.repeat('\0', read_length))
else:
file_obj.seek(ex.start_block * block_size)
data.fromfile(file_obj, read_length)
file_obj.seek(ex.start_block * block_size)
data.fromfile(file_obj, read_length)
max_length -= read_length
@@ -149,12 +143,8 @@ def _WriteExtents(file_obj, data, extents, block_size, base_name):
if not data_length:
raise PayloadError('%s: more write extents than data' % ex_name)
write_length = min(data_length, ex.num_blocks * block_size)
# Only do actual writing if this is not a pseudo-extent.
if ex.start_block != common.PSEUDO_EXTENT_MARKER:
file_obj.seek(ex.start_block * block_size)
data_view = buffer(data, data_offset, write_length)
file_obj.write(data_view)
file_obj.seek(ex.start_block * block_size)
file_obj.write(data[data_offset:(data_offset + write_length)])
data_offset += write_length
data_length -= write_length
@@ -184,20 +174,17 @@ def _ExtentsToBspatchArg(extents, block_size, base_name, data_length=-1):
arg = ''
pad_off = pad_len = 0
if data_length < 0:
data_length = sys.maxint
data_length = sys.maxsize
for ex, ex_name in common.ExtentIter(extents, base_name):
if not data_length:
raise PayloadError('%s: more extents than total data length' % ex_name)
is_pseudo = ex.start_block == common.PSEUDO_EXTENT_MARKER
start_byte = -1 if is_pseudo else ex.start_block * block_size
start_byte = ex.start_block * block_size
num_bytes = ex.num_blocks * block_size
if data_length < num_bytes:
# We're only padding a real extent.
if not is_pseudo:
pad_off = start_byte + data_length
pad_len = num_bytes - data_length
pad_off = start_byte + data_length
pad_len = num_bytes - data_length
num_bytes = data_length
arg += '%s%d:%d' % (arg and ',', start_byte, num_bytes)
@@ -274,30 +261,28 @@ class PayloadApplier(object):
num_blocks = ex.num_blocks
count = num_blocks * block_size
# Make sure it's not a fake (signature) operation.
if start_block != common.PSEUDO_EXTENT_MARKER:
data_end = data_start + count
data_end = data_start + count
# Make sure we're not running past partition boundary.
if (start_block + num_blocks) * block_size > part_size:
raise PayloadError(
'%s: extent (%s) exceeds partition size (%d)' %
(ex_name, common.FormatExtent(ex, block_size),
part_size))
# Make sure we're not running past partition boundary.
if (start_block + num_blocks) * block_size > part_size:
raise PayloadError(
'%s: extent (%s) exceeds partition size (%d)' %
(ex_name, common.FormatExtent(ex, block_size),
part_size))
# Make sure that we have enough data to write.
if data_end >= data_length + block_size:
raise PayloadError(
'%s: more dst blocks than data (even with padding)')
# Make sure that we have enough data to write.
if data_end >= data_length + block_size:
raise PayloadError(
'%s: more dst blocks than data (even with padding)')
# Pad with zeros if necessary.
if data_end > data_length:
padding = data_end - data_length
out_data += '\0' * padding
# Pad with zeros if necessary.
if data_end > data_length:
padding = data_end - data_length
out_data += b'\0' * padding
self.payload.payload_file.seek(start_block * block_size)
part_file.seek(start_block * block_size)
part_file.write(out_data[data_start:data_end])
self.payload.payload_file.seek(start_block * block_size)
part_file.seek(start_block * block_size)
part_file.write(out_data[data_start:data_end])
data_start += count
@@ -306,30 +291,6 @@ class PayloadApplier(object):
raise PayloadError('%s: wrote fewer bytes (%d) than expected (%d)' %
(op_name, data_start, data_length))
def _ApplyMoveOperation(self, op, op_name, part_file):
"""Applies a MOVE operation.
Note that this operation must read the whole block data from the input and
only then dump it, due to our in-place update semantics; otherwise, it
might clobber data midway through.
Args:
op: the operation object
op_name: name string for error reporting
part_file: the partition file object
Raises:
PayloadError if something goes wrong.
"""
block_size = self.block_size
# Gather input raw data from src extents.
in_data = _ReadExtents(part_file, op.src_extents, block_size)
# Dump extracted data to dst extents.
_WriteExtents(part_file, in_data, op.dst_extents, block_size,
'%s.dst_extents' % op_name)
def _ApplyZeroOperation(self, op, op_name, part_file):
"""Applies a ZERO operation.
@@ -347,10 +308,8 @@ class PayloadApplier(object):
# Iterate over the extents and write zero.
# pylint: disable=unused-variable
for ex, ex_name in common.ExtentIter(op.dst_extents, base_name):
# Only do actual writing if this is not a pseudo-extent.
if ex.start_block != common.PSEUDO_EXTENT_MARKER:
part_file.seek(ex.start_block * block_size)
part_file.write('\0' * (ex.num_blocks * block_size))
part_file.seek(ex.start_block * block_size)
part_file.write(b'\0' * (ex.num_blocks * block_size))
def _ApplySourceCopyOperation(self, op, op_name, old_part_file,
new_part_file):
@@ -439,12 +398,19 @@ class PayloadApplier(object):
# Diff from source partition.
old_file_name = '/dev/fd/%d' % old_part_file.fileno()
if op.type in (common.OpType.BSDIFF, common.OpType.SOURCE_BSDIFF,
common.OpType.BROTLI_BSDIFF):
# In python3, file descriptors(fd) are not passed to child processes by
# default. To pass the fds to the child processes, we need to set the flag
# 'inheritable' in the fds and make the subprocess calls with the argument
# close_fds set to False.
if sys.version_info.major >= 3:
os.set_inheritable(new_part_file.fileno(), True)
os.set_inheritable(old_part_file.fileno(), True)
if op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.BROTLI_BSDIFF):
# Invoke bspatch on partition file with extents args.
bspatch_cmd = [self.bspatch_path, old_file_name, new_file_name,
patch_file_name, in_extents_arg, out_extents_arg]
subprocess.check_call(bspatch_cmd)
subprocess.check_call(bspatch_cmd, close_fds=False)
elif op.type == common.OpType.PUFFDIFF:
# Invoke puffpatch on partition file with extents args.
puffpatch_cmd = [self.puffpatch_path,
@@ -454,14 +420,14 @@ class PayloadApplier(object):
"--patch_file=%s" % patch_file_name,
"--src_extents=%s" % in_extents_arg,
"--dst_extents=%s" % out_extents_arg]
subprocess.check_call(puffpatch_cmd)
subprocess.check_call(puffpatch_cmd, close_fds=False)
else:
raise PayloadError("Unknown operation %s", op.type)
raise PayloadError("Unknown operation %s" % op.type)
# Pad with zeros past the total output length.
if pad_len:
new_part_file.seek(pad_off)
new_part_file.write('\0' * pad_len)
new_part_file.write(b'\0' * pad_len)
else:
# Gather input raw data and write to a temp file.
input_part_file = old_part_file if old_part_file else new_part_file
@@ -477,8 +443,7 @@ class PayloadApplier(object):
with tempfile.NamedTemporaryFile(delete=False) as out_file:
out_file_name = out_file.name
if op.type in (common.OpType.BSDIFF, common.OpType.SOURCE_BSDIFF,
common.OpType.BROTLI_BSDIFF):
if op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.BROTLI_BSDIFF):
# Invoke bspatch.
bspatch_cmd = [self.bspatch_path, in_file_name, out_file_name,
patch_file_name]
@@ -492,7 +457,7 @@ class PayloadApplier(object):
"--patch_file=%s" % patch_file_name]
subprocess.check_call(puffpatch_cmd)
else:
raise PayloadError("Unknown operation %s", op.type)
raise PayloadError("Unknown operation %s" % op.type)
# Read output.
with open(out_file_name, 'rb') as out_file:
@@ -505,7 +470,7 @@ class PayloadApplier(object):
# Write output back to partition, with padding.
unaligned_out_len = len(out_data) % block_size
if unaligned_out_len:
out_data += '\0' * (block_size - unaligned_out_len)
out_data += b'\0' * (block_size - unaligned_out_len)
_WriteExtents(new_part_file, out_data, op.dst_extents, block_size,
'%s.dst_extents' % op_name)
@@ -520,10 +485,6 @@ class PayloadApplier(object):
new_part_file, part_size):
"""Applies a sequence of update operations to a partition.
This assumes an in-place update semantics for MOVE and BSDIFF, namely all
reads are performed first, then the data is processed and written back to
the same file.
Args:
operations: the sequence of operations
base_name: the name of the operation sequence
@@ -541,13 +502,8 @@ class PayloadApplier(object):
if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ,
common.OpType.REPLACE_XZ):
self._ApplyReplaceOperation(op, op_name, data, new_part_file, part_size)
elif op.type == common.OpType.MOVE:
self._ApplyMoveOperation(op, op_name, new_part_file)
elif op.type == common.OpType.ZERO:
self._ApplyZeroOperation(op, op_name, new_part_file)
elif op.type == common.OpType.BSDIFF:
self._ApplyDiffOperation(op, op_name, data, new_part_file,
new_part_file)
elif op.type == common.OpType.SOURCE_COPY:
self._ApplySourceCopyOperation(op, op_name, old_part_file,
new_part_file)
@@ -583,18 +539,8 @@ class PayloadApplier(object):
_VerifySha256(old_part_file, old_part_info.hash,
'old ' + part_name, length=old_part_info.size)
new_part_file_mode = 'r+b'
if self.minor_version == common.INPLACE_MINOR_PAYLOAD_VERSION:
# Copy the src partition to the dst one; make sure we don't truncate it.
shutil.copyfile(old_part_file_name, new_part_file_name)
elif (self.minor_version == common.SOURCE_MINOR_PAYLOAD_VERSION or
self.minor_version == common.OPSRCHASH_MINOR_PAYLOAD_VERSION or
self.minor_version == common.BROTLI_BSDIFF_MINOR_PAYLOAD_VERSION or
self.minor_version == common.PUFFDIFF_MINOR_PAYLOAD_VERSION):
# In minor version >= 2, we don't want to copy the partitions, so
# instead just make the new partition file.
open(new_part_file_name, 'w').close()
else:
raise PayloadError("Unknown minor version: %d" % self.minor_version)
open(new_part_file_name, 'w').close()
else:
# We need to create/truncate the dst partition file.
new_part_file_mode = 'w+b'
@@ -622,46 +568,54 @@ class PayloadApplier(object):
_VerifySha256(new_part_file, new_part_info.hash,
'new ' + part_name, length=new_part_info.size)
def Run(self, new_kernel_part, new_rootfs_part, old_kernel_part=None,
old_rootfs_part=None):
def Run(self, new_parts, old_parts=None):
"""Applier entry point, invoking all update operations.
Args:
new_kernel_part: name of dest kernel partition file
new_rootfs_part: name of dest rootfs partition file
old_kernel_part: name of source kernel partition file (optional)
old_rootfs_part: name of source rootfs partition file (optional)
new_parts: map of partition name to dest partition file
old_parts: map of partition name to source partition file (optional)
Raises:
PayloadError if payload application failed.
"""
if old_parts is None:
old_parts = {}
self.payload.ResetFile()
# Make sure the arguments are sane and match the payload.
if not (new_kernel_part and new_rootfs_part):
raise PayloadError('missing dst {kernel,rootfs} partitions')
new_part_info = {}
old_part_info = {}
install_operations = []
if not (old_kernel_part or old_rootfs_part):
if not self.payload.IsFull():
raise PayloadError('trying to apply a non-full update without src '
'{kernel,rootfs} partitions')
elif old_kernel_part and old_rootfs_part:
if not self.payload.IsDelta():
raise PayloadError('trying to apply a non-delta update onto src '
'{kernel,rootfs} partitions')
manifest = self.payload.manifest
for part in manifest.partitions:
name = part.partition_name
new_part_info[name] = part.new_partition_info
old_part_info[name] = part.old_partition_info
install_operations.append((name, part.operations))
part_names = set(new_part_info.keys()) # Equivalently, old_part_info.keys()
# Make sure the arguments are sane and match the payload.
new_part_names = set(new_parts.keys())
if new_part_names != part_names:
raise PayloadError('missing dst partition(s) %s' %
', '.join(part_names - new_part_names))
old_part_names = set(old_parts.keys())
if part_names - old_part_names:
if self.payload.IsDelta():
raise PayloadError('trying to apply a delta update without src '
'partition(s) %s' %
', '.join(part_names - old_part_names))
elif old_part_names == part_names:
if self.payload.IsFull():
raise PayloadError('trying to apply a full update onto src partitions')
else:
raise PayloadError('not all src partitions provided')
# Apply update to rootfs.
self._ApplyToPartition(
self.payload.manifest.install_operations, 'rootfs',
'install_operations', new_rootfs_part,
self.payload.manifest.new_rootfs_info, old_rootfs_part,
self.payload.manifest.old_rootfs_info)
# Apply update to kernel update.
self._ApplyToPartition(
self.payload.manifest.kernel_install_operations, 'kernel',
'kernel_install_operations', new_kernel_part,
self.payload.manifest.new_kernel_info, old_kernel_part,
self.payload.manifest.old_kernel_info)
for name, operations in install_operations:
# Apply update to partition.
self._ApplyToPartition(
operations, name, '%s_install_operations' % name, new_parts[name],
new_part_info[name], old_parts.get(name, None), old_part_info[name])

View File

@@ -24,31 +24,32 @@ follows:
checker.Run(...)
"""
from __future__ import absolute_import
from __future__ import print_function
import array
import base64
import collections
import hashlib
import itertools
import os
import subprocess
from six.moves import range
from update_payload import common
from update_payload import error
from update_payload import format_utils
from update_payload import histogram
from update_payload import update_metadata_pb2
#
# Constants.
#
_CHECK_DST_PSEUDO_EXTENTS = 'dst-pseudo-extents'
_CHECK_MOVE_SAME_SRC_DST_BLOCK = 'move-same-src-dst-block'
_CHECK_PAYLOAD_SIG = 'payload-sig'
CHECKS_TO_DISABLE = (
_CHECK_DST_PSEUDO_EXTENTS,
_CHECK_MOVE_SAME_SRC_DST_BLOCK,
_CHECK_PAYLOAD_SIG,
)
@@ -65,14 +66,13 @@ _DEFAULT_PUBKEY_FILE_NAME = os.path.join(os.path.dirname(__file__),
# Supported minor version map to payload types allowed to be using them.
_SUPPORTED_MINOR_VERSIONS = {
0: (_TYPE_FULL,),
1: (_TYPE_DELTA,),
2: (_TYPE_DELTA,),
3: (_TYPE_DELTA,),
4: (_TYPE_DELTA,),
5: (_TYPE_DELTA,),
6: (_TYPE_DELTA,),
}
_OLD_DELTA_USABLE_PART_SIZE = 2 * 1024 * 1024 * 1024
#
# Helper functions.
@@ -321,8 +321,6 @@ class PayloadChecker(object):
self.allow_unhashed = allow_unhashed
# Disable specific tests.
self.check_dst_pseudo_extents = (
_CHECK_DST_PSEUDO_EXTENTS not in disabled_tests)
self.check_move_same_src_dst_block = (
_CHECK_MOVE_SAME_SRC_DST_BLOCK not in disabled_tests)
self.check_payload_sig = _CHECK_PAYLOAD_SIG not in disabled_tests
@@ -330,15 +328,12 @@ class PayloadChecker(object):
# Reset state; these will be assigned when the manifest is checked.
self.sigs_offset = 0
self.sigs_size = 0
self.old_rootfs_fs_size = 0
self.old_kernel_fs_size = 0
self.new_rootfs_fs_size = 0
self.new_kernel_fs_size = 0
self.old_part_info = {}
self.new_part_info = {}
self.new_fs_sizes = collections.defaultdict(int)
self.old_fs_sizes = collections.defaultdict(int)
self.minor_version = None
# TODO(*): When fixing crbug.com/794404, the major version should be
# correctly handled in update_payload scripts. So stop forcing
# major_version=1 here and set it to the correct value.
self.major_version = 1
self.major_version = None
@staticmethod
def _CheckElem(msg, name, report, is_mandatory, is_submsg, convert=str,
@@ -368,22 +363,56 @@ class PayloadChecker(object):
Raises:
error.PayloadError if a mandatory element is missing.
"""
element_result = collections.namedtuple('element_result', ['msg', 'report'])
if not msg.HasField(name):
if is_mandatory:
raise error.PayloadError('%smissing mandatory %s %r.' %
(msg_name + ' ' if msg_name else '',
'sub-message' if is_submsg else 'field',
name))
return None, None
return element_result(None, None)
value = getattr(msg, name)
if is_submsg:
return value, report and report.AddSubReport(name)
return element_result(value, report and report.AddSubReport(name))
else:
if report:
report.AddField(name, convert(value), linebreak=linebreak,
indent=indent)
return value, None
return element_result(value, None)
@staticmethod
def _CheckRepeatedElemNotPresent(msg, field_name, msg_name):
"""Checks that a repeated element is not specified in the message.
Args:
msg: The message containing the element.
field_name: The name of the element.
msg_name: The name of the message object (for error reporting).
Raises:
error.PayloadError if the repeated element is present or non-empty.
"""
if getattr(msg, field_name, None):
raise error.PayloadError('%sfield %r not empty.' %
(msg_name + ' ' if msg_name else '', field_name))
@staticmethod
def _CheckElemNotPresent(msg, field_name, msg_name):
"""Checks that an element is not specified in the message.
Args:
msg: The message containing the element.
field_name: The name of the element.
msg_name: The name of the message object (for error reporting).
Raises:
error.PayloadError if the element is present.
"""
if msg.HasField(field_name):
raise error.PayloadError('%sfield %r exists.' %
(msg_name + ' ' if msg_name else '', field_name))
@staticmethod
def _CheckMandatoryField(msg, field_name, report, msg_name, convert=str,
@@ -432,6 +461,22 @@ class PayloadChecker(object):
(present, missing,
' in ' + obj_name if obj_name else ''))
@staticmethod
def _CheckPresentIffMany(vals, name, obj_name):
"""Checks that a set of vals and names imply every other element.
Args:
vals: The set of values to be compared.
name: The name of the objects holding the corresponding value.
obj_name: Name of the object containing these values.
Raises:
error.PayloadError if assertion does not hold.
"""
if any(vals) and not all(vals):
raise error.PayloadError('%r is not present in all values%s.' %
(name, ' in ' + obj_name if obj_name else ''))
@staticmethod
def _Run(cmd, send_data=None):
"""Runs a subprocess, returns its output.
@@ -544,13 +589,12 @@ class PayloadChecker(object):
raise error.PayloadError('Unsupported minor version: %d' %
self.minor_version)
def _CheckManifest(self, report, rootfs_part_size=0, kernel_part_size=0):
def _CheckManifest(self, report, part_sizes=None):
"""Checks the payload manifest.
Args:
report: A report object to add to.
rootfs_part_size: Size of the rootfs partition in bytes.
kernel_part_size: Size of the kernel partition in bytes.
part_sizes: Map of partition label to partition size in bytes.
Returns:
A tuple consisting of the partition block size used during the update
@@ -559,6 +603,9 @@ class PayloadChecker(object):
Raises:
error.PayloadError if any of the checks fail.
"""
self.major_version = self.payload.header.version
part_sizes = part_sizes or collections.defaultdict(int)
manifest = self.payload.manifest
report.AddSection('manifest')
@@ -577,39 +624,45 @@ class PayloadChecker(object):
self._CheckPresentIff(self.sigs_offset, self.sigs_size,
'signatures_offset', 'signatures_size', 'manifest')
# Check: old_kernel_info <==> old_rootfs_info.
oki_msg, oki_report = self._CheckOptionalSubMsg(manifest,
'old_kernel_info', report)
ori_msg, ori_report = self._CheckOptionalSubMsg(manifest,
'old_rootfs_info', report)
self._CheckPresentIff(oki_msg, ori_msg, 'old_kernel_info',
'old_rootfs_info', 'manifest')
if oki_msg: # equivalently, ori_msg
for part in manifest.partitions:
name = part.partition_name
self.old_part_info[name] = self._CheckOptionalSubMsg(
part, 'old_partition_info', report)
self.new_part_info[name] = self._CheckMandatorySubMsg(
part, 'new_partition_info', report, 'manifest.partitions')
# Check: Old-style partition infos should not be specified.
for _, part in common.CROS_PARTITIONS:
self._CheckElemNotPresent(manifest, 'old_%s_info' % part, 'manifest')
self._CheckElemNotPresent(manifest, 'new_%s_info' % part, 'manifest')
# Check: If old_partition_info is specified anywhere, it must be
# specified everywhere.
old_part_msgs = [part.msg for part in self.old_part_info.values() if part]
self._CheckPresentIffMany(old_part_msgs, 'old_partition_info',
'manifest.partitions')
is_delta = any(part and part.msg for part in self.old_part_info.values())
if is_delta:
# Assert/mark delta payload.
if self.payload_type == _TYPE_FULL:
raise error.PayloadError(
'Apparent full payload contains old_{kernel,rootfs}_info.')
self.payload_type = _TYPE_DELTA
# Check: {size, hash} present in old_{kernel,rootfs}_info.
self.old_kernel_fs_size = self._CheckMandatoryField(
oki_msg, 'size', oki_report, 'old_kernel_info')
self._CheckMandatoryField(oki_msg, 'hash', oki_report, 'old_kernel_info',
convert=common.FormatSha256)
self.old_rootfs_fs_size = self._CheckMandatoryField(
ori_msg, 'size', ori_report, 'old_rootfs_info')
self._CheckMandatoryField(ori_msg, 'hash', ori_report, 'old_rootfs_info',
convert=common.FormatSha256)
for part, (msg, part_report) in self.old_part_info.items():
# Check: {size, hash} present in old_{kernel,rootfs}_info.
field = 'old_%s_info' % part
self.old_fs_sizes[part] = self._CheckMandatoryField(msg, 'size',
part_report, field)
self._CheckMandatoryField(msg, 'hash', part_report, field,
convert=common.FormatSha256)
# Check: old_{kernel,rootfs} size must fit in respective partition.
if kernel_part_size and self.old_kernel_fs_size > kernel_part_size:
raise error.PayloadError(
'Old kernel content (%d) exceed partition size (%d).' %
(self.old_kernel_fs_size, kernel_part_size))
if rootfs_part_size and self.old_rootfs_fs_size > rootfs_part_size:
raise error.PayloadError(
'Old rootfs content (%d) exceed partition size (%d).' %
(self.old_rootfs_fs_size, rootfs_part_size))
# Check: old_{kernel,rootfs} size must fit in respective partition.
if self.old_fs_sizes[part] > part_sizes[part] > 0:
raise error.PayloadError(
'Old %s content (%d) exceed partition size (%d).' %
(part, self.old_fs_sizes[part], part_sizes[part]))
else:
# Assert/mark full payload.
if self.payload_type == _TYPE_DELTA:
@@ -617,31 +670,19 @@ class PayloadChecker(object):
'Apparent delta payload missing old_{kernel,rootfs}_info.')
self.payload_type = _TYPE_FULL
# Check: new_kernel_info present; contains {size, hash}.
nki_msg, nki_report = self._CheckMandatorySubMsg(
manifest, 'new_kernel_info', report, 'manifest')
self.new_kernel_fs_size = self._CheckMandatoryField(
nki_msg, 'size', nki_report, 'new_kernel_info')
self._CheckMandatoryField(nki_msg, 'hash', nki_report, 'new_kernel_info',
convert=common.FormatSha256)
# Check: new_{kernel,rootfs}_info present; contains {size, hash}.
for part, (msg, part_report) in self.new_part_info.items():
field = 'new_%s_info' % part
self.new_fs_sizes[part] = self._CheckMandatoryField(msg, 'size',
part_report, field)
self._CheckMandatoryField(msg, 'hash', part_report, field,
convert=common.FormatSha256)
# Check: new_rootfs_info present; contains {size, hash}.
nri_msg, nri_report = self._CheckMandatorySubMsg(
manifest, 'new_rootfs_info', report, 'manifest')
self.new_rootfs_fs_size = self._CheckMandatoryField(
nri_msg, 'size', nri_report, 'new_rootfs_info')
self._CheckMandatoryField(nri_msg, 'hash', nri_report, 'new_rootfs_info',
convert=common.FormatSha256)
# Check: new_{kernel,rootfs} size must fit in respective partition.
if kernel_part_size and self.new_kernel_fs_size > kernel_part_size:
raise error.PayloadError(
'New kernel content (%d) exceed partition size (%d).' %
(self.new_kernel_fs_size, kernel_part_size))
if rootfs_part_size and self.new_rootfs_fs_size > rootfs_part_size:
raise error.PayloadError(
'New rootfs content (%d) exceed partition size (%d).' %
(self.new_rootfs_fs_size, rootfs_part_size))
# Check: new_{kernel,rootfs} size must fit in respective partition.
if self.new_fs_sizes[part] > part_sizes[part] > 0:
raise error.PayloadError(
'New %s content (%d) exceed partition size (%d).' %
(part, self.new_fs_sizes[part], part_sizes[part]))
# Check: minor_version makes sense for the payload type. This check should
# run after the payload type has been set.
@@ -667,8 +708,7 @@ class PayloadChecker(object):
self._CheckBlocksFitLength(length, total_blocks, self.block_size,
'%s: %s' % (op_name, length_name))
def _CheckExtents(self, extents, usable_size, block_counters, name,
allow_pseudo=False, allow_signature=False):
def _CheckExtents(self, extents, usable_size, block_counters, name):
"""Checks a sequence of extents.
Args:
@@ -676,8 +716,6 @@ class PayloadChecker(object):
usable_size: The usable size of the partition to which the extents apply.
block_counters: Array of counters corresponding to the number of blocks.
name: The name of the extent block.
allow_pseudo: Whether or not pseudo block numbers are allowed.
allow_signature: Whether or not the extents are used for a signature.
Returns:
The total number of blocks in the extents.
@@ -698,20 +736,15 @@ class PayloadChecker(object):
if num_blocks == 0:
raise error.PayloadError('%s: extent length is zero.' % ex_name)
if start_block != common.PSEUDO_EXTENT_MARKER:
# Check: Make sure we're within the partition limit.
if usable_size and end_block * self.block_size > usable_size:
raise error.PayloadError(
'%s: extent (%s) exceeds usable partition size (%d).' %
(ex_name, common.FormatExtent(ex, self.block_size), usable_size))
# Check: Make sure we're within the partition limit.
if usable_size and end_block * self.block_size > usable_size:
raise error.PayloadError(
'%s: extent (%s) exceeds usable partition size (%d).' %
(ex_name, common.FormatExtent(ex, self.block_size), usable_size))
# Record block usage.
for i in xrange(start_block, end_block):
block_counters[i] += 1
elif not (allow_pseudo or (allow_signature and len(extents) == 1)):
# Pseudo-extents must be allowed explicitly, or otherwise be part of a
# signature operation (in which case there has to be exactly one).
raise error.PayloadError('%s: unexpected pseudo-extent.' % ex_name)
# Record block usage.
for i in range(start_block, end_block):
block_counters[i] += 1
total_num_blocks += num_blocks
@@ -729,6 +762,11 @@ class PayloadChecker(object):
Raises:
error.PayloadError if any check fails.
"""
# Check: total_dst_blocks is not a floating point.
if isinstance(total_dst_blocks, float):
raise error.PayloadError('%s: contains invalid data type of '
'total_dst_blocks.' % op_name)
# Check: Does not contain src extents.
if op.src_extents:
raise error.PayloadError('%s: contains src_extents.' % op_name)
@@ -742,96 +780,13 @@ class PayloadChecker(object):
self.block_size,
op_name + '.data_length', 'dst')
else:
# Check: data_length must be smaller than the alotted dst blocks.
# Check: data_length must be smaller than the allotted dst blocks.
if data_length >= total_dst_blocks * self.block_size:
raise error.PayloadError(
'%s: data_length (%d) must be less than allotted dst block '
'space (%d * %d).' %
(op_name, data_length, total_dst_blocks, self.block_size))
def _CheckMoveOperation(self, op, data_offset, total_src_blocks,
total_dst_blocks, op_name):
"""Specific checks for MOVE operations.
Args:
op: The operation object from the manifest.
data_offset: The offset of a data blob for the operation.
total_src_blocks: Total number of blocks in src_extents.
total_dst_blocks: Total number of blocks in dst_extents.
op_name: Operation name for error reporting.
Raises:
error.PayloadError if any check fails.
"""
# Check: No data_{offset,length}.
if data_offset is not None:
raise error.PayloadError('%s: contains data_{offset,length}.' % op_name)
# Check: total_src_blocks == total_dst_blocks.
if total_src_blocks != total_dst_blocks:
raise error.PayloadError(
'%s: total src blocks (%d) != total dst blocks (%d).' %
(op_name, total_src_blocks, total_dst_blocks))
# Check: For all i, i-th src block index != i-th dst block index.
i = 0
src_extent_iter = iter(op.src_extents)
dst_extent_iter = iter(op.dst_extents)
src_extent = dst_extent = None
src_idx = src_num = dst_idx = dst_num = 0
while i < total_src_blocks:
# Get the next source extent, if needed.
if not src_extent:
try:
src_extent = src_extent_iter.next()
except StopIteration:
raise error.PayloadError('%s: ran out of src extents (%d/%d).' %
(op_name, i, total_src_blocks))
src_idx = src_extent.start_block
src_num = src_extent.num_blocks
# Get the next dest extent, if needed.
if not dst_extent:
try:
dst_extent = dst_extent_iter.next()
except StopIteration:
raise error.PayloadError('%s: ran out of dst extents (%d/%d).' %
(op_name, i, total_dst_blocks))
dst_idx = dst_extent.start_block
dst_num = dst_extent.num_blocks
# Check: start block is not 0. See crbug/480751; there are still versions
# of update_engine which fail when seeking to 0 in PReadAll and PWriteAll,
# so we need to fail payloads that try to MOVE to/from block 0.
if src_idx == 0 or dst_idx == 0:
raise error.PayloadError(
'%s: MOVE operation cannot have extent with start block 0' %
op_name)
if self.check_move_same_src_dst_block and src_idx == dst_idx:
raise error.PayloadError(
'%s: src/dst block number %d is the same (%d).' %
(op_name, i, src_idx))
advance = min(src_num, dst_num)
i += advance
src_idx += advance
src_num -= advance
if src_num == 0:
src_extent = None
dst_idx += advance
dst_num -= advance
if dst_num == 0:
dst_extent = None
# Make sure we've exhausted all src/dst extents.
if src_extent:
raise error.PayloadError('%s: excess src blocks.' % op_name)
if dst_extent:
raise error.PayloadError('%s: excess dst blocks.' % op_name)
def _CheckZeroOperation(self, op, op_name):
"""Specific checks for ZERO operations.
@@ -851,7 +806,7 @@ class PayloadChecker(object):
raise error.PayloadError('%s: contains data_offset.' % op_name)
def _CheckAnyDiffOperation(self, op, data_length, total_dst_blocks, op_name):
"""Specific checks for BSDIFF, SOURCE_BSDIFF, PUFFDIFF and BROTLI_BSDIFF
"""Specific checks for SOURCE_BSDIFF, PUFFDIFF and BROTLI_BSDIFF
operations.
Args:
@@ -867,7 +822,7 @@ class PayloadChecker(object):
if data_length is None:
raise error.PayloadError('%s: missing data_{offset,length}.' % op_name)
# Check: data_length is strictly smaller than the alotted dst blocks.
# Check: data_length is strictly smaller than the allotted dst blocks.
if data_length >= total_dst_blocks * self.block_size:
raise error.PayloadError(
'%s: data_length (%d) must be smaller than allotted dst space '
@@ -876,8 +831,7 @@ class PayloadChecker(object):
total_dst_blocks * self.block_size))
# Check the existence of src_length and dst_length for legacy bsdiffs.
if (op.type == common.OpType.BSDIFF or
(op.type == common.OpType.SOURCE_BSDIFF and self.minor_version <= 3)):
if op.type == common.OpType.SOURCE_BSDIFF and self.minor_version <= 3:
if not op.HasField('src_length') or not op.HasField('dst_length'):
raise error.PayloadError('%s: require {src,dst}_length.' % op_name)
else:
@@ -926,21 +880,19 @@ class PayloadChecker(object):
if self.minor_version >= 3 and op.src_sha256_hash is None:
raise error.PayloadError('%s: source hash missing.' % op_name)
def _CheckOperation(self, op, op_name, is_last, old_block_counters,
new_block_counters, old_usable_size, new_usable_size,
prev_data_offset, allow_signature, blob_hash_counts):
def _CheckOperation(self, op, op_name, old_block_counters, new_block_counters,
old_usable_size, new_usable_size, prev_data_offset,
blob_hash_counts):
"""Checks a single update operation.
Args:
op: The operation object.
op_name: Operation name string for error reporting.
is_last: Whether this is the last operation in the sequence.
old_block_counters: Arrays of block read counters.
new_block_counters: Arrays of block write counters.
old_usable_size: The overall usable size for src data in bytes.
new_usable_size: The overall usable size for dst data in bytes.
prev_data_offset: Offset of last used data bytes.
allow_signature: Whether this may be a signature operation.
blob_hash_counts: Counters for hashed/unhashed blobs.
Returns:
@@ -952,14 +904,10 @@ class PayloadChecker(object):
# Check extents.
total_src_blocks = self._CheckExtents(
op.src_extents, old_usable_size, old_block_counters,
op_name + '.src_extents', allow_pseudo=True)
allow_signature_in_extents = (allow_signature and is_last and
op.type == common.OpType.REPLACE)
op_name + '.src_extents')
total_dst_blocks = self._CheckExtents(
op.dst_extents, new_usable_size, new_block_counters,
op_name + '.dst_extents',
allow_pseudo=(not self.check_dst_pseudo_extents),
allow_signature=allow_signature_in_extents)
op_name + '.dst_extents')
# Check: data_offset present <==> data_length present.
data_offset = self._CheckOptionalField(op, 'data_offset', None)
@@ -995,9 +943,7 @@ class PayloadChecker(object):
(op_name, common.FormatSha256(op.data_sha256_hash),
common.FormatSha256(actual_hash.digest())))
elif data_offset is not None:
if allow_signature_in_extents:
blob_hash_counts['signature'] += 1
elif self.allow_unhashed:
if self.allow_unhashed:
blob_hash_counts['unhashed'] += 1
else:
raise error.PayloadError('%s: unhashed operation not allowed.' %
@@ -1011,18 +957,11 @@ class PayloadChecker(object):
(op_name, data_offset, prev_data_offset))
# Type-specific checks.
if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ,
common.OpType.REPLACE_XZ):
self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
elif op.type == common.OpType.REPLACE_XZ and (self.minor_version >= 3 or
self.major_version >= 2):
self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
elif op.type == common.OpType.MOVE and self.minor_version == 1:
self._CheckMoveOperation(op, data_offset, total_src_blocks,
total_dst_blocks, op_name)
elif op.type == common.OpType.ZERO and self.minor_version >= 4:
self._CheckZeroOperation(op, op_name)
elif op.type == common.OpType.BSDIFF and self.minor_version == 1:
self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name)
elif op.type == common.OpType.SOURCE_COPY and self.minor_version >= 2:
self._CheckSourceCopyOperation(data_offset, total_src_blocks,
total_dst_blocks, op_name)
@@ -1044,7 +983,7 @@ class PayloadChecker(object):
def _SizeToNumBlocks(self, size):
"""Returns the number of blocks needed to contain a given byte size."""
return (size + self.block_size - 1) / self.block_size
return (size + self.block_size - 1) // self.block_size
def _AllocBlockCounters(self, total_size):
"""Returns a freshly initialized array of block counters.
@@ -1064,7 +1003,7 @@ class PayloadChecker(object):
def _CheckOperations(self, operations, report, base_name, old_fs_size,
new_fs_size, old_usable_size, new_usable_size,
prev_data_offset, allow_signature):
prev_data_offset):
"""Checks a sequence of update operations.
Args:
@@ -1076,7 +1015,6 @@ class PayloadChecker(object):
old_usable_size: The overall usable size of the old partition in bytes.
new_usable_size: The overall usable size of the new partition in bytes.
prev_data_offset: Offset of last used data bytes.
allow_signature: Whether this sequence may contain signature operations.
Returns:
The total data blob size used.
@@ -1091,9 +1029,7 @@ class PayloadChecker(object):
common.OpType.REPLACE: 0,
common.OpType.REPLACE_BZ: 0,
common.OpType.REPLACE_XZ: 0,
common.OpType.MOVE: 0,
common.OpType.ZERO: 0,
common.OpType.BSDIFF: 0,
common.OpType.SOURCE_COPY: 0,
common.OpType.SOURCE_BSDIFF: 0,
common.OpType.PUFFDIFF: 0,
@@ -1104,8 +1040,6 @@ class PayloadChecker(object):
common.OpType.REPLACE: 0,
common.OpType.REPLACE_BZ: 0,
common.OpType.REPLACE_XZ: 0,
# MOVE operations don't have blobs.
common.OpType.BSDIFF: 0,
# SOURCE_COPY operations don't have blobs.
common.OpType.SOURCE_BSDIFF: 0,
common.OpType.PUFFDIFF: 0,
@@ -1116,8 +1050,6 @@ class PayloadChecker(object):
'hashed': 0,
'unhashed': 0,
}
if allow_signature:
blob_hash_counts['signature'] = 0
# Allocate old and new block counters.
old_block_counters = (self._AllocBlockCounters(old_usable_size)
@@ -1130,16 +1062,14 @@ class PayloadChecker(object):
op_num += 1
# Check: Type is valid.
if op.type not in op_counts.keys():
if op.type not in op_counts:
raise error.PayloadError('%s: invalid type (%d).' % (op_name, op.type))
op_counts[op.type] += 1
is_last = op_num == len(operations)
curr_data_used = self._CheckOperation(
op, op_name, is_last, old_block_counters, new_block_counters,
op, op_name, old_block_counters, new_block_counters,
old_usable_size, new_usable_size,
prev_data_offset + total_data_used, allow_signature,
blob_hash_counts)
prev_data_offset + total_data_used, blob_hash_counts)
if curr_data_used:
op_blob_totals[op.type] += curr_data_used
total_data_used += curr_data_used
@@ -1193,18 +1123,17 @@ class PayloadChecker(object):
if not sigs.signatures:
raise error.PayloadError('Signature block is empty.')
last_ops_section = (self.payload.manifest.kernel_install_operations or
self.payload.manifest.install_operations)
fake_sig_op = last_ops_section[-1]
# Check: signatures_{offset,size} must match the last (fake) operation.
if not (fake_sig_op.type == common.OpType.REPLACE and
self.sigs_offset == fake_sig_op.data_offset and
self.sigs_size == fake_sig_op.data_length):
raise error.PayloadError(
'Signatures_{offset,size} (%d+%d) does not match last operation '
'(%d+%d).' %
(self.sigs_offset, self.sigs_size, fake_sig_op.data_offset,
fake_sig_op.data_length))
# Check that we don't have the signature operation blob at the end (used to
# be for major version 1).
last_partition = self.payload.manifest.partitions[-1]
if last_partition.operations:
last_op = last_partition.operations[-1]
# Check: signatures_{offset,size} must match the last (fake) operation.
if (last_op.type == common.OpType.REPLACE and
last_op.data_offset == self.sigs_offset and
last_op.data_length == self.sigs_size):
raise error.PayloadError('It seems like the last operation is the '
'signature blob. This is an invalid payload.')
# Compute the checksum of all data up to signature blob.
# TODO(garnold) we're re-reading the whole data section into a string
@@ -1231,17 +1160,16 @@ class PayloadChecker(object):
raise error.PayloadError('Unknown signature version (%d).' %
sig.version)
def Run(self, pubkey_file_name=None, metadata_sig_file=None,
rootfs_part_size=0, kernel_part_size=0, report_out_file=None):
def Run(self, pubkey_file_name=None, metadata_sig_file=None, metadata_size=0,
part_sizes=None, report_out_file=None):
"""Checker entry point, invoking all checks.
Args:
pubkey_file_name: Public key used for signature verification.
metadata_sig_file: Metadata signature, if verification is desired.
rootfs_part_size: The size of rootfs partitions in bytes (default: infer
based on payload type and version).
kernel_part_size: The size of kernel partitions in bytes (default: use
reported filesystem size).
metadata_size: Metadata size, if verification is desired.
part_sizes: Mapping of partition label to size in bytes (default: infer
based on payload type and version or filesystem).
report_out_file: File object to dump the report to.
Raises:
@@ -1258,6 +1186,12 @@ class PayloadChecker(object):
self.payload.ResetFile()
try:
# Check metadata_size (if provided).
if metadata_size and self.payload.metadata_size != metadata_size:
raise error.PayloadError('Invalid payload metadata size in payload(%d) '
'vs given(%d)' % (self.payload.metadata_size,
metadata_size))
# Check metadata signature (if provided).
if metadata_sig_file:
metadata_sig = base64.b64decode(metadata_sig_file.read())
@@ -1268,65 +1202,60 @@ class PayloadChecker(object):
# Part 1: Check the file header.
report.AddSection('header')
# Check: Payload version is valid.
if self.payload.header.version != 1:
if self.payload.header.version not in (1, 2):
raise error.PayloadError('Unknown payload version (%d).' %
self.payload.header.version)
report.AddField('version', self.payload.header.version)
report.AddField('manifest len', self.payload.header.manifest_len)
# Part 2: Check the manifest.
self._CheckManifest(report, rootfs_part_size, kernel_part_size)
self._CheckManifest(report, part_sizes)
assert self.payload_type, 'payload type should be known by now'
# Infer the usable partition size when validating rootfs operations:
# - If rootfs partition size was provided, use that.
# - Otherwise, if this is an older delta (minor version < 2), stick with
# a known constant size. This is necessary because older deltas may
# exceed the filesystem size when moving data blocks around.
# - Otherwise, use the encoded filesystem size.
new_rootfs_usable_size = self.new_rootfs_fs_size
old_rootfs_usable_size = self.old_rootfs_fs_size
if rootfs_part_size:
new_rootfs_usable_size = rootfs_part_size
old_rootfs_usable_size = rootfs_part_size
elif self.payload_type == _TYPE_DELTA and self.minor_version in (None, 1):
new_rootfs_usable_size = _OLD_DELTA_USABLE_PART_SIZE
old_rootfs_usable_size = _OLD_DELTA_USABLE_PART_SIZE
# Make sure deprecated values are not present in the payload.
for field in ('install_operations', 'kernel_install_operations'):
self._CheckRepeatedElemNotPresent(self.payload.manifest, field,
'manifest')
for field in ('old_kernel_info', 'old_rootfs_info',
'new_kernel_info', 'new_rootfs_info'):
self._CheckElemNotPresent(self.payload.manifest, field, 'manifest')
# Part 3: Examine rootfs operations.
# TODO(garnold)(chromium:243559) only default to the filesystem size if
# no explicit size provided *and* the partition size is not embedded in
# the payload; see issue for more details.
report.AddSection('rootfs operations')
total_blob_size = self._CheckOperations(
self.payload.manifest.install_operations, report,
'install_operations', self.old_rootfs_fs_size,
self.new_rootfs_fs_size, old_rootfs_usable_size,
new_rootfs_usable_size, 0, False)
total_blob_size = 0
for part, operations in ((p.partition_name, p.operations)
for p in self.payload.manifest.partitions):
report.AddSection('%s operations' % part)
# Part 4: Examine kernel operations.
# TODO(garnold)(chromium:243559) as above.
report.AddSection('kernel operations')
total_blob_size += self._CheckOperations(
self.payload.manifest.kernel_install_operations, report,
'kernel_install_operations', self.old_kernel_fs_size,
self.new_kernel_fs_size,
kernel_part_size if kernel_part_size else self.old_kernel_fs_size,
kernel_part_size if kernel_part_size else self.new_kernel_fs_size,
total_blob_size, True)
new_fs_usable_size = self.new_fs_sizes[part]
old_fs_usable_size = self.old_fs_sizes[part]
if part_sizes is not None and part_sizes.get(part, None):
new_fs_usable_size = old_fs_usable_size = part_sizes[part]
# TODO(chromium:243559) only default to the filesystem size if no
# explicit size provided *and* the partition size is not embedded in the
# payload; see issue for more details.
total_blob_size += self._CheckOperations(
operations, report, '%s_install_operations' % part,
self.old_fs_sizes[part], self.new_fs_sizes[part],
old_fs_usable_size, new_fs_usable_size, total_blob_size)
# Check: Operations data reach the end of the payload file.
used_payload_size = self.payload.data_offset + total_blob_size
# Major versions 2 and higher have a signature at the end, so it should be
# considered in the total size of the image.
if self.sigs_size:
used_payload_size += self.sigs_size
if used_payload_size != payload_file_size:
raise error.PayloadError(
'Used payload size (%d) different from actual file size (%d).' %
(used_payload_size, payload_file_size))
# Part 5: Handle payload signatures message.
# Part 4: Handle payload signatures message.
if self.check_payload_sig and self.sigs_size:
self._CheckSignatures(report, pubkey_file_name)
# Part 6: Summary.
# Part 5: Summary.
report.AddSection('summary')
report.AddField('update type', self.payload_type)

View File

@@ -1,4 +1,4 @@
#!/usr/bin/python2
#!/usr/bin/env python
#
# Copyright (C) 2013 The Android Open Source Project
#
@@ -17,35 +17,36 @@
"""Unit testing checker.py."""
from __future__ import print_function
# Disable check for function names to avoid errors based on old code
# pylint: disable-msg=invalid-name
from __future__ import absolute_import
import array
import collections
import cStringIO
import hashlib
import io
import itertools
import os
import unittest
# pylint cannot find mox.
# pylint: disable=F0401
import mox
from six.moves import zip
import mock # pylint: disable=import-error
from update_payload import checker
from update_payload import common
from update_payload import test_utils
from update_payload import update_metadata_pb2
from update_payload.error import PayloadError
from update_payload.payload import Payload # Avoid name conflicts later.
from update_payload.payload import Payload # Avoid name conflicts later.
def _OpTypeByName(op_name):
"""Returns the type of an operation from itsname."""
"""Returns the type of an operation from its name."""
op_name_to_type = {
'REPLACE': common.OpType.REPLACE,
'REPLACE_BZ': common.OpType.REPLACE_BZ,
'MOVE': common.OpType.MOVE,
'BSDIFF': common.OpType.BSDIFF,
'SOURCE_COPY': common.OpType.SOURCE_COPY,
'SOURCE_BSDIFF': common.OpType.SOURCE_BSDIFF,
'ZERO': common.OpType.ZERO,
@@ -65,7 +66,7 @@ def _GetPayloadChecker(payload_gen_write_to_file_func, payload_gen_dargs=None,
if checker_init_dargs is None:
checker_init_dargs = {}
payload_file = cStringIO.StringIO()
payload_file = io.BytesIO()
payload_gen_write_to_file_func(payload_file, **payload_gen_dargs)
payload_file.seek(0)
payload = Payload(payload_file)
@@ -75,7 +76,7 @@ def _GetPayloadChecker(payload_gen_write_to_file_func, payload_gen_dargs=None,
def _GetPayloadCheckerWithData(payload_gen):
"""Returns a payload checker from a given payload generator."""
payload_file = cStringIO.StringIO()
payload_file = io.BytesIO()
payload_gen.WriteToFile(payload_file)
payload_file.seek(0)
payload = Payload(payload_file)
@@ -89,7 +90,7 @@ def _GetPayloadCheckerWithData(payload_gen):
# pylint: disable=W0212
# Don't bark about missing members of classes you cannot import.
# pylint: disable=E1101
class PayloadCheckerTest(mox.MoxTestBase):
class PayloadCheckerTest(unittest.TestCase):
"""Tests the PayloadChecker class.
In addition to ordinary testFoo() methods, which are automatically invoked by
@@ -102,11 +103,42 @@ class PayloadCheckerTest(mox.MoxTestBase):
all such tests is done in AddAllParametricTests().
"""
def setUp(self):
"""setUp function for unittest testcase"""
self.mock_checks = []
def tearDown(self):
"""tearDown function for unittest testcase"""
# Verify that all mock functions were called.
for check in self.mock_checks:
check.mock_fn.assert_called_once_with(*check.exp_args, **check.exp_kwargs)
class MockChecksAtTearDown(object):
"""Mock data storage.
This class stores the mock functions and its arguments to be checked at a
later point.
"""
def __init__(self, mock_fn, *args, **kwargs):
self.mock_fn = mock_fn
self.exp_args = args
self.exp_kwargs = kwargs
def addPostCheckForMockFunction(self, mock_fn, *args, **kwargs):
"""Store a mock function and its arguments to self.mock_checks
Args:
mock_fn: mock function object
args: expected positional arguments for the mock_fn
kwargs: expected named arguments for the mock_fn
"""
self.mock_checks.append(self.MockChecksAtTearDown(mock_fn, *args, **kwargs))
def MockPayload(self):
"""Create a mock payload object, complete with a mock manifest."""
payload = self.mox.CreateMock(Payload)
payload = mock.create_autospec(Payload)
payload.is_init = True
payload.manifest = self.mox.CreateMock(
payload.manifest = mock.create_autospec(
update_metadata_pb2.DeltaArchiveManifest)
return payload
@@ -175,19 +207,20 @@ class PayloadCheckerTest(mox.MoxTestBase):
subreport = 'fake subreport'
# Create a mock message.
msg = self.mox.CreateMock(update_metadata_pb2._message.Message)
msg.HasField(name).AndReturn(is_present)
msg = mock.create_autospec(update_metadata_pb2._message.Message)
self.addPostCheckForMockFunction(msg.HasField, name)
msg.HasField.return_value = is_present
setattr(msg, name, val)
# Create a mock report.
report = self.mox.CreateMock(checker._PayloadReport)
report = mock.create_autospec(checker._PayloadReport)
if is_present:
if is_submsg:
report.AddSubReport(name).AndReturn(subreport)
self.addPostCheckForMockFunction(report.AddSubReport, name)
report.AddSubReport.return_value = subreport
else:
report.AddField(name, convert(val), linebreak=linebreak, indent=indent)
self.addPostCheckForMockFunction(report.AddField, name, convert(val),
linebreak=linebreak, indent=indent)
self.mox.ReplayAll()
return (msg, report, subreport, name, val)
def DoAddElemTest(self, is_present, is_mandatory, is_submsg, convert,
@@ -213,9 +246,9 @@ class PayloadCheckerTest(mox.MoxTestBase):
else:
ret_val, ret_subreport = checker.PayloadChecker._CheckElem(*args,
**kwargs)
self.assertEquals(val if is_present else None, ret_val)
self.assertEquals(subreport if is_present and is_submsg else None,
ret_subreport)
self.assertEqual(val if is_present else None, ret_val)
self.assertEqual(subreport if is_present and is_submsg else None,
ret_subreport)
def DoAddFieldTest(self, is_mandatory, is_present, convert, linebreak,
indent):
@@ -245,7 +278,7 @@ class PayloadCheckerTest(mox.MoxTestBase):
self.assertRaises(PayloadError, tested_func, *args, **kwargs)
else:
ret_val = tested_func(*args, **kwargs)
self.assertEquals(val if is_present else None, ret_val)
self.assertEqual(val if is_present else None, ret_val)
def DoAddSubMsgTest(self, is_mandatory, is_present):
"""Parametrized testing of _Check{Mandatory,Optional}SubMsg().
@@ -269,8 +302,8 @@ class PayloadCheckerTest(mox.MoxTestBase):
self.assertRaises(PayloadError, tested_func, *args)
else:
ret_val, ret_subreport = tested_func(*args)
self.assertEquals(val if is_present else None, ret_val)
self.assertEquals(subreport if is_present else None, ret_subreport)
self.assertEqual(val if is_present else None, ret_val)
self.assertEqual(subreport if is_present else None, ret_subreport)
def testCheckPresentIff(self):
"""Tests _CheckPresentIff()."""
@@ -296,15 +329,14 @@ class PayloadCheckerTest(mox.MoxTestBase):
returned_signed_hash: The signed hash data retuned by openssl.
expected_signed_hash: The signed hash data to compare against.
"""
try:
# Stub out the subprocess invocation.
self.mox.StubOutWithMock(checker.PayloadChecker, '_Run')
# Stub out the subprocess invocation.
with mock.patch.object(checker.PayloadChecker, '_Run') \
as mock_payload_checker:
if expect_subprocess_call:
checker.PayloadChecker._Run(
mox.IsA(list), send_data=sig_data).AndReturn(
(sig_asn1_header + returned_signed_hash, None))
mock_payload_checker([], send_data=sig_data)
mock_payload_checker.return_value = (
sig_asn1_header + returned_signed_hash, None)
self.mox.ReplayAll()
if expect_pass:
self.assertIsNone(checker.PayloadChecker._CheckSha256Signature(
sig_data, 'foo', expected_signed_hash, 'bar'))
@@ -312,13 +344,11 @@ class PayloadCheckerTest(mox.MoxTestBase):
self.assertRaises(PayloadError,
checker.PayloadChecker._CheckSha256Signature,
sig_data, 'foo', expected_signed_hash, 'bar')
finally:
self.mox.UnsetStubs()
def testCheckSha256Signature_Pass(self):
"""Tests _CheckSha256Signature(); pass case."""
sig_data = 'fake-signature'.ljust(256)
signed_hash = hashlib.sha256('fake-data').digest()
signed_hash = hashlib.sha256(b'fake-data').digest()
self.DoCheckSha256SignatureTest(True, True, sig_data,
common.SIG_ASN1_HEADER, signed_hash,
signed_hash)
@@ -326,7 +356,7 @@ class PayloadCheckerTest(mox.MoxTestBase):
def testCheckSha256Signature_FailBadSignature(self):
"""Tests _CheckSha256Signature(); fails due to malformed signature."""
sig_data = 'fake-signature' # Malformed (not 256 bytes in length).
signed_hash = hashlib.sha256('fake-data').digest()
signed_hash = hashlib.sha256(b'fake-data').digest()
self.DoCheckSha256SignatureTest(False, False, sig_data,
common.SIG_ASN1_HEADER, signed_hash,
signed_hash)
@@ -334,7 +364,7 @@ class PayloadCheckerTest(mox.MoxTestBase):
def testCheckSha256Signature_FailBadOutputLength(self):
"""Tests _CheckSha256Signature(); fails due to unexpected output length."""
sig_data = 'fake-signature'.ljust(256)
signed_hash = 'fake-hash' # Malformed (not 32 bytes in length).
signed_hash = b'fake-hash' # Malformed (not 32 bytes in length).
self.DoCheckSha256SignatureTest(False, True, sig_data,
common.SIG_ASN1_HEADER, signed_hash,
signed_hash)
@@ -342,16 +372,16 @@ class PayloadCheckerTest(mox.MoxTestBase):
def testCheckSha256Signature_FailBadAsnHeader(self):
"""Tests _CheckSha256Signature(); fails due to bad ASN1 header."""
sig_data = 'fake-signature'.ljust(256)
signed_hash = hashlib.sha256('fake-data').digest()
bad_asn1_header = 'bad-asn-header'.ljust(len(common.SIG_ASN1_HEADER))
signed_hash = hashlib.sha256(b'fake-data').digest()
bad_asn1_header = b'bad-asn-header'.ljust(len(common.SIG_ASN1_HEADER))
self.DoCheckSha256SignatureTest(False, True, sig_data, bad_asn1_header,
signed_hash, signed_hash)
def testCheckSha256Signature_FailBadHash(self):
"""Tests _CheckSha256Signature(); fails due to bad hash returned."""
sig_data = 'fake-signature'.ljust(256)
expected_signed_hash = hashlib.sha256('fake-data').digest()
returned_signed_hash = hashlib.sha256('bad-fake-data').digest()
expected_signed_hash = hashlib.sha256(b'fake-data').digest()
returned_signed_hash = hashlib.sha256(b'bad-fake-data').digest()
self.DoCheckSha256SignatureTest(False, True, sig_data,
common.SIG_ASN1_HEADER,
expected_signed_hash, returned_signed_hash)
@@ -429,10 +459,10 @@ class PayloadCheckerTest(mox.MoxTestBase):
payload_gen.SetBlockSize(test_utils.KiB(4))
# Add some operations.
payload_gen.AddOperation(False, common.OpType.MOVE,
payload_gen.AddOperation(common.ROOTFS, common.OpType.SOURCE_COPY,
src_extents=[(0, 16), (16, 497)],
dst_extents=[(16, 496), (0, 16)])
payload_gen.AddOperation(True, common.OpType.MOVE,
payload_gen.AddOperation(common.KERNEL, common.OpType.SOURCE_COPY,
src_extents=[(0, 8), (8, 8)],
dst_extents=[(8, 8), (0, 8)])
@@ -457,21 +487,23 @@ class PayloadCheckerTest(mox.MoxTestBase):
# Add old kernel/rootfs partition info, as required.
if fail_mismatched_oki_ori or fail_old_kernel_fs_size or fail_bad_oki:
oki_hash = (None if fail_bad_oki
else hashlib.sha256('fake-oki-content').digest())
payload_gen.SetPartInfo(True, False, old_kernel_fs_size, oki_hash)
else hashlib.sha256(b'fake-oki-content').digest())
payload_gen.SetPartInfo(common.KERNEL, False, old_kernel_fs_size,
oki_hash)
if not fail_mismatched_oki_ori and (fail_old_rootfs_fs_size or
fail_bad_ori):
ori_hash = (None if fail_bad_ori
else hashlib.sha256('fake-ori-content').digest())
payload_gen.SetPartInfo(False, False, old_rootfs_fs_size, ori_hash)
else hashlib.sha256(b'fake-ori-content').digest())
payload_gen.SetPartInfo(common.ROOTFS, False, old_rootfs_fs_size,
ori_hash)
# Add new kernel/rootfs partition info.
payload_gen.SetPartInfo(
True, True, new_kernel_fs_size,
None if fail_bad_nki else hashlib.sha256('fake-nki-content').digest())
common.KERNEL, True, new_kernel_fs_size,
None if fail_bad_nki else hashlib.sha256(b'fake-nki-content').digest())
payload_gen.SetPartInfo(
False, True, new_rootfs_fs_size,
None if fail_bad_nri else hashlib.sha256('fake-nri-content').digest())
common.ROOTFS, True, new_rootfs_fs_size,
None if fail_bad_nri else hashlib.sha256(b'fake-nri-content').digest())
# Set the minor version.
payload_gen.SetMinorVersion(0)
@@ -485,13 +517,16 @@ class PayloadCheckerTest(mox.MoxTestBase):
fail_bad_nki or fail_bad_nri or fail_old_kernel_fs_size or
fail_old_rootfs_fs_size or fail_new_kernel_fs_size or
fail_new_rootfs_fs_size)
part_sizes = {
common.ROOTFS: rootfs_part_size,
common.KERNEL: kernel_part_size
}
if should_fail:
self.assertRaises(PayloadError, payload_checker._CheckManifest, report,
rootfs_part_size, kernel_part_size)
part_sizes)
else:
self.assertIsNone(payload_checker._CheckManifest(report,
rootfs_part_size,
kernel_part_size))
self.assertIsNone(payload_checker._CheckManifest(report, part_sizes))
def testCheckLength(self):
"""Tests _CheckLength()."""
@@ -515,28 +550,11 @@ class PayloadCheckerTest(mox.MoxTestBase):
# Passes w/ all real extents.
extents = self.NewExtentList((0, 4), (8, 3), (1024, 16))
self.assertEquals(
self.assertEqual(
23,
payload_checker._CheckExtents(extents, (1024 + 16) * block_size,
collections.defaultdict(int), 'foo'))
# Passes w/ pseudo-extents (aka sparse holes).
extents = self.NewExtentList((0, 4), (common.PSEUDO_EXTENT_MARKER, 5),
(8, 3))
self.assertEquals(
12,
payload_checker._CheckExtents(extents, (1024 + 16) * block_size,
collections.defaultdict(int), 'foo',
allow_pseudo=True))
# Passes w/ pseudo-extent due to a signature.
extents = self.NewExtentList((common.PSEUDO_EXTENT_MARKER, 2))
self.assertEquals(
2,
payload_checker._CheckExtents(extents, (1024 + 16) * block_size,
collections.defaultdict(int), 'foo',
allow_signature=True))
# Fails, extent missing a start block.
extents = self.NewExtentList((-1, 4), (8, 3), (1024, 16))
self.assertRaises(
@@ -567,34 +585,34 @@ class PayloadCheckerTest(mox.MoxTestBase):
block_size = payload_checker.block_size
data_length = 10000
op = self.mox.CreateMock(
update_metadata_pb2.InstallOperation)
op = mock.create_autospec(update_metadata_pb2.InstallOperation)
op.type = common.OpType.REPLACE
# Pass.
op.src_extents = []
self.assertIsNone(
payload_checker._CheckReplaceOperation(
op, data_length, (data_length + block_size - 1) / block_size,
op, data_length, (data_length + block_size - 1) // block_size,
'foo'))
# Fail, src extents founds.
op.src_extents = ['bar']
self.assertRaises(
PayloadError, payload_checker._CheckReplaceOperation,
op, data_length, (data_length + block_size - 1) / block_size, 'foo')
op, data_length, (data_length + block_size - 1) // block_size, 'foo')
# Fail, missing data.
op.src_extents = []
self.assertRaises(
PayloadError, payload_checker._CheckReplaceOperation,
op, None, (data_length + block_size - 1) / block_size, 'foo')
op, None, (data_length + block_size - 1) // block_size, 'foo')
# Fail, length / block number mismatch.
op.src_extents = ['bar']
self.assertRaises(
PayloadError, payload_checker._CheckReplaceOperation,
op, data_length, (data_length + block_size - 1) / block_size + 1, 'foo')
op, data_length, (data_length + block_size - 1) // block_size + 1,
'foo')
def testCheckReplaceBzOperation(self):
"""Tests _CheckReplaceOperation() where op.type == REPLACE_BZ."""
@@ -602,7 +620,7 @@ class PayloadCheckerTest(mox.MoxTestBase):
block_size = payload_checker.block_size
data_length = block_size * 3
op = self.mox.CreateMock(
op = mock.create_autospec(
update_metadata_pb2.InstallOperation)
op.type = common.OpType.REPLACE_BZ
@@ -610,23 +628,30 @@ class PayloadCheckerTest(mox.MoxTestBase):
op.src_extents = []
self.assertIsNone(
payload_checker._CheckReplaceOperation(
op, data_length, (data_length + block_size - 1) / block_size + 5,
op, data_length, (data_length + block_size - 1) // block_size + 5,
'foo'))
# Fail, src extents founds.
op.src_extents = ['bar']
self.assertRaises(
PayloadError, payload_checker._CheckReplaceOperation,
op, data_length, (data_length + block_size - 1) / block_size + 5, 'foo')
op, data_length, (data_length + block_size - 1) // block_size + 5,
'foo')
# Fail, missing data.
op.src_extents = []
self.assertRaises(
PayloadError, payload_checker._CheckReplaceOperation,
op, None, (data_length + block_size - 1) / block_size, 'foo')
op, None, (data_length + block_size - 1) // block_size, 'foo')
# Fail, too few blocks to justify BZ.
op.src_extents = []
self.assertRaises(
PayloadError, payload_checker._CheckReplaceOperation,
op, data_length, (data_length + block_size - 1) // block_size, 'foo')
# Fail, total_dst_blocks is a floating point value.
op.src_extents = []
self.assertRaises(
PayloadError, payload_checker._CheckReplaceOperation,
op, data_length, (data_length + block_size - 1) / block_size, 'foo')
@@ -637,7 +662,7 @@ class PayloadCheckerTest(mox.MoxTestBase):
block_size = payload_checker.block_size
data_length = block_size * 3
op = self.mox.CreateMock(
op = mock.create_autospec(
update_metadata_pb2.InstallOperation)
op.type = common.OpType.REPLACE_XZ
@@ -645,153 +670,34 @@ class PayloadCheckerTest(mox.MoxTestBase):
op.src_extents = []
self.assertIsNone(
payload_checker._CheckReplaceOperation(
op, data_length, (data_length + block_size - 1) / block_size + 5,
op, data_length, (data_length + block_size - 1) // block_size + 5,
'foo'))
# Fail, src extents founds.
op.src_extents = ['bar']
self.assertRaises(
PayloadError, payload_checker._CheckReplaceOperation,
op, data_length, (data_length + block_size - 1) / block_size + 5, 'foo')
op, data_length, (data_length + block_size - 1) // block_size + 5,
'foo')
# Fail, missing data.
op.src_extents = []
self.assertRaises(
PayloadError, payload_checker._CheckReplaceOperation,
op, None, (data_length + block_size - 1) / block_size, 'foo')
op, None, (data_length + block_size - 1) // block_size, 'foo')
# Fail, too few blocks to justify XZ.
op.src_extents = []
self.assertRaises(
PayloadError, payload_checker._CheckReplaceOperation,
op, data_length, (data_length + block_size - 1) // block_size, 'foo')
# Fail, total_dst_blocks is a floating point value.
op.src_extents = []
self.assertRaises(
PayloadError, payload_checker._CheckReplaceOperation,
op, data_length, (data_length + block_size - 1) / block_size, 'foo')
def testCheckMoveOperation_Pass(self):
"""Tests _CheckMoveOperation(); pass case."""
payload_checker = checker.PayloadChecker(self.MockPayload())
op = update_metadata_pb2.InstallOperation()
op.type = common.OpType.MOVE
self.AddToMessage(op.src_extents,
self.NewExtentList((1, 4), (12, 2), (1024, 128)))
self.AddToMessage(op.dst_extents,
self.NewExtentList((16, 128), (512, 6)))
self.assertIsNone(
payload_checker._CheckMoveOperation(op, None, 134, 134, 'foo'))
def testCheckMoveOperation_FailContainsData(self):
"""Tests _CheckMoveOperation(); fails, message contains data."""
payload_checker = checker.PayloadChecker(self.MockPayload())
op = update_metadata_pb2.InstallOperation()
op.type = common.OpType.MOVE
self.AddToMessage(op.src_extents,
self.NewExtentList((1, 4), (12, 2), (1024, 128)))
self.AddToMessage(op.dst_extents,
self.NewExtentList((16, 128), (512, 6)))
self.assertRaises(
PayloadError, payload_checker._CheckMoveOperation,
op, 1024, 134, 134, 'foo')
def testCheckMoveOperation_FailInsufficientSrcBlocks(self):
"""Tests _CheckMoveOperation(); fails, not enough actual src blocks."""
payload_checker = checker.PayloadChecker(self.MockPayload())
op = update_metadata_pb2.InstallOperation()
op.type = common.OpType.MOVE
self.AddToMessage(op.src_extents,
self.NewExtentList((1, 4), (12, 2), (1024, 127)))
self.AddToMessage(op.dst_extents,
self.NewExtentList((16, 128), (512, 6)))
self.assertRaises(
PayloadError, payload_checker._CheckMoveOperation,
op, None, 134, 134, 'foo')
def testCheckMoveOperation_FailInsufficientDstBlocks(self):
  """Tests _CheckMoveOperation(); fails, not enough actual dst blocks."""
  payload_checker = checker.PayloadChecker(self.MockPayload())
  op = update_metadata_pb2.InstallOperation()
  op.type = common.OpType.MOVE
  # Destination extents cover only 128 + 5 = 133 blocks; 134 are expected.
  src_list = self.NewExtentList((1, 4), (12, 2), (1024, 128))
  dst_list = self.NewExtentList((16, 128), (512, 5))
  self.AddToMessage(op.src_extents, src_list)
  self.AddToMessage(op.dst_extents, dst_list)
  with self.assertRaises(PayloadError):
    payload_checker._CheckMoveOperation(op, None, 134, 134, 'foo')
def testCheckMoveOperation_FailExcessSrcBlocks(self):
  """Tests _CheckMoveOperation(); fails, too many actual src blocks."""
  payload_checker = checker.PayloadChecker(self.MockPayload())
  op = update_metadata_pb2.InstallOperation()
  op.type = common.OpType.MOVE
  # Phase 1: src covers 134 blocks but dst only 133 -- mismatch rejected.
  self.AddToMessage(op.src_extents,
                    self.NewExtentList((1, 4), (12, 2), (1024, 128)))
  self.AddToMessage(op.dst_extents,
                    self.NewExtentList((16, 128), (512, 5)))
  with self.assertRaises(PayloadError):
    payload_checker._CheckMoveOperation(op, None, 134, 134, 'foo')
  # Phase 2: append further extents so src exceeds the expected totals.
  self.AddToMessage(op.src_extents,
                    self.NewExtentList((1, 4), (12, 2), (1024, 129)))
  self.AddToMessage(op.dst_extents,
                    self.NewExtentList((16, 128), (512, 6)))
  with self.assertRaises(PayloadError):
    payload_checker._CheckMoveOperation(op, None, 134, 134, 'foo')
def testCheckMoveOperation_FailExcessDstBlocks(self):
  """Tests _CheckMoveOperation(); fails, too many actual dst blocks."""
  payload_checker = checker.PayloadChecker(self.MockPayload())
  op = update_metadata_pb2.InstallOperation()
  op.type = common.OpType.MOVE
  # Destination extents cover 128 + 7 = 135 blocks; only 134 are expected.
  src_list = self.NewExtentList((1, 4), (12, 2), (1024, 128))
  dst_list = self.NewExtentList((16, 128), (512, 7))
  self.AddToMessage(op.src_extents, src_list)
  self.AddToMessage(op.dst_extents, dst_list)
  with self.assertRaises(PayloadError):
    payload_checker._CheckMoveOperation(op, None, 134, 134, 'foo')
def testCheckMoveOperation_FailStagnantBlocks(self):
  """Tests _CheckMoveOperation(); fails when some blocks do not move."""
  payload_checker = checker.PayloadChecker(self.MockPayload())
  op = update_metadata_pb2.InstallOperation()
  op.type = common.OpType.MOVE
  # dst starts at block 8, overlapping the src range -- some blocks would
  # be "moved" onto themselves, which the checker must reject.
  src_list = self.NewExtentList((1, 4), (12, 2), (1024, 128))
  dst_list = self.NewExtentList((8, 128), (512, 6))
  self.AddToMessage(op.src_extents, src_list)
  self.AddToMessage(op.dst_extents, dst_list)
  with self.assertRaises(PayloadError):
    payload_checker._CheckMoveOperation(op, None, 134, 134, 'foo')
def testCheckMoveOperation_FailZeroStartBlock(self):
  """Tests _CheckMoveOperation(); fails on an extent with start block 0."""
  payload_checker = checker.PayloadChecker(self.MockPayload())
  op = update_metadata_pb2.InstallOperation()
  op.type = common.OpType.MOVE
  # Phase 1: a zero start block in the source extents is rejected.
  self.AddToMessage(op.src_extents,
                    self.NewExtentList((0, 4), (12, 2), (1024, 128)))
  self.AddToMessage(op.dst_extents,
                    self.NewExtentList((8, 128), (512, 6)))
  with self.assertRaises(PayloadError):
    payload_checker._CheckMoveOperation(op, None, 134, 134, 'foo')
  # Phase 2: a zero start block in the destination extents is also rejected.
  self.AddToMessage(op.src_extents,
                    self.NewExtentList((1, 4), (12, 2), (1024, 128)))
  self.AddToMessage(op.dst_extents,
                    self.NewExtentList((0, 128), (512, 6)))
  with self.assertRaises(PayloadError):
    payload_checker._CheckMoveOperation(op, None, 134, 134, 'foo')
def testCheckAnyDiff(self):
"""Tests _CheckAnyDiffOperation()."""
payload_checker = checker.PayloadChecker(self.MockPayload())
@@ -829,8 +735,8 @@ class PayloadCheckerTest(mox.MoxTestBase):
self.assertRaises(PayloadError, payload_checker._CheckSourceCopyOperation,
None, 0, 1, 'foo')
def DoCheckOperationTest(self, op_type_name, is_last, allow_signature,
allow_unhashed, fail_src_extents, fail_dst_extents,
def DoCheckOperationTest(self, op_type_name, allow_unhashed,
fail_src_extents, fail_dst_extents,
fail_mismatched_data_offset_length,
fail_missing_dst_extents, fail_src_length,
fail_dst_length, fail_data_hash,
@@ -838,10 +744,8 @@ class PayloadCheckerTest(mox.MoxTestBase):
"""Parametric testing of _CheckOperation().
Args:
op_type_name: 'REPLACE', 'REPLACE_BZ', 'REPLACE_XZ', 'MOVE', 'BSDIFF',
op_type_name: 'REPLACE', 'REPLACE_BZ', 'REPLACE_XZ',
'SOURCE_COPY', 'SOURCE_BSDIFF', BROTLI_BSDIFF or 'PUFFDIFF'.
is_last: Whether we're testing the last operation in a sequence.
allow_signature: Whether we're testing a signature-capable operation.
allow_unhashed: Whether we're allowing to not hash the data.
fail_src_extents: Tamper with src extents.
fail_dst_extents: Tamper with dst extents.
@@ -866,9 +770,9 @@ class PayloadCheckerTest(mox.MoxTestBase):
old_part_size = test_utils.MiB(4)
new_part_size = test_utils.MiB(8)
old_block_counters = array.array(
'B', [0] * ((old_part_size + block_size - 1) / block_size))
'B', [0] * ((old_part_size + block_size - 1) // block_size))
new_block_counters = array.array(
'B', [0] * ((new_part_size + block_size - 1) / block_size))
'B', [0] * ((new_part_size + block_size - 1) // block_size))
prev_data_offset = 1876
blob_hash_counts = collections.defaultdict(int)
@@ -877,8 +781,7 @@ class PayloadCheckerTest(mox.MoxTestBase):
op.type = op_type
total_src_blocks = 0
if op_type in (common.OpType.MOVE, common.OpType.BSDIFF,
common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF,
if op_type in (common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF,
common.OpType.PUFFDIFF, common.OpType.BROTLI_BSDIFF):
if fail_src_extents:
self.AddToMessage(op.src_extents,
@@ -888,10 +791,9 @@ class PayloadCheckerTest(mox.MoxTestBase):
self.NewExtentList((1, 16)))
total_src_blocks = 16
payload_checker.major_version = common.BRILLO_MAJOR_PAYLOAD_VERSION
if op_type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
payload_checker.minor_version = 0
elif op_type in (common.OpType.MOVE, common.OpType.BSDIFF):
payload_checker.minor_version = 2 if fail_bad_minor_version else 1
elif op_type in (common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF):
payload_checker.minor_version = 1 if fail_bad_minor_version else 2
if op_type == common.OpType.REPLACE_XZ:
@@ -902,7 +804,7 @@ class PayloadCheckerTest(mox.MoxTestBase):
elif op_type == common.OpType.PUFFDIFF:
payload_checker.minor_version = 4 if fail_bad_minor_version else 5
if op_type not in (common.OpType.MOVE, common.OpType.SOURCE_COPY):
if op_type != common.OpType.SOURCE_COPY:
if not fail_mismatched_data_offset_length:
op.data_length = 16 * block_size - 8
if fail_prev_data_offset:
@@ -911,20 +813,16 @@ class PayloadCheckerTest(mox.MoxTestBase):
op.data_offset = prev_data_offset
fake_data = 'fake-data'.ljust(op.data_length)
if not (allow_unhashed or (is_last and allow_signature and
op_type == common.OpType.REPLACE)):
if not fail_data_hash:
# Create a valid data blob hash.
op.data_sha256_hash = hashlib.sha256(fake_data).digest()
payload.ReadDataBlob(op.data_offset, op.data_length).AndReturn(
fake_data)
if not allow_unhashed and not fail_data_hash:
# Create a valid data blob hash.
op.data_sha256_hash = hashlib.sha256(fake_data.encode('utf-8')).digest()
payload.ReadDataBlob.return_value = fake_data.encode('utf-8')
elif fail_data_hash:
# Create an invalid data blob hash.
op.data_sha256_hash = hashlib.sha256(
fake_data.replace(' ', '-')).digest()
payload.ReadDataBlob(op.data_offset, op.data_length).AndReturn(
fake_data)
fake_data.replace(' ', '-').encode('utf-8')).digest()
payload.ReadDataBlob.return_value = fake_data.encode('utf-8')
total_dst_blocks = 0
if not fail_missing_dst_extents:
@@ -939,8 +837,7 @@ class PayloadCheckerTest(mox.MoxTestBase):
if total_src_blocks:
if fail_src_length:
op.src_length = total_src_blocks * block_size + 8
elif (op_type in (common.OpType.MOVE, common.OpType.BSDIFF,
common.OpType.SOURCE_BSDIFF) and
elif (op_type == common.OpType.SOURCE_BSDIFF and
payload_checker.minor_version <= 3):
op.src_length = total_src_blocks * block_size
elif fail_src_length:
@@ -950,19 +847,17 @@ class PayloadCheckerTest(mox.MoxTestBase):
if total_dst_blocks:
if fail_dst_length:
op.dst_length = total_dst_blocks * block_size + 8
elif (op_type in (common.OpType.MOVE, common.OpType.BSDIFF,
common.OpType.SOURCE_BSDIFF) and
elif (op_type == common.OpType.SOURCE_BSDIFF and
payload_checker.minor_version <= 3):
op.dst_length = total_dst_blocks * block_size
self.mox.ReplayAll()
should_fail = (fail_src_extents or fail_dst_extents or
fail_mismatched_data_offset_length or
fail_missing_dst_extents or fail_src_length or
fail_dst_length or fail_data_hash or fail_prev_data_offset or
fail_bad_minor_version)
args = (op, 'foo', is_last, old_block_counters, new_block_counters,
old_part_size, new_part_size, prev_data_offset, allow_signature,
args = (op, 'foo', old_block_counters, new_block_counters,
old_part_size, new_part_size, prev_data_offset,
blob_hash_counts)
if should_fail:
self.assertRaises(PayloadError, payload_checker._CheckOperation, *args)
@@ -1004,8 +899,9 @@ class PayloadCheckerTest(mox.MoxTestBase):
if fail_nonexhaustive_full_update:
rootfs_data_length -= block_size
payload_gen.AddOperation(False, rootfs_op_type,
dst_extents=[(0, rootfs_data_length / block_size)],
payload_gen.AddOperation(common.ROOTFS, rootfs_op_type,
dst_extents=
[(0, rootfs_data_length // block_size)],
data_offset=0,
data_length=rootfs_data_length)
@@ -1015,17 +911,17 @@ class PayloadCheckerTest(mox.MoxTestBase):
'allow_unhashed': True})
payload_checker.payload_type = checker._TYPE_FULL
report = checker._PayloadReport()
args = (payload_checker.payload.manifest.install_operations, report, 'foo',
0, rootfs_part_size, rootfs_part_size, rootfs_part_size, 0, False)
partition = next((p for p in payload_checker.payload.manifest.partitions
if p.partition_name == common.ROOTFS), None)
args = (partition.operations, report, 'foo',
0, rootfs_part_size, rootfs_part_size, rootfs_part_size, 0)
if fail_nonexhaustive_full_update:
self.assertRaises(PayloadError, payload_checker._CheckOperations, *args)
else:
self.assertEqual(rootfs_data_length,
payload_checker._CheckOperations(*args))
def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_missing_pseudo_op,
fail_mismatched_pseudo_op, fail_sig_missing_fields,
def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_sig_missing_fields,
fail_unknown_sig_version, fail_incorrect_sig):
"""Tests _CheckSignatures()."""
# Generate a test payload. For this test, we only care about the signature
@@ -1036,20 +932,18 @@ class PayloadCheckerTest(mox.MoxTestBase):
payload_gen.SetBlockSize(block_size)
rootfs_part_size = test_utils.MiB(2)
kernel_part_size = test_utils.KiB(16)
payload_gen.SetPartInfo(False, True, rootfs_part_size,
hashlib.sha256('fake-new-rootfs-content').digest())
payload_gen.SetPartInfo(True, True, kernel_part_size,
hashlib.sha256('fake-new-kernel-content').digest())
payload_gen.SetPartInfo(common.ROOTFS, True, rootfs_part_size,
hashlib.sha256(b'fake-new-rootfs-content').digest())
payload_gen.SetPartInfo(common.KERNEL, True, kernel_part_size,
hashlib.sha256(b'fake-new-kernel-content').digest())
payload_gen.SetMinorVersion(0)
payload_gen.AddOperationWithData(
False, common.OpType.REPLACE,
dst_extents=[(0, rootfs_part_size / block_size)],
common.ROOTFS, common.OpType.REPLACE,
dst_extents=[(0, rootfs_part_size // block_size)],
data_blob=os.urandom(rootfs_part_size))
do_forge_pseudo_op = (fail_missing_pseudo_op or fail_mismatched_pseudo_op)
do_forge_sigs_data = (do_forge_pseudo_op or fail_empty_sigs_blob or
fail_sig_missing_fields or fail_unknown_sig_version
or fail_incorrect_sig)
do_forge_sigs_data = (fail_empty_sigs_blob or fail_sig_missing_fields or
fail_unknown_sig_version or fail_incorrect_sig)
sigs_data = None
if do_forge_sigs_data:
@@ -1058,37 +952,29 @@ class PayloadCheckerTest(mox.MoxTestBase):
if fail_sig_missing_fields:
sig_data = None
else:
sig_data = test_utils.SignSha256('fake-payload-content',
sig_data = test_utils.SignSha256(b'fake-payload-content',
test_utils._PRIVKEY_FILE_NAME)
sigs_gen.AddSig(5 if fail_unknown_sig_version else 1, sig_data)
sigs_data = sigs_gen.ToBinary()
payload_gen.SetSignatures(payload_gen.curr_offset, len(sigs_data))
if do_forge_pseudo_op:
assert sigs_data is not None, 'should have forged signatures blob by now'
sigs_len = len(sigs_data)
payload_gen.AddOperation(
False, common.OpType.REPLACE,
data_offset=payload_gen.curr_offset / 2,
data_length=sigs_len / 2,
dst_extents=[(0, (sigs_len / 2 + block_size - 1) / block_size)])
# Generate payload (complete w/ signature) and create the test object.
payload_checker = _GetPayloadChecker(
payload_gen.WriteToFileWithData,
payload_gen_dargs={
'sigs_data': sigs_data,
'privkey_file_name': test_utils._PRIVKEY_FILE_NAME,
'do_add_pseudo_operation': not do_forge_pseudo_op})
'privkey_file_name': test_utils._PRIVKEY_FILE_NAME})
payload_checker.payload_type = checker._TYPE_FULL
report = checker._PayloadReport()
# We have to check the manifest first in order to set signature attributes.
payload_checker._CheckManifest(report, rootfs_part_size, kernel_part_size)
payload_checker._CheckManifest(report, {
common.ROOTFS: rootfs_part_size,
common.KERNEL: kernel_part_size
})
should_fail = (fail_empty_sigs_blob or fail_missing_pseudo_op or
fail_mismatched_pseudo_op or fail_sig_missing_fields or
should_fail = (fail_empty_sigs_blob or fail_sig_missing_fields or
fail_unknown_sig_version or fail_incorrect_sig)
args = (report, test_utils._PUBKEY_FILE_NAME)
if should_fail:
@@ -1112,7 +998,6 @@ class PayloadCheckerTest(mox.MoxTestBase):
should_succeed = (
(minor_version == 0 and payload_type == checker._TYPE_FULL) or
(minor_version == 1 and payload_type == checker._TYPE_DELTA) or
(minor_version == 2 and payload_type == checker._TYPE_DELTA) or
(minor_version == 3 and payload_type == checker._TYPE_DELTA) or
(minor_version == 4 and payload_type == checker._TYPE_DELTA) or
@@ -1127,8 +1012,8 @@ class PayloadCheckerTest(mox.MoxTestBase):
def DoRunTest(self, rootfs_part_size_provided, kernel_part_size_provided,
fail_wrong_payload_type, fail_invalid_block_size,
fail_mismatched_block_size, fail_excess_data,
fail_rootfs_part_size_exceeded,
fail_mismatched_metadata_size, fail_mismatched_block_size,
fail_excess_data, fail_rootfs_part_size_exceeded,
fail_kernel_part_size_exceeded):
"""Tests Run()."""
# Generate a test payload. For this test, we generate a full update that
@@ -1142,10 +1027,10 @@ class PayloadCheckerTest(mox.MoxTestBase):
payload_gen.SetBlockSize(block_size)
kernel_filesystem_size = test_utils.KiB(16)
rootfs_filesystem_size = test_utils.MiB(2)
payload_gen.SetPartInfo(False, True, rootfs_filesystem_size,
hashlib.sha256('fake-new-rootfs-content').digest())
payload_gen.SetPartInfo(True, True, kernel_filesystem_size,
hashlib.sha256('fake-new-kernel-content').digest())
payload_gen.SetPartInfo(common.ROOTFS, True, rootfs_filesystem_size,
hashlib.sha256(b'fake-new-rootfs-content').digest())
payload_gen.SetPartInfo(common.KERNEL, True, kernel_filesystem_size,
hashlib.sha256(b'fake-new-kernel-content').digest())
payload_gen.SetMinorVersion(0)
rootfs_part_size = 0
@@ -1155,8 +1040,8 @@ class PayloadCheckerTest(mox.MoxTestBase):
if fail_rootfs_part_size_exceeded:
rootfs_op_size += block_size
payload_gen.AddOperationWithData(
False, common.OpType.REPLACE,
dst_extents=[(0, rootfs_op_size / block_size)],
common.ROOTFS, common.OpType.REPLACE,
dst_extents=[(0, rootfs_op_size // block_size)],
data_blob=os.urandom(rootfs_op_size))
kernel_part_size = 0
@@ -1166,8 +1051,8 @@ class PayloadCheckerTest(mox.MoxTestBase):
if fail_kernel_part_size_exceeded:
kernel_op_size += block_size
payload_gen.AddOperationWithData(
True, common.OpType.REPLACE,
dst_extents=[(0, kernel_op_size / block_size)],
common.KERNEL, common.OpType.REPLACE,
dst_extents=[(0, kernel_op_size // block_size)],
data_blob=os.urandom(kernel_op_size))
# Generate payload (complete w/ signature) and create the test object.
@@ -1178,11 +1063,14 @@ class PayloadCheckerTest(mox.MoxTestBase):
else:
use_block_size = block_size
# For the unittests 237 is the value that generated for the payload.
metadata_size = 237
if fail_mismatched_metadata_size:
metadata_size += 1
kwargs = {
'payload_gen_dargs': {
'privkey_file_name': test_utils._PRIVKEY_FILE_NAME,
'do_add_pseudo_operation': True,
'is_pseudo_in_kernel': True,
'padding': os.urandom(1024) if fail_excess_data else None},
'checker_init_dargs': {
'assert_type': 'delta' if fail_wrong_payload_type else 'full',
@@ -1194,23 +1082,27 @@ class PayloadCheckerTest(mox.MoxTestBase):
payload_checker = _GetPayloadChecker(payload_gen.WriteToFileWithData,
**kwargs)
kwargs = {'pubkey_file_name': test_utils._PUBKEY_FILE_NAME,
'rootfs_part_size': rootfs_part_size,
'kernel_part_size': kernel_part_size}
kwargs2 = {
'pubkey_file_name': test_utils._PUBKEY_FILE_NAME,
'metadata_size': metadata_size,
'part_sizes': {
common.KERNEL: kernel_part_size,
common.ROOTFS: rootfs_part_size}}
should_fail = (fail_wrong_payload_type or fail_mismatched_block_size or
fail_excess_data or
fail_mismatched_metadata_size or fail_excess_data or
fail_rootfs_part_size_exceeded or
fail_kernel_part_size_exceeded)
if should_fail:
self.assertRaises(PayloadError, payload_checker.Run, **kwargs)
self.assertRaises(PayloadError, payload_checker.Run, **kwargs2)
else:
self.assertIsNone(payload_checker.Run(**kwargs))
self.assertIsNone(payload_checker.Run(**kwargs2))
# This implements a generic API, hence the occasional unused args.
# pylint: disable=W0613
def ValidateCheckOperationTest(op_type_name, is_last, allow_signature,
allow_unhashed, fail_src_extents,
fail_dst_extents,
def ValidateCheckOperationTest(op_type_name, allow_unhashed,
fail_src_extents, fail_dst_extents,
fail_mismatched_data_offset_length,
fail_missing_dst_extents, fail_src_length,
fail_dst_length, fail_data_hash,
@@ -1227,8 +1119,8 @@ def ValidateCheckOperationTest(op_type_name, is_last, allow_signature,
fail_bad_minor_version)):
return False
# MOVE and SOURCE_COPY operations don't carry data.
if (op_type in (common.OpType.MOVE, common.OpType.SOURCE_COPY) and (
# SOURCE_COPY operation does not carry data.
if (op_type == common.OpType.SOURCE_COPY and (
fail_mismatched_data_offset_length or fail_data_hash or
fail_prev_data_offset)):
return False
@@ -1257,14 +1149,14 @@ def AddParametricTests(tested_method_name, arg_space, validate_func=None):
(values) associated with them.
validate_func: A function used for validating test argument combinations.
"""
for value_tuple in itertools.product(*arg_space.itervalues()):
run_dargs = dict(zip(arg_space.iterkeys(), value_tuple))
for value_tuple in itertools.product(*iter(arg_space.values())):
run_dargs = dict(zip(iter(arg_space.keys()), value_tuple))
if validate_func and not validate_func(**run_dargs):
continue
run_method_name = 'Do%sTest' % tested_method_name
test_method_name = 'test%s' % tested_method_name
for arg_key, arg_val in run_dargs.iteritems():
if arg_val or type(arg_val) is int:
for arg_key, arg_val in run_dargs.items():
if arg_val or isinstance(arg_val, int):
test_method_name += '__%s=%s' % (arg_key, arg_val)
setattr(PayloadCheckerTest, test_method_name,
TestMethodBody(run_method_name, run_dargs))
@@ -1311,11 +1203,8 @@ def AddAllParametricTests():
# Add all _CheckOperation() test cases.
AddParametricTests('CheckOperation',
{'op_type_name': ('REPLACE', 'REPLACE_BZ', 'REPLACE_XZ',
'MOVE', 'BSDIFF', 'SOURCE_COPY',
'SOURCE_BSDIFF', 'PUFFDIFF',
'BROTLI_BSDIFF'),
'is_last': (True, False),
'allow_signature': (True, False),
'SOURCE_COPY', 'SOURCE_BSDIFF',
'PUFFDIFF', 'BROTLI_BSDIFF'),
'allow_unhashed': (True, False),
'fail_src_extents': (True, False),
'fail_dst_extents': (True, False),
@@ -1335,15 +1224,13 @@ def AddAllParametricTests():
# Add all _CheckOperations() test cases.
AddParametricTests('CheckSignatures',
{'fail_empty_sigs_blob': (True, False),
'fail_missing_pseudo_op': (True, False),
'fail_mismatched_pseudo_op': (True, False),
'fail_sig_missing_fields': (True, False),
'fail_unknown_sig_version': (True, False),
'fail_incorrect_sig': (True, False)})
# Add all _CheckManifestMinorVersion() test cases.
AddParametricTests('CheckManifestMinorVersion',
{'minor_version': (None, 0, 1, 2, 3, 4, 5, 555),
{'minor_version': (None, 0, 2, 3, 4, 5, 555),
'payload_type': (checker._TYPE_FULL,
checker._TYPE_DELTA)})
@@ -1353,6 +1240,7 @@ def AddAllParametricTests():
'kernel_part_size_provided': (True, False),
'fail_wrong_payload_type': (True, False),
'fail_invalid_block_size': (True, False),
'fail_mismatched_metadata_size': (True, False),
'fail_mismatched_block_size': (True, False),
'fail_excess_data': (True, False),
'fail_rootfs_part_size_exceeded': (True, False),

View File

@@ -16,8 +16,11 @@
"""Utilities for update payload processing."""
from __future__ import absolute_import
from __future__ import print_function
import base64
from update_payload import update_metadata_pb2
from update_payload.error import PayloadError
@@ -25,23 +28,25 @@ from update_payload.error import PayloadError
#
# Constants.
#
PSEUDO_EXTENT_MARKER = (1L << 64) - 1 # UINT64_MAX
SIG_ASN1_HEADER = (
'\x30\x31\x30\x0d\x06\x09\x60\x86'
'\x48\x01\x65\x03\x04\x02\x01\x05'
'\x00\x04\x20'
b'\x30\x31\x30\x0d\x06\x09\x60\x86'
b'\x48\x01\x65\x03\x04\x02\x01\x05'
b'\x00\x04\x20'
)
CHROMEOS_MAJOR_PAYLOAD_VERSION = 1
BRILLO_MAJOR_PAYLOAD_VERSION = 2
INPLACE_MINOR_PAYLOAD_VERSION = 1
SOURCE_MINOR_PAYLOAD_VERSION = 2
OPSRCHASH_MINOR_PAYLOAD_VERSION = 3
BROTLI_BSDIFF_MINOR_PAYLOAD_VERSION = 4
PUFFDIFF_MINOR_PAYLOAD_VERSION = 5
KERNEL = 'kernel'
ROOTFS = 'root'
# Tuple of (name in system, name in protobuf).
CROS_PARTITIONS = ((KERNEL, KERNEL), (ROOTFS, 'rootfs'))
#
# Payload operation types.
#
@@ -50,8 +55,6 @@ class OpType(object):
_CLASS = update_metadata_pb2.InstallOperation
REPLACE = _CLASS.REPLACE
REPLACE_BZ = _CLASS.REPLACE_BZ
MOVE = _CLASS.MOVE
BSDIFF = _CLASS.BSDIFF
SOURCE_COPY = _CLASS.SOURCE_COPY
SOURCE_BSDIFF = _CLASS.SOURCE_BSDIFF
ZERO = _CLASS.ZERO
@@ -59,13 +62,11 @@ class OpType(object):
REPLACE_XZ = _CLASS.REPLACE_XZ
PUFFDIFF = _CLASS.PUFFDIFF
BROTLI_BSDIFF = _CLASS.BROTLI_BSDIFF
ALL = (REPLACE, REPLACE_BZ, MOVE, BSDIFF, SOURCE_COPY, SOURCE_BSDIFF, ZERO,
ALL = (REPLACE, REPLACE_BZ, SOURCE_COPY, SOURCE_BSDIFF, ZERO,
DISCARD, REPLACE_XZ, PUFFDIFF, BROTLI_BSDIFF)
NAMES = {
REPLACE: 'REPLACE',
REPLACE_BZ: 'REPLACE_BZ',
MOVE: 'MOVE',
BSDIFF: 'BSDIFF',
SOURCE_COPY: 'SOURCE_COPY',
SOURCE_BSDIFF: 'SOURCE_BSDIFF',
ZERO: 'ZERO',
@@ -141,7 +142,7 @@ def Read(file_obj, length, offset=None, hasher=None):
try:
data = file_obj.read(length)
except IOError, e:
except IOError as e:
raise PayloadError('error reading from file (%s): %s' % (file_obj.name, e))
if len(data) != length:
@@ -162,13 +163,12 @@ def FormatExtent(ex, block_size=0):
end_block = ex.start_block + ex.num_blocks
if block_size:
return '%d->%d * %d' % (ex.start_block, end_block, block_size)
else:
return '%d->%d' % (ex.start_block, end_block)
return '%d->%d' % (ex.start_block, end_block)
def FormatSha256(digest):
"""Returns a canonical string representation of a SHA256 digest."""
return digest.encode('base64').strip()
return base64.b64encode(digest).decode('utf-8')
#

View File

@@ -16,6 +16,8 @@
"""Various formatting functions."""
from __future__ import division
def NumToPercent(num, total, min_precision=1, max_precision=5):
"""Returns the percentage (string) of |num| out of |total|.
@@ -50,7 +52,7 @@ def NumToPercent(num, total, min_precision=1, max_precision=5):
precision = min(min_precision, max_precision)
factor = 10 ** precision
while precision <= max_precision:
percent = num * 100 * factor / total
percent = num * 100 * factor // total
if percent:
break
factor *= 10
@@ -102,8 +104,8 @@ def BytesToHumanReadable(size, precision=1, decimal=False):
magnitude = next_magnitude
if exp != 0:
whole = size / magnitude
frac = (size % magnitude) * (10 ** precision) / magnitude
whole = size // magnitude
frac = (size % magnitude) * (10 ** precision) // magnitude
while frac and not frac % 10:
frac /= 10
return '%d%s %s' % (whole, '.%d' % frac if frac else '', suffixes[exp - 1])

View File

@@ -1,4 +1,4 @@
#!/usr/bin/python2
#!/usr/bin/env python
#
# Copyright (C) 2013 The Android Open Source Project
#
@@ -17,6 +17,11 @@
"""Unit tests for format_utils.py."""
# Disable check for function names to avoid errors based on old code
# pylint: disable-msg=invalid-name
from __future__ import absolute_import
import unittest
from update_payload import format_utils

View File

@@ -16,6 +16,9 @@
"""Histogram generation tools."""
from __future__ import absolute_import
from __future__ import division
from collections import defaultdict
from update_payload import format_utils
@@ -110,7 +113,7 @@ class Histogram(object):
hist_bar = '|'
for key, count in self.data:
if self.total:
bar_len = count * self.scale / self.total
bar_len = count * self.scale // self.total
hist_bar = '|%s|' % ('#' * bar_len).ljust(self.scale)
line = '%s %s %s' % (

View File

@@ -1,4 +1,4 @@
#!/usr/bin/python2
#!/usr/bin/env python
#
# Copyright (C) 2013 The Android Open Source Project
#
@@ -17,6 +17,11 @@
"""Unit tests for histogram.py."""
# Disable check for function names to avoid errors based on old code
# pylint: disable-msg=invalid-name
from __future__ import absolute_import
import unittest
from update_payload import format_utils

View File

@@ -16,6 +16,7 @@
"""Tools for reading, verifying and applying Chrome OS update payloads."""
from __future__ import absolute_import
from __future__ import print_function
import hashlib
@@ -64,7 +65,7 @@ class Payload(object):
"""Update payload header struct."""
# Header constants; sizes are in bytes.
_MAGIC = 'CrAU'
_MAGIC = b'CrAU'
_VERSION_SIZE = 8
_MANIFEST_LEN_SIZE = 8
_METADATA_SIGNATURE_LEN_SIZE = 4
@@ -111,7 +112,6 @@ class Payload(object):
payload_file, self._METADATA_SIGNATURE_LEN_SIZE, True,
hasher=hasher)
def __init__(self, payload_file, payload_file_offset=0):
"""Initialize the payload object.
@@ -263,9 +263,7 @@ class Payload(object):
def IsDelta(self):
"""Returns True iff the payload appears to be a delta."""
self._AssertInit()
return (self.manifest.HasField('old_kernel_info') or
self.manifest.HasField('old_rootfs_info') or
any(partition.HasField('old_partition_info')
return (any(partition.HasField('old_partition_info')
for partition in self.manifest.partitions))
def IsFull(self):
@@ -273,19 +271,19 @@ class Payload(object):
return not self.IsDelta()
def Check(self, pubkey_file_name=None, metadata_sig_file=None,
report_out_file=None, assert_type=None, block_size=0,
rootfs_part_size=0, kernel_part_size=0, allow_unhashed=False,
metadata_size=0, report_out_file=None, assert_type=None,
block_size=0, part_sizes=None, allow_unhashed=False,
disabled_tests=()):
"""Checks the payload integrity.
Args:
pubkey_file_name: public key used for signature verification
metadata_sig_file: metadata signature, if verification is desired
metadata_size: metadata size, if verification is desired
report_out_file: file object to dump the report to
assert_type: assert that payload is either 'full' or 'delta'
block_size: expected filesystem / payload block size
rootfs_part_size: the size of (physical) rootfs partitions in bytes
kernel_part_size: the size of (physical) kernel partitions in bytes
part_sizes: map of partition label to (physical) size in bytes
allow_unhashed: allow unhashed operation blobs
disabled_tests: list of tests to disable
@@ -300,20 +298,18 @@ class Payload(object):
allow_unhashed=allow_unhashed, disabled_tests=disabled_tests)
helper.Run(pubkey_file_name=pubkey_file_name,
metadata_sig_file=metadata_sig_file,
rootfs_part_size=rootfs_part_size,
kernel_part_size=kernel_part_size,
metadata_size=metadata_size,
part_sizes=part_sizes,
report_out_file=report_out_file)
def Apply(self, new_kernel_part, new_rootfs_part, old_kernel_part=None,
old_rootfs_part=None, bsdiff_in_place=True, bspatch_path=None,
puffpatch_path=None, truncate_to_expected_size=True):
def Apply(self, new_parts, old_parts=None, bsdiff_in_place=True,
bspatch_path=None, puffpatch_path=None,
truncate_to_expected_size=True):
"""Applies the update payload.
Args:
new_kernel_part: name of dest kernel partition file
new_rootfs_part: name of dest rootfs partition file
old_kernel_part: name of source kernel partition file (optional)
old_rootfs_part: name of source rootfs partition file (optional)
new_parts: map of partition name to dest partition file
old_parts: map of partition name to partition file (optional)
bsdiff_in_place: whether to perform BSDIFF operations in-place (optional)
bspatch_path: path to the bspatch binary (optional)
puffpatch_path: path to the puffpatch binary (optional)
@@ -331,6 +327,4 @@ class Payload(object):
self, bsdiff_in_place=bsdiff_in_place, bspatch_path=bspatch_path,
puffpatch_path=puffpatch_path,
truncate_to_expected_size=truncate_to_expected_size)
helper.Run(new_kernel_part, new_rootfs_part,
old_kernel_part=old_kernel_part,
old_rootfs_part=old_rootfs_part)
helper.Run(new_parts, old_parts=old_parts)

View File

@@ -16,9 +16,10 @@
"""Utilities for unit testing."""
from __future__ import absolute_import
from __future__ import print_function
import cStringIO
import io
import hashlib
import os
import struct
@@ -70,7 +71,7 @@ def _WriteInt(file_obj, size, is_unsigned, val):
"""
try:
file_obj.write(struct.pack(common.IntPackingFmtStr(size, is_unsigned), val))
except IOError, e:
except IOError as e:
raise payload.PayloadError('error writing to file (%s): %s' %
(file_obj.name, e))
@@ -173,31 +174,37 @@ class PayloadGenerator(object):
self.block_size = block_size
_SetMsgField(self.manifest, 'block_size', block_size)
def SetPartInfo(self, is_kernel, is_new, part_size, part_hash):
def SetPartInfo(self, part_name, is_new, part_size, part_hash):
"""Set the partition info entry.
Args:
is_kernel: whether this is kernel partition info
is_new: whether to set old (False) or new (True) info
part_size: the partition size (in fact, filesystem size)
part_hash: the partition hash
part_name: The name of the partition.
is_new: Whether to set old (False) or new (True) info.
part_size: The partition size (in fact, filesystem size).
part_hash: The partition hash.
"""
if is_kernel:
part_info = (self.manifest.new_kernel_info if is_new
else self.manifest.old_kernel_info)
else:
part_info = (self.manifest.new_rootfs_info if is_new
else self.manifest.old_rootfs_info)
partition = next((x for x in self.manifest.partitions
if x.partition_name == part_name), None)
if partition is None:
partition = self.manifest.partitions.add()
partition.partition_name = part_name
part_info = (partition.new_partition_info if is_new
else partition.old_partition_info)
_SetMsgField(part_info, 'size', part_size)
_SetMsgField(part_info, 'hash', part_hash)
def AddOperation(self, is_kernel, op_type, data_offset=None,
def AddOperation(self, part_name, op_type, data_offset=None,
data_length=None, src_extents=None, src_length=None,
dst_extents=None, dst_length=None, data_sha256_hash=None):
"""Adds an InstallOperation entry."""
operations = (self.manifest.kernel_install_operations if is_kernel
else self.manifest.install_operations)
partition = next((x for x in self.manifest.partitions
if x.partition_name == part_name), None)
if partition is None:
partition = self.manifest.partitions.add()
partition.partition_name = part_name
operations = partition.operations
op = operations.add()
op.type = op_type
@@ -277,7 +284,7 @@ class EnhancedPayloadGenerator(PayloadGenerator):
self.data_blobs.append(data_blob)
return data_length, data_offset
def AddOperationWithData(self, is_kernel, op_type, src_extents=None,
def AddOperationWithData(self, part_name, op_type, src_extents=None,
src_length=None, dst_extents=None, dst_length=None,
data_blob=None, do_hash_data_blob=True):
"""Adds an install operation and associated data blob.
@@ -287,12 +294,12 @@ class EnhancedPayloadGenerator(PayloadGenerator):
necessary offset/length accounting.
Args:
is_kernel: whether this is a kernel (True) or rootfs (False) operation
op_type: one of REPLACE, REPLACE_BZ, REPLACE_XZ, MOVE or BSDIFF
part_name: The name of the partition (e.g. kernel or root).
op_type: one of REPLACE, REPLACE_BZ, REPLACE_XZ.
src_extents: list of (start, length) pairs indicating src block ranges
src_length: size of the src data in bytes (needed for BSDIFF)
src_length: size of the src data in bytes (needed for diff operations)
dst_extents: list of (start, length) pairs indicating dst block ranges
dst_length: size of the dst data in bytes (needed for BSDIFF)
dst_length: size of the dst data in bytes (needed for diff operations)
data_blob: a data blob associated with this operation
do_hash_data_blob: whether or not to compute and add a data blob hash
"""
@@ -302,15 +309,13 @@ class EnhancedPayloadGenerator(PayloadGenerator):
data_sha256_hash = hashlib.sha256(data_blob).digest()
data_length, data_offset = self.AddData(data_blob)
self.AddOperation(is_kernel, op_type, data_offset=data_offset,
self.AddOperation(part_name, op_type, data_offset=data_offset,
data_length=data_length, src_extents=src_extents,
src_length=src_length, dst_extents=dst_extents,
dst_length=dst_length, data_sha256_hash=data_sha256_hash)
def WriteToFileWithData(self, file_obj, sigs_data=None,
privkey_file_name=None,
do_add_pseudo_operation=False,
is_pseudo_in_kernel=False, padding=None):
privkey_file_name=None, padding=None):
"""Writes the payload content to a file, optionally signing the content.
Args:
@@ -319,10 +324,6 @@ class EnhancedPayloadGenerator(PayloadGenerator):
payload signature fields assumed to be preset by the caller)
privkey_file_name: key used for signing the payload (optional; used only
if explicit signatures blob not provided)
do_add_pseudo_operation: whether a pseudo-operation should be added to
account for the signature blob
is_pseudo_in_kernel: whether the pseudo-operation should be added to
kernel (True) or rootfs (False) operations
padding: stuff to dump past the normal data blobs provided (optional)
Raises:
@@ -335,7 +336,7 @@ class EnhancedPayloadGenerator(PayloadGenerator):
if do_generate_sigs_data:
# First, sign some arbitrary data to obtain the size of a signature blob.
fake_sig = SignSha256('fake-payload-data', privkey_file_name)
fake_sig = SignSha256(b'fake-payload-data', privkey_file_name)
fake_sigs_gen = SignaturesGenerator()
fake_sigs_gen.AddSig(1, fake_sig)
sigs_len = len(fake_sigs_gen.ToBinary())
@@ -343,20 +344,9 @@ class EnhancedPayloadGenerator(PayloadGenerator):
# Update the payload with proper signature attributes.
self.SetSignatures(self.curr_offset, sigs_len)
# Add a pseudo-operation to account for the signature blob, if requested.
if do_add_pseudo_operation:
if not self.block_size:
raise TestError('cannot add pseudo-operation without knowing the '
'payload block size')
self.AddOperation(
is_pseudo_in_kernel, common.OpType.REPLACE,
data_offset=self.curr_offset, data_length=sigs_len,
dst_extents=[(common.PSEUDO_EXTENT_MARKER,
(sigs_len + self.block_size - 1) / self.block_size)])
if do_generate_sigs_data:
# Once all payload fields are updated, dump and sign it.
temp_payload_file = cStringIO.StringIO()
temp_payload_file = io.BytesIO()
self.WriteToFile(temp_payload_file, data_blobs=self.data_blobs)
sig = SignSha256(temp_payload_file.getvalue(), privkey_file_name)
sigs_gen = SignaturesGenerator()

View File

@@ -1,19 +1,27 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: update_metadata.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import descriptor_pb2
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='update_metadata.proto',
package='chromeos_update_engine',
serialized_pb='\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xe6\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\r\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\r\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xa5\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x08\n\x04MOVE\x10\x02\x12\n\n\x06\x42SDIFF\x10\x03\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x0c\n\x08PUFFDIFF\x10\t\x12\x11\n\rBROTLI_BSDIFF\x10\n\"\xa6\x03\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 
\x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\"\xc4\x05\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdateB\x02H\x03')
syntax='proto2',
serialized_options=_b('H\003'),
serialized_pb=_b('\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"\x9f\x01\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1aO\n\tSignature\x12\x13\n\x07version\x18\x01 \x01(\rB\x02\x18\x01\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\x1f\n\x17unpadded_signature_size\x18\x03 \x01(\x07\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xee\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xad\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0c\n\x04MOVE\x10\x02\x1a\x02\x08\x01\x12\x0e\n\x06\x42SDIFF\x10\x03\x1a\x02\x08\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 
\x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"s\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\x12\x18\n\x10snapshot_enabled\x18\x02 \x01(\x08\"\xe1\x06\n\x14\x44\x65ltaArchiveManifest\x12H\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12O\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12\x42\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_rootfs_info\x18\t 
\x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadata\x12\x16\n\x0epartial_update\x18\x10 \x01(\x08\x42\x02H\x03')
)
@@ -25,54 +33,55 @@ _INSTALLOPERATION_TYPE = _descriptor.EnumDescriptor(
values=[
_descriptor.EnumValueDescriptor(
name='REPLACE', index=0, number=0,
options=None,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REPLACE_BZ', index=1, number=1,
options=None,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MOVE', index=2, number=2,
options=None,
serialized_options=_b('\010\001'),
type=None),
_descriptor.EnumValueDescriptor(
name='BSDIFF', index=3, number=3,
options=None,
serialized_options=_b('\010\001'),
type=None),
_descriptor.EnumValueDescriptor(
name='SOURCE_COPY', index=4, number=4,
options=None,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SOURCE_BSDIFF', index=5, number=5,
options=None,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ZERO', index=6, number=6,
options=None,
name='REPLACE_XZ', index=6, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DISCARD', index=7, number=7,
options=None,
name='ZERO', index=7, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REPLACE_XZ', index=8, number=8,
options=None,
name='DISCARD', index=8, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PUFFDIFF', index=9, number=9,
options=None,
name='BROTLI_BSDIFF', index=9, number=10,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BROTLI_BSDIFF', index=10, number=10,
options=None,
name='PUFFDIFF', index=10, number=9,
serialized_options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=712,
serialized_end=877,
serialized_options=None,
serialized_start=750,
serialized_end=923,
)
_sym_db.RegisterEnumDescriptor(_INSTALLOPERATION_TYPE)
_EXTENT = _descriptor.Descriptor(
@@ -88,23 +97,26 @@ _EXTENT = _descriptor.Descriptor(
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_blocks', full_name='chromeos_update_engine.Extent.num_blocks', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=49,
serialized_end=98,
)
@@ -123,25 +135,35 @@ _SIGNATURES_SIGNATURE = _descriptor.Descriptor(
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='chromeos_update_engine.Signatures.Signature.data', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='unpadded_signature_size', full_name='chromeos_update_engine.Signatures.Signature.unpadded_signature_size', index=2,
number=3, type=7, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
serialized_start=180,
serialized_end=222,
oneofs=[
],
serialized_start=181,
serialized_end=260,
)
_SIGNATURES = _descriptor.Descriptor(
@@ -157,18 +179,21 @@ _SIGNATURES = _descriptor.Descriptor(
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SIGNATURES_SIGNATURE, ],
enum_types=[
],
options=None,
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
serialized_start=100,
serialized_end=222,
oneofs=[
],
serialized_start=101,
serialized_end=260,
)
@@ -185,25 +210,28 @@ _PARTITIONINFO = _descriptor.Descriptor(
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hash', full_name='chromeos_update_engine.PartitionInfo.hash', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
serialized_start=224,
serialized_end=267,
oneofs=[
],
serialized_start=262,
serialized_end=305,
)
@@ -217,56 +245,59 @@ _IMAGEINFO = _descriptor.Descriptor(
_descriptor.FieldDescriptor(
name='board', full_name='chromeos_update_engine.ImageInfo.board', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key', full_name='chromeos_update_engine.ImageInfo.key', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='channel', full_name='chromeos_update_engine.ImageInfo.channel', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='chromeos_update_engine.ImageInfo.version', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='build_channel', full_name='chromeos_update_engine.ImageInfo.build_channel', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='build_version', full_name='chromeos_update_engine.ImageInfo.build_version', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
serialized_start=269,
serialized_end=388,
oneofs=[
],
serialized_start=307,
serialized_end=426,
)
@@ -283,63 +314,63 @@ _INSTALLOPERATION = _descriptor.Descriptor(
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_offset', full_name='chromeos_update_engine.InstallOperation.data_offset', index=1,
number=2, type=13, cpp_type=3, label=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_length', full_name='chromeos_update_engine.InstallOperation.data_length', index=2,
number=3, type=13, cpp_type=3, label=1,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='src_extents', full_name='chromeos_update_engine.InstallOperation.src_extents', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='src_length', full_name='chromeos_update_engine.InstallOperation.src_length', index=4,
number=5, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dst_extents', full_name='chromeos_update_engine.InstallOperation.dst_extents', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dst_length', full_name='chromeos_update_engine.InstallOperation.dst_length', index=6,
number=7, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_sha256_hash', full_name='chromeos_update_engine.InstallOperation.data_sha256_hash', index=7,
number=8, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='src_sha256_hash', full_name='chromeos_update_engine.InstallOperation.src_sha256_hash', index=8,
number=9, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
@@ -347,11 +378,14 @@ _INSTALLOPERATION = _descriptor.Descriptor(
enum_types=[
_INSTALLOPERATION_TYPE,
],
options=None,
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
serialized_start=391,
serialized_end=877,
oneofs=[
],
serialized_start=429,
serialized_end=923,
)
@@ -365,77 +399,212 @@ _PARTITIONUPDATE = _descriptor.Descriptor(
_descriptor.FieldDescriptor(
name='partition_name', full_name='chromeos_update_engine.PartitionUpdate.partition_name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='run_postinstall', full_name='chromeos_update_engine.PartitionUpdate.run_postinstall', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='postinstall_path', full_name='chromeos_update_engine.PartitionUpdate.postinstall_path', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='filesystem_type', full_name='chromeos_update_engine.PartitionUpdate.filesystem_type', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='new_partition_signature', full_name='chromeos_update_engine.PartitionUpdate.new_partition_signature', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='old_partition_info', full_name='chromeos_update_engine.PartitionUpdate.old_partition_info', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='new_partition_info', full_name='chromeos_update_engine.PartitionUpdate.new_partition_info', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operations', full_name='chromeos_update_engine.PartitionUpdate.operations', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='postinstall_optional', full_name='chromeos_update_engine.PartitionUpdate.postinstall_optional', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hash_tree_data_extent', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_data_extent', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hash_tree_extent', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_extent', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hash_tree_algorithm', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_algorithm', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hash_tree_salt', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_salt', index=12,
number=13, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fec_data_extent', full_name='chromeos_update_engine.PartitionUpdate.fec_data_extent', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fec_extent', full_name='chromeos_update_engine.PartitionUpdate.fec_extent', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fec_roots', full_name='chromeos_update_engine.PartitionUpdate.fec_roots', index=15,
number=16, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=2,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
serialized_start=880,
serialized_end=1302,
oneofs=[
],
serialized_start=926,
serialized_end=1653,
)
_DYNAMICPARTITIONGROUP = _descriptor.Descriptor(
name='DynamicPartitionGroup',
full_name='chromeos_update_engine.DynamicPartitionGroup',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='chromeos_update_engine.DynamicPartitionGroup.name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='size', full_name='chromeos_update_engine.DynamicPartitionGroup.size', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='partition_names', full_name='chromeos_update_engine.DynamicPartitionGroup.partition_names', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1655,
serialized_end=1731,
)
_DYNAMICPARTITIONMETADATA = _descriptor.Descriptor(
name='DynamicPartitionMetadata',
full_name='chromeos_update_engine.DynamicPartitionMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='groups', full_name='chromeos_update_engine.DynamicPartitionMetadata.groups', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='snapshot_enabled', full_name='chromeos_update_engine.DynamicPartitionMetadata.snapshot_enabled', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1733,
serialized_end=1848,
)
@@ -452,114 +621,143 @@ _DELTAARCHIVEMANIFEST = _descriptor.Descriptor(
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kernel_install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.kernel_install_operations', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='block_size', full_name='chromeos_update_engine.DeltaArchiveManifest.block_size', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=4096,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='signatures_offset', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_offset', index=3,
number=4, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='signatures_size', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_size', index=4,
number=5, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='old_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_kernel_info', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='new_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_kernel_info', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='old_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_rootfs_info', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='new_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_rootfs_info', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='old_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_image_info', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='new_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_image_info', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='minor_version', full_name='chromeos_update_engine.DeltaArchiveManifest.minor_version', index=11,
number=12, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='partitions', full_name='chromeos_update_engine.DeltaArchiveManifest.partitions', index=12,
number=13, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_timestamp', full_name='chromeos_update_engine.DeltaArchiveManifest.max_timestamp', index=13,
number=14, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dynamic_partition_metadata', full_name='chromeos_update_engine.DeltaArchiveManifest.dynamic_partition_metadata', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='partial_update', full_name='chromeos_update_engine.DeltaArchiveManifest.partial_update', index=15,
number=16, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
serialized_start=1305,
serialized_end=2013,
oneofs=[
],
serialized_start=1851,
serialized_end=2716,
)
_SIGNATURES_SIGNATURE.containing_type = _SIGNATURES;
_SIGNATURES_SIGNATURE.containing_type = _SIGNATURES
_SIGNATURES.fields_by_name['signatures'].message_type = _SIGNATURES_SIGNATURE
_INSTALLOPERATION.fields_by_name['type'].enum_type = _INSTALLOPERATION_TYPE
_INSTALLOPERATION.fields_by_name['src_extents'].message_type = _EXTENT
_INSTALLOPERATION.fields_by_name['dst_extents'].message_type = _EXTENT
_INSTALLOPERATION_TYPE.containing_type = _INSTALLOPERATION;
_INSTALLOPERATION_TYPE.containing_type = _INSTALLOPERATION
_PARTITIONUPDATE.fields_by_name['new_partition_signature'].message_type = _SIGNATURES_SIGNATURE
_PARTITIONUPDATE.fields_by_name['old_partition_info'].message_type = _PARTITIONINFO
_PARTITIONUPDATE.fields_by_name['new_partition_info'].message_type = _PARTITIONINFO
_PARTITIONUPDATE.fields_by_name['operations'].message_type = _INSTALLOPERATION
_PARTITIONUPDATE.fields_by_name['hash_tree_data_extent'].message_type = _EXTENT
_PARTITIONUPDATE.fields_by_name['hash_tree_extent'].message_type = _EXTENT
_PARTITIONUPDATE.fields_by_name['fec_data_extent'].message_type = _EXTENT
_PARTITIONUPDATE.fields_by_name['fec_extent'].message_type = _EXTENT
_DYNAMICPARTITIONMETADATA.fields_by_name['groups'].message_type = _DYNAMICPARTITIONGROUP
_DELTAARCHIVEMANIFEST.fields_by_name['install_operations'].message_type = _INSTALLOPERATION
_DELTAARCHIVEMANIFEST.fields_by_name['kernel_install_operations'].message_type = _INSTALLOPERATION
_DELTAARCHIVEMANIFEST.fields_by_name['old_kernel_info'].message_type = _PARTITIONINFO
@@ -569,63 +767,98 @@ _DELTAARCHIVEMANIFEST.fields_by_name['new_rootfs_info'].message_type = _PARTITIO
_DELTAARCHIVEMANIFEST.fields_by_name['old_image_info'].message_type = _IMAGEINFO
_DELTAARCHIVEMANIFEST.fields_by_name['new_image_info'].message_type = _IMAGEINFO
_DELTAARCHIVEMANIFEST.fields_by_name['partitions'].message_type = _PARTITIONUPDATE
_DELTAARCHIVEMANIFEST.fields_by_name['dynamic_partition_metadata'].message_type = _DYNAMICPARTITIONMETADATA
DESCRIPTOR.message_types_by_name['Extent'] = _EXTENT
DESCRIPTOR.message_types_by_name['Signatures'] = _SIGNATURES
DESCRIPTOR.message_types_by_name['PartitionInfo'] = _PARTITIONINFO
DESCRIPTOR.message_types_by_name['ImageInfo'] = _IMAGEINFO
DESCRIPTOR.message_types_by_name['InstallOperation'] = _INSTALLOPERATION
DESCRIPTOR.message_types_by_name['PartitionUpdate'] = _PARTITIONUPDATE
DESCRIPTOR.message_types_by_name['DynamicPartitionGroup'] = _DYNAMICPARTITIONGROUP
DESCRIPTOR.message_types_by_name['DynamicPartitionMetadata'] = _DYNAMICPARTITIONMETADATA
DESCRIPTOR.message_types_by_name['DeltaArchiveManifest'] = _DELTAARCHIVEMANIFEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
class Extent(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _EXTENT
Extent = _reflection.GeneratedProtocolMessageType('Extent', (_message.Message,), {
'DESCRIPTOR' : _EXTENT,
'__module__' : 'update_metadata_pb2'
# @@protoc_insertion_point(class_scope:chromeos_update_engine.Extent)
})
_sym_db.RegisterMessage(Extent)
class Signatures(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
class Signature(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _SIGNATURES_SIGNATURE
Signatures = _reflection.GeneratedProtocolMessageType('Signatures', (_message.Message,), {
'Signature' : _reflection.GeneratedProtocolMessageType('Signature', (_message.Message,), {
'DESCRIPTOR' : _SIGNATURES_SIGNATURE,
'__module__' : 'update_metadata_pb2'
# @@protoc_insertion_point(class_scope:chromeos_update_engine.Signatures.Signature)
DESCRIPTOR = _SIGNATURES
})
,
'DESCRIPTOR' : _SIGNATURES,
'__module__' : 'update_metadata_pb2'
# @@protoc_insertion_point(class_scope:chromeos_update_engine.Signatures)
})
_sym_db.RegisterMessage(Signatures)
_sym_db.RegisterMessage(Signatures.Signature)
class PartitionInfo(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _PARTITIONINFO
PartitionInfo = _reflection.GeneratedProtocolMessageType('PartitionInfo', (_message.Message,), {
'DESCRIPTOR' : _PARTITIONINFO,
'__module__' : 'update_metadata_pb2'
# @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionInfo)
})
_sym_db.RegisterMessage(PartitionInfo)
class ImageInfo(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _IMAGEINFO
ImageInfo = _reflection.GeneratedProtocolMessageType('ImageInfo', (_message.Message,), {
'DESCRIPTOR' : _IMAGEINFO,
'__module__' : 'update_metadata_pb2'
# @@protoc_insertion_point(class_scope:chromeos_update_engine.ImageInfo)
})
_sym_db.RegisterMessage(ImageInfo)
class InstallOperation(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _INSTALLOPERATION
InstallOperation = _reflection.GeneratedProtocolMessageType('InstallOperation', (_message.Message,), {
'DESCRIPTOR' : _INSTALLOPERATION,
'__module__' : 'update_metadata_pb2'
# @@protoc_insertion_point(class_scope:chromeos_update_engine.InstallOperation)
})
_sym_db.RegisterMessage(InstallOperation)
class PartitionUpdate(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _PARTITIONUPDATE
PartitionUpdate = _reflection.GeneratedProtocolMessageType('PartitionUpdate', (_message.Message,), {
'DESCRIPTOR' : _PARTITIONUPDATE,
'__module__' : 'update_metadata_pb2'
# @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionUpdate)
})
_sym_db.RegisterMessage(PartitionUpdate)
class DeltaArchiveManifest(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _DELTAARCHIVEMANIFEST
DynamicPartitionGroup = _reflection.GeneratedProtocolMessageType('DynamicPartitionGroup', (_message.Message,), {
'DESCRIPTOR' : _DYNAMICPARTITIONGROUP,
'__module__' : 'update_metadata_pb2'
# @@protoc_insertion_point(class_scope:chromeos_update_engine.DynamicPartitionGroup)
})
_sym_db.RegisterMessage(DynamicPartitionGroup)
DynamicPartitionMetadata = _reflection.GeneratedProtocolMessageType('DynamicPartitionMetadata', (_message.Message,), {
'DESCRIPTOR' : _DYNAMICPARTITIONMETADATA,
'__module__' : 'update_metadata_pb2'
# @@protoc_insertion_point(class_scope:chromeos_update_engine.DynamicPartitionMetadata)
})
_sym_db.RegisterMessage(DynamicPartitionMetadata)
DeltaArchiveManifest = _reflection.GeneratedProtocolMessageType('DeltaArchiveManifest', (_message.Message,), {
'DESCRIPTOR' : _DELTAARCHIVEMANIFEST,
'__module__' : 'update_metadata_pb2'
# @@protoc_insertion_point(class_scope:chromeos_update_engine.DeltaArchiveManifest)
})
_sym_db.RegisterMessage(DeltaArchiveManifest)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), 'H\003')
DESCRIPTOR._options = None
_SIGNATURES_SIGNATURE.fields_by_name['version']._options = None
_INSTALLOPERATION_TYPE.values_by_name["MOVE"]._options = None
_INSTALLOPERATION_TYPE.values_by_name["BSDIFF"]._options = None
_DELTAARCHIVEMANIFEST.fields_by_name['install_operations']._options = None
_DELTAARCHIVEMANIFEST.fields_by_name['kernel_install_operations']._options = None
_DELTAARCHIVEMANIFEST.fields_by_name['old_kernel_info']._options = None
_DELTAARCHIVEMANIFEST.fields_by_name['new_kernel_info']._options = None
_DELTAARCHIVEMANIFEST.fields_by_name['old_rootfs_info']._options = None
_DELTAARCHIVEMANIFEST.fields_by_name['new_rootfs_info']._options = None
# @@protoc_insertion_point(module_scope)