update-payload-extractor: Import master update_payload
Change-Id: I94798f08b2e02677390024abcd43d45af7f585ee
@@ -24,31 +24,32 @@ follows:
  checker.Run(...)
"""

from __future__ import absolute_import
from __future__ import print_function

import array
import base64
import collections
import hashlib
import itertools
import os
import subprocess

from six.moves import range

from update_payload import common
from update_payload import error
from update_payload import format_utils
from update_payload import histogram
from update_payload import update_metadata_pb2


#
# Constants.
#

_CHECK_DST_PSEUDO_EXTENTS = 'dst-pseudo-extents'
_CHECK_MOVE_SAME_SRC_DST_BLOCK = 'move-same-src-dst-block'
_CHECK_PAYLOAD_SIG = 'payload-sig'
CHECKS_TO_DISABLE = (
    _CHECK_DST_PSEUDO_EXTENTS,
    _CHECK_MOVE_SAME_SRC_DST_BLOCK,
    _CHECK_PAYLOAD_SIG,
)
@@ -65,14 +66,13 @@ _DEFAULT_PUBKEY_FILE_NAME = os.path.join(os.path.dirname(__file__),
# Supported minor version map to payload types allowed to be using them.
_SUPPORTED_MINOR_VERSIONS = {
    0: (_TYPE_FULL,),
    1: (_TYPE_DELTA,),
    2: (_TYPE_DELTA,),
    3: (_TYPE_DELTA,),
    4: (_TYPE_DELTA,),
    5: (_TYPE_DELTA,),
    6: (_TYPE_DELTA,),
}

_OLD_DELTA_USABLE_PART_SIZE = 2 * 1024 * 1024 * 1024

#
# Helper functions.
@@ -321,8 +321,6 @@ class PayloadChecker(object):
    self.allow_unhashed = allow_unhashed

    # Disable specific tests.
    self.check_dst_pseudo_extents = (
        _CHECK_DST_PSEUDO_EXTENTS not in disabled_tests)
    self.check_move_same_src_dst_block = (
        _CHECK_MOVE_SAME_SRC_DST_BLOCK not in disabled_tests)
    self.check_payload_sig = _CHECK_PAYLOAD_SIG not in disabled_tests
@@ -330,15 +328,12 @@ class PayloadChecker(object):
    # Reset state; these will be assigned when the manifest is checked.
    self.sigs_offset = 0
    self.sigs_size = 0
    self.old_rootfs_fs_size = 0
    self.old_kernel_fs_size = 0
    self.new_rootfs_fs_size = 0
    self.new_kernel_fs_size = 0
    self.old_part_info = {}
    self.new_part_info = {}
    self.new_fs_sizes = collections.defaultdict(int)
    self.old_fs_sizes = collections.defaultdict(int)
    self.minor_version = None
    # TODO(*): When fixing crbug.com/794404, the major version should be
    # correclty handled in update_payload scripts. So stop forcing
    # major_verions=1 here and set it to the correct value.
    self.major_version = 1
    self.major_version = None

  @staticmethod
  def _CheckElem(msg, name, report, is_mandatory, is_submsg, convert=str,
@@ -368,22 +363,56 @@ class PayloadChecker(object):
    Raises:
      error.PayloadError if a mandatory element is missing.
    """
    element_result = collections.namedtuple('element_result', ['msg', 'report'])

    if not msg.HasField(name):
      if is_mandatory:
        raise error.PayloadError('%smissing mandatory %s %r.' %
                                 (msg_name + ' ' if msg_name else '',
                                  'sub-message' if is_submsg else 'field',
                                  name))
      return None, None
      return element_result(None, None)

    value = getattr(msg, name)
    if is_submsg:
      return value, report and report.AddSubReport(name)
      return element_result(value, report and report.AddSubReport(name))
    else:
      if report:
        report.AddField(name, convert(value), linebreak=linebreak,
                        indent=indent)
      return value, None
      return element_result(value, None)

  @staticmethod
  def _CheckRepeatedElemNotPresent(msg, field_name, msg_name):
    """Checks that a repeated element is not specified in the message.

    Args:
      msg: The message containing the element.
      field_name: The name of the element.
      msg_name: The name of the message object (for error reporting).

    Raises:
      error.PayloadError if the repeated element is present or non-empty.
    """
    if getattr(msg, field_name, None):
      raise error.PayloadError('%sfield %r not empty.' %
                               (msg_name + ' ' if msg_name else '', field_name))

  @staticmethod
  def _CheckElemNotPresent(msg, field_name, msg_name):
    """Checks that an element is not specified in the message.

    Args:
      msg: The message containing the element.
      field_name: The name of the element.
      msg_name: The name of the message object (for error reporting).

    Raises:
      error.PayloadError if the repeated element is present.
    """
    if msg.HasField(field_name):
      raise error.PayloadError('%sfield %r exists.' %
                               (msg_name + ' ' if msg_name else '', field_name))

  @staticmethod
  def _CheckMandatoryField(msg, field_name, report, msg_name, convert=str,
@@ -432,6 +461,22 @@ class PayloadChecker(object):
                               (present, missing,
                                ' in ' + obj_name if obj_name else ''))

  @staticmethod
  def _CheckPresentIffMany(vals, name, obj_name):
    """Checks that a set of vals and names imply every other element.

    Args:
      vals: The set of values to be compared.
      name: The name of the objects holding the corresponding value.
      obj_name: Name of the object containing these values.

    Raises:
      error.PayloadError if assertion does not hold.
    """
    if any(vals) and not all(vals):
      raise error.PayloadError('%r is not present in all values%s.' %
                               (name, ' in ' + obj_name if obj_name else ''))

  @staticmethod
  def _Run(cmd, send_data=None):
    """Runs a subprocess, returns its output.
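
The new _CheckPresentIffMany helper generalizes the pairwise present-iff check to any number of values: only the mixed case is rejected. A minimal sketch of its acceptance rule, with illustrative values not taken from this diff:

  # any(vals) and not all(vals) is True only when some, but not all,
  # of the values are present (truthy).
  vals_all_missing = [None, None]   # accepted: nothing present
  vals_all_present = ['a', 'b']     # accepted: everything present
  vals_mixed = ['a', None]          # rejected: present only somewhere
  assert not (any(vals_all_missing) and not all(vals_all_missing))
  assert not (any(vals_all_present) and not all(vals_all_present))
  assert any(vals_mixed) and not all(vals_mixed)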
@@ -544,13 +589,12 @@ class PayloadChecker(object):
      raise error.PayloadError('Unsupported minor version: %d' %
                               self.minor_version)

  def _CheckManifest(self, report, rootfs_part_size=0, kernel_part_size=0):
  def _CheckManifest(self, report, part_sizes=None):
    """Checks the payload manifest.

    Args:
      report: A report object to add to.
      rootfs_part_size: Size of the rootfs partition in bytes.
      kernel_part_size: Size of the kernel partition in bytes.
      part_sizes: Map of partition label to partition size in bytes.

    Returns:
      A tuple consisting of the partition block size used during the update
@@ -559,6 +603,9 @@ class PayloadChecker(object):
    Raises:
      error.PayloadError if any of the checks fail.
    """
    self.major_version = self.payload.header.version

    part_sizes = part_sizes or collections.defaultdict(int)
    manifest = self.payload.manifest
    report.AddSection('manifest')

@@ -577,39 +624,45 @@ class PayloadChecker(object):
    self._CheckPresentIff(self.sigs_offset, self.sigs_size,
                          'signatures_offset', 'signatures_size', 'manifest')

    # Check: old_kernel_info <==> old_rootfs_info.
    oki_msg, oki_report = self._CheckOptionalSubMsg(manifest,
                                                    'old_kernel_info', report)
    ori_msg, ori_report = self._CheckOptionalSubMsg(manifest,
                                                    'old_rootfs_info', report)
    self._CheckPresentIff(oki_msg, ori_msg, 'old_kernel_info',
                          'old_rootfs_info', 'manifest')
    if oki_msg:  # equivalently, ori_msg
    for part in manifest.partitions:
      name = part.partition_name
      self.old_part_info[name] = self._CheckOptionalSubMsg(
          part, 'old_partition_info', report)
      self.new_part_info[name] = self._CheckMandatorySubMsg(
          part, 'new_partition_info', report, 'manifest.partitions')

    # Check: Old-style partition infos should not be specified.
    for _, part in common.CROS_PARTITIONS:
      self._CheckElemNotPresent(manifest, 'old_%s_info' % part, 'manifest')
      self._CheckElemNotPresent(manifest, 'new_%s_info' % part, 'manifest')

    # Check: If old_partition_info is specified anywhere, it must be
    # specified everywhere.
    old_part_msgs = [part.msg for part in self.old_part_info.values() if part]
    self._CheckPresentIffMany(old_part_msgs, 'old_partition_info',
                              'manifest.partitions')

    is_delta = any(part and part.msg for part in self.old_part_info.values())
    if is_delta:
      # Assert/mark delta payload.
      if self.payload_type == _TYPE_FULL:
        raise error.PayloadError(
            'Apparent full payload contains old_{kernel,rootfs}_info.')
      self.payload_type = _TYPE_DELTA

      # Check: {size, hash} present in old_{kernel,rootfs}_info.
      self.old_kernel_fs_size = self._CheckMandatoryField(
          oki_msg, 'size', oki_report, 'old_kernel_info')
      self._CheckMandatoryField(oki_msg, 'hash', oki_report, 'old_kernel_info',
                                convert=common.FormatSha256)
      self.old_rootfs_fs_size = self._CheckMandatoryField(
          ori_msg, 'size', ori_report, 'old_rootfs_info')
      self._CheckMandatoryField(ori_msg, 'hash', ori_report, 'old_rootfs_info',
                                convert=common.FormatSha256)
      for part, (msg, part_report) in self.old_part_info.items():
        # Check: {size, hash} present in old_{kernel,rootfs}_info.
        field = 'old_%s_info' % part
        self.old_fs_sizes[part] = self._CheckMandatoryField(msg, 'size',
                                                            part_report, field)
        self._CheckMandatoryField(msg, 'hash', part_report, field,
                                  convert=common.FormatSha256)

      # Check: old_{kernel,rootfs} size must fit in respective partition.
      if kernel_part_size and self.old_kernel_fs_size > kernel_part_size:
        raise error.PayloadError(
            'Old kernel content (%d) exceed partition size (%d).' %
            (self.old_kernel_fs_size, kernel_part_size))
      if rootfs_part_size and self.old_rootfs_fs_size > rootfs_part_size:
        raise error.PayloadError(
            'Old rootfs content (%d) exceed partition size (%d).' %
            (self.old_rootfs_fs_size, rootfs_part_size))
        # Check: old_{kernel,rootfs} size must fit in respective partition.
        if self.old_fs_sizes[part] > part_sizes[part] > 0:
          raise error.PayloadError(
              'Old %s content (%d) exceed partition size (%d).' %
              (part, self.old_fs_sizes[part], part_sizes[part]))
    else:
      # Assert/mark full payload.
      if self.payload_type == _TYPE_DELTA:
@@ -617,31 +670,19 @@ class PayloadChecker(object):
            'Apparent delta payload missing old_{kernel,rootfs}_info.')
      self.payload_type = _TYPE_FULL

    # Check: new_kernel_info present; contains {size, hash}.
    nki_msg, nki_report = self._CheckMandatorySubMsg(
        manifest, 'new_kernel_info', report, 'manifest')
    self.new_kernel_fs_size = self._CheckMandatoryField(
        nki_msg, 'size', nki_report, 'new_kernel_info')
    self._CheckMandatoryField(nki_msg, 'hash', nki_report, 'new_kernel_info',
                              convert=common.FormatSha256)
    # Check: new_{kernel,rootfs}_info present; contains {size, hash}.
    for part, (msg, part_report) in self.new_part_info.items():
      field = 'new_%s_info' % part
      self.new_fs_sizes[part] = self._CheckMandatoryField(msg, 'size',
                                                          part_report, field)
      self._CheckMandatoryField(msg, 'hash', part_report, field,
                                convert=common.FormatSha256)

    # Check: new_rootfs_info present; contains {size, hash}.
    nri_msg, nri_report = self._CheckMandatorySubMsg(
        manifest, 'new_rootfs_info', report, 'manifest')
    self.new_rootfs_fs_size = self._CheckMandatoryField(
        nri_msg, 'size', nri_report, 'new_rootfs_info')
    self._CheckMandatoryField(nri_msg, 'hash', nri_report, 'new_rootfs_info',
                              convert=common.FormatSha256)

    # Check: new_{kernel,rootfs} size must fit in respective partition.
    if kernel_part_size and self.new_kernel_fs_size > kernel_part_size:
      raise error.PayloadError(
          'New kernel content (%d) exceed partition size (%d).' %
          (self.new_kernel_fs_size, kernel_part_size))
    if rootfs_part_size and self.new_rootfs_fs_size > rootfs_part_size:
      raise error.PayloadError(
          'New rootfs content (%d) exceed partition size (%d).' %
          (self.new_rootfs_fs_size, rootfs_part_size))
      # Check: new_{kernel,rootfs} size must fit in respective partition.
      if self.new_fs_sizes[part] > part_sizes[part] > 0:
        raise error.PayloadError(
            'New %s content (%d) exceed partition size (%d).' %
            (part, self.new_fs_sizes[part], part_sizes[part]))

    # Check: minor_version makes sense for the payload type. This check should
    # run after the payload type has been set.
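
The rewritten size checks guard with a chained comparison, `fs_size > part_sizes[part] > 0`, which Python evaluates as `fs_size > part_sizes[part] and part_sizes[part] > 0`. Combined with the defaultdict(int) fallback, an unspecified partition size reads as 0 and the check is skipped, mirroring the old `if kernel_part_size and ...` guard. A small sketch with made-up numbers:

  import collections

  part_sizes = collections.defaultdict(int)
  fs_size = 5000

  # No size supplied for 'root': part_sizes['root'] == 0, check skipped.
  assert not fs_size > part_sizes['root'] > 0

  # Size supplied and exceeded: the checker would raise PayloadError here.
  part_sizes['root'] = 4096
  assert fs_size > part_sizes['root'] > 0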
@@ -667,8 +708,7 @@ class PayloadChecker(object):
    self._CheckBlocksFitLength(length, total_blocks, self.block_size,
                               '%s: %s' % (op_name, length_name))

  def _CheckExtents(self, extents, usable_size, block_counters, name,
                    allow_pseudo=False, allow_signature=False):
  def _CheckExtents(self, extents, usable_size, block_counters, name):
    """Checks a sequence of extents.

    Args:
@@ -676,8 +716,6 @@ class PayloadChecker(object):
      usable_size: The usable size of the partition to which the extents apply.
      block_counters: Array of counters corresponding to the number of blocks.
      name: The name of the extent block.
      allow_pseudo: Whether or not pseudo block numbers are allowed.
      allow_signature: Whether or not the extents are used for a signature.

    Returns:
      The total number of blocks in the extents.
@@ -698,20 +736,15 @@ class PayloadChecker(object):
      if num_blocks == 0:
        raise error.PayloadError('%s: extent length is zero.' % ex_name)

      if start_block != common.PSEUDO_EXTENT_MARKER:
        # Check: Make sure we're within the partition limit.
        if usable_size and end_block * self.block_size > usable_size:
          raise error.PayloadError(
              '%s: extent (%s) exceeds usable partition size (%d).' %
              (ex_name, common.FormatExtent(ex, self.block_size), usable_size))
      # Check: Make sure we're within the partition limit.
      if usable_size and end_block * self.block_size > usable_size:
        raise error.PayloadError(
            '%s: extent (%s) exceeds usable partition size (%d).' %
            (ex_name, common.FormatExtent(ex, self.block_size), usable_size))

        # Record block usage.
        for i in xrange(start_block, end_block):
          block_counters[i] += 1
      elif not (allow_pseudo or (allow_signature and len(extents) == 1)):
        # Pseudo-extents must be allowed explicitly, or otherwise be part of a
        # signature operation (in which case there has to be exactly one).
        raise error.PayloadError('%s: unexpected pseudo-extent.' % ex_name)
      # Record block usage.
      for i in range(start_block, end_block):
        block_counters[i] += 1

      total_num_blocks += num_blocks
@@ -729,6 +762,11 @@ class PayloadChecker(object):
    Raises:
      error.PayloadError if any check fails.
    """
    # Check: total_dst_blocks is not a floating point.
    if isinstance(total_dst_blocks, float):
      raise error.PayloadError('%s: contains invalid data type of '
                               'total_dst_blocks.' % op_name)

    # Check: Does not contain src extents.
    if op.src_extents:
      raise error.PayloadError('%s: contains src_extents.' % op_name)
@@ -742,96 +780,13 @@ class PayloadChecker(object):
                                 self.block_size,
                                 op_name + '.data_length', 'dst')
    else:
      # Check: data_length must be smaller than the alotted dst blocks.
      # Check: data_length must be smaller than the allotted dst blocks.
      if data_length >= total_dst_blocks * self.block_size:
        raise error.PayloadError(
            '%s: data_length (%d) must be less than allotted dst block '
            'space (%d * %d).' %
            (op_name, data_length, total_dst_blocks, self.block_size))

  def _CheckMoveOperation(self, op, data_offset, total_src_blocks,
                          total_dst_blocks, op_name):
    """Specific checks for MOVE operations.

    Args:
      op: The operation object from the manifest.
      data_offset: The offset of a data blob for the operation.
      total_src_blocks: Total number of blocks in src_extents.
      total_dst_blocks: Total number of blocks in dst_extents.
      op_name: Operation name for error reporting.

    Raises:
      error.PayloadError if any check fails.
    """
    # Check: No data_{offset,length}.
    if data_offset is not None:
      raise error.PayloadError('%s: contains data_{offset,length}.' % op_name)

    # Check: total_src_blocks == total_dst_blocks.
    if total_src_blocks != total_dst_blocks:
      raise error.PayloadError(
          '%s: total src blocks (%d) != total dst blocks (%d).' %
          (op_name, total_src_blocks, total_dst_blocks))

    # Check: For all i, i-th src block index != i-th dst block index.
    i = 0
    src_extent_iter = iter(op.src_extents)
    dst_extent_iter = iter(op.dst_extents)
    src_extent = dst_extent = None
    src_idx = src_num = dst_idx = dst_num = 0
    while i < total_src_blocks:
      # Get the next source extent, if needed.
      if not src_extent:
        try:
          src_extent = src_extent_iter.next()
        except StopIteration:
          raise error.PayloadError('%s: ran out of src extents (%d/%d).' %
                                   (op_name, i, total_src_blocks))
        src_idx = src_extent.start_block
        src_num = src_extent.num_blocks

      # Get the next dest extent, if needed.
      if not dst_extent:
        try:
          dst_extent = dst_extent_iter.next()
        except StopIteration:
          raise error.PayloadError('%s: ran out of dst extents (%d/%d).' %
                                   (op_name, i, total_dst_blocks))
        dst_idx = dst_extent.start_block
        dst_num = dst_extent.num_blocks

      # Check: start block is not 0. See crbug/480751; there are still versions
      # of update_engine which fail when seeking to 0 in PReadAll and PWriteAll,
      # so we need to fail payloads that try to MOVE to/from block 0.
      if src_idx == 0 or dst_idx == 0:
        raise error.PayloadError(
            '%s: MOVE operation cannot have extent with start block 0' %
            op_name)

      if self.check_move_same_src_dst_block and src_idx == dst_idx:
        raise error.PayloadError(
            '%s: src/dst block number %d is the same (%d).' %
            (op_name, i, src_idx))

      advance = min(src_num, dst_num)
      i += advance

      src_idx += advance
      src_num -= advance
      if src_num == 0:
        src_extent = None

      dst_idx += advance
      dst_num -= advance
      if dst_num == 0:
        dst_extent = None

    # Make sure we've exhausted all src/dst extents.
    if src_extent:
      raise error.PayloadError('%s: excess src blocks.' % op_name)
    if dst_extent:
      raise error.PayloadError('%s: excess dst blocks.' % op_name)

  def _CheckZeroOperation(self, op, op_name):
    """Specific checks for ZERO operations.

@@ -851,7 +806,7 @@ class PayloadChecker(object):
      raise error.PayloadError('%s: contains data_offset.' % op_name)

  def _CheckAnyDiffOperation(self, op, data_length, total_dst_blocks, op_name):
    """Specific checks for BSDIFF, SOURCE_BSDIFF, PUFFDIFF and BROTLI_BSDIFF
    """Specific checks for SOURCE_BSDIFF, PUFFDIFF and BROTLI_BSDIFF
    operations.

    Args:
@@ -867,7 +822,7 @@ class PayloadChecker(object):
    if data_length is None:
      raise error.PayloadError('%s: missing data_{offset,length}.' % op_name)

    # Check: data_length is strictly smaller than the alotted dst blocks.
    # Check: data_length is strictly smaller than the allotted dst blocks.
    if data_length >= total_dst_blocks * self.block_size:
      raise error.PayloadError(
          '%s: data_length (%d) must be smaller than allotted dst space '
@@ -876,8 +831,7 @@ class PayloadChecker(object):
           total_dst_blocks * self.block_size))

    # Check the existence of src_length and dst_length for legacy bsdiffs.
    if (op.type == common.OpType.BSDIFF or
        (op.type == common.OpType.SOURCE_BSDIFF and self.minor_version <= 3)):
    if op.type == common.OpType.SOURCE_BSDIFF and self.minor_version <= 3:
      if not op.HasField('src_length') or not op.HasField('dst_length'):
        raise error.PayloadError('%s: require {src,dst}_length.' % op_name)
    else:
@@ -926,21 +880,19 @@ class PayloadChecker(object):
    if self.minor_version >= 3 and op.src_sha256_hash is None:
      raise error.PayloadError('%s: source hash missing.' % op_name)

  def _CheckOperation(self, op, op_name, is_last, old_block_counters,
                      new_block_counters, old_usable_size, new_usable_size,
                      prev_data_offset, allow_signature, blob_hash_counts):
  def _CheckOperation(self, op, op_name, old_block_counters, new_block_counters,
                      old_usable_size, new_usable_size, prev_data_offset,
                      blob_hash_counts):
    """Checks a single update operation.

    Args:
      op: The operation object.
      op_name: Operation name string for error reporting.
      is_last: Whether this is the last operation in the sequence.
      old_block_counters: Arrays of block read counters.
      new_block_counters: Arrays of block write counters.
      old_usable_size: The overall usable size for src data in bytes.
      new_usable_size: The overall usable size for dst data in bytes.
      prev_data_offset: Offset of last used data bytes.
      allow_signature: Whether this may be a signature operation.
      blob_hash_counts: Counters for hashed/unhashed blobs.

    Returns:
@@ -952,14 +904,10 @@ class PayloadChecker(object):
    # Check extents.
    total_src_blocks = self._CheckExtents(
        op.src_extents, old_usable_size, old_block_counters,
        op_name + '.src_extents', allow_pseudo=True)
    allow_signature_in_extents = (allow_signature and is_last and
                                  op.type == common.OpType.REPLACE)
        op_name + '.src_extents')
    total_dst_blocks = self._CheckExtents(
        op.dst_extents, new_usable_size, new_block_counters,
        op_name + '.dst_extents',
        allow_pseudo=(not self.check_dst_pseudo_extents),
        allow_signature=allow_signature_in_extents)
        op_name + '.dst_extents')

    # Check: data_offset present <==> data_length present.
    data_offset = self._CheckOptionalField(op, 'data_offset', None)
@@ -995,9 +943,7 @@ class PayloadChecker(object):
            (op_name, common.FormatSha256(op.data_sha256_hash),
             common.FormatSha256(actual_hash.digest())))
    elif data_offset is not None:
      if allow_signature_in_extents:
        blob_hash_counts['signature'] += 1
      elif self.allow_unhashed:
      if self.allow_unhashed:
        blob_hash_counts['unhashed'] += 1
      else:
        raise error.PayloadError('%s: unhashed operation not allowed.' %
@@ -1011,18 +957,11 @@ class PayloadChecker(object):
                               (op_name, data_offset, prev_data_offset))

    # Type-specific checks.
    if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
    if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ,
                   common.OpType.REPLACE_XZ):
      self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
    elif op.type == common.OpType.REPLACE_XZ and (self.minor_version >= 3 or
                                                  self.major_version >= 2):
      self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
    elif op.type == common.OpType.MOVE and self.minor_version == 1:
      self._CheckMoveOperation(op, data_offset, total_src_blocks,
                               total_dst_blocks, op_name)
    elif op.type == common.OpType.ZERO and self.minor_version >= 4:
      self._CheckZeroOperation(op, op_name)
    elif op.type == common.OpType.BSDIFF and self.minor_version == 1:
      self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name)
    elif op.type == common.OpType.SOURCE_COPY and self.minor_version >= 2:
      self._CheckSourceCopyOperation(data_offset, total_src_blocks,
                                     total_dst_blocks, op_name)
@@ -1044,7 +983,7 @@ class PayloadChecker(object):

  def _SizeToNumBlocks(self, size):
    """Returns the number of blocks needed to contain a given byte size."""
    return (size + self.block_size - 1) / self.block_size
    return (size + self.block_size - 1) // self.block_size

  def _AllocBlockCounters(self, total_size):
    """Returns a freshly initialized array of block counters.
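
The `/` to `//` change in _SizeToNumBlocks matters on Python 3, where `/` is true division and would make the block count a float; `//` keeps the ceiling-division idiom integer-valued on both Python 2 and 3. Illustrative values:

  block_size = 4096
  size = 10000
  # ceil(10000 / 4096) == 3, computed with integers only.
  assert (size + block_size - 1) // block_size == 3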
@@ -1064,7 +1003,7 @@ class PayloadChecker(object):

  def _CheckOperations(self, operations, report, base_name, old_fs_size,
                       new_fs_size, old_usable_size, new_usable_size,
                       prev_data_offset, allow_signature):
                       prev_data_offset):
    """Checks a sequence of update operations.

    Args:
@@ -1076,7 +1015,6 @@ class PayloadChecker(object):
      old_usable_size: The overall usable size of the old partition in bytes.
      new_usable_size: The overall usable size of the new partition in bytes.
      prev_data_offset: Offset of last used data bytes.
      allow_signature: Whether this sequence may contain signature operations.

    Returns:
      The total data blob size used.
@@ -1091,9 +1029,7 @@ class PayloadChecker(object):
        common.OpType.REPLACE: 0,
        common.OpType.REPLACE_BZ: 0,
        common.OpType.REPLACE_XZ: 0,
        common.OpType.MOVE: 0,
        common.OpType.ZERO: 0,
        common.OpType.BSDIFF: 0,
        common.OpType.SOURCE_COPY: 0,
        common.OpType.SOURCE_BSDIFF: 0,
        common.OpType.PUFFDIFF: 0,
@@ -1104,8 +1040,6 @@ class PayloadChecker(object):
        common.OpType.REPLACE: 0,
        common.OpType.REPLACE_BZ: 0,
        common.OpType.REPLACE_XZ: 0,
        # MOVE operations don't have blobs.
        common.OpType.BSDIFF: 0,
        # SOURCE_COPY operations don't have blobs.
        common.OpType.SOURCE_BSDIFF: 0,
        common.OpType.PUFFDIFF: 0,
@@ -1116,8 +1050,6 @@ class PayloadChecker(object):
        'hashed': 0,
        'unhashed': 0,
    }
    if allow_signature:
      blob_hash_counts['signature'] = 0

    # Allocate old and new block counters.
    old_block_counters = (self._AllocBlockCounters(old_usable_size)
@@ -1130,16 +1062,14 @@ class PayloadChecker(object):
      op_num += 1

      # Check: Type is valid.
      if op.type not in op_counts.keys():
      if op.type not in op_counts:
        raise error.PayloadError('%s: invalid type (%d).' % (op_name, op.type))
      op_counts[op.type] += 1

      is_last = op_num == len(operations)
      curr_data_used = self._CheckOperation(
          op, op_name, is_last, old_block_counters, new_block_counters,
          op, op_name, old_block_counters, new_block_counters,
          old_usable_size, new_usable_size,
          prev_data_offset + total_data_used, allow_signature,
          blob_hash_counts)
          prev_data_offset + total_data_used, blob_hash_counts)
      if curr_data_used:
        op_blob_totals[op.type] += curr_data_used
        total_data_used += curr_data_used
@@ -1193,18 +1123,17 @@ class PayloadChecker(object):
    if not sigs.signatures:
      raise error.PayloadError('Signature block is empty.')

    last_ops_section = (self.payload.manifest.kernel_install_operations or
                        self.payload.manifest.install_operations)
    fake_sig_op = last_ops_section[-1]
    # Check: signatures_{offset,size} must match the last (fake) operation.
    if not (fake_sig_op.type == common.OpType.REPLACE and
            self.sigs_offset == fake_sig_op.data_offset and
            self.sigs_size == fake_sig_op.data_length):
      raise error.PayloadError(
          'Signatures_{offset,size} (%d+%d) does not match last operation '
          '(%d+%d).' %
          (self.sigs_offset, self.sigs_size, fake_sig_op.data_offset,
           fake_sig_op.data_length))
    # Check that we don't have the signature operation blob at the end (used to
    # be for major version 1).
    last_partition = self.payload.manifest.partitions[-1]
    if last_partition.operations:
      last_op = last_partition.operations[-1]
      # Check: signatures_{offset,size} must match the last (fake) operation.
      if (last_op.type == common.OpType.REPLACE and
          last_op.data_offset == self.sigs_offset and
          last_op.data_length == self.sigs_size):
        raise error.PayloadError('It seems like the last operation is the '
                                 'signature blob. This is an invalid payload.')

    # Compute the checksum of all data up to signature blob.
    # TODO(garnold) we're re-reading the whole data section into a string
@@ -1231,17 +1160,16 @@ class PayloadChecker(object):
        raise error.PayloadError('Unknown signature version (%d).' %
                                 sig.version)

  def Run(self, pubkey_file_name=None, metadata_sig_file=None,
          rootfs_part_size=0, kernel_part_size=0, report_out_file=None):
  def Run(self, pubkey_file_name=None, metadata_sig_file=None, metadata_size=0,
          part_sizes=None, report_out_file=None):
    """Checker entry point, invoking all checks.

    Args:
      pubkey_file_name: Public key used for signature verification.
      metadata_sig_file: Metadata signature, if verification is desired.
      rootfs_part_size: The size of rootfs partitions in bytes (default: infer
                        based on payload type and version).
      kernel_part_size: The size of kernel partitions in bytes (default: use
                        reported filesystem size).
      metadata_size: Metadata size, if verification is desired.
      part_sizes: Mapping of partition label to size in bytes (default: infer
                  based on payload type and version or filesystem).
      report_out_file: File object to dump the report to.

    Raises:
@@ -1258,6 +1186,12 @@ class PayloadChecker(object):
    self.payload.ResetFile()

    try:
      # Check metadata_size (if provided).
      if metadata_size and self.payload.metadata_size != metadata_size:
        raise error.PayloadError('Invalid payload metadata size in payload(%d) '
                                 'vs given(%d)' % (self.payload.metadata_size,
                                                   metadata_size))

      # Check metadata signature (if provided).
      if metadata_sig_file:
        metadata_sig = base64.b64decode(metadata_sig_file.read())
@@ -1268,65 +1202,60 @@ class PayloadChecker(object):
      # Part 1: Check the file header.
      report.AddSection('header')
      # Check: Payload version is valid.
      if self.payload.header.version != 1:
      if self.payload.header.version not in (1, 2):
        raise error.PayloadError('Unknown payload version (%d).' %
                                 self.payload.header.version)
      report.AddField('version', self.payload.header.version)
      report.AddField('manifest len', self.payload.header.manifest_len)

      # Part 2: Check the manifest.
      self._CheckManifest(report, rootfs_part_size, kernel_part_size)
      self._CheckManifest(report, part_sizes)
      assert self.payload_type, 'payload type should be known by now'

      # Infer the usable partition size when validating rootfs operations:
      # - If rootfs partition size was provided, use that.
      # - Otherwise, if this is an older delta (minor version < 2), stick with
      #   a known constant size. This is necessary because older deltas may
      #   exceed the filesystem size when moving data blocks around.
      # - Otherwise, use the encoded filesystem size.
      new_rootfs_usable_size = self.new_rootfs_fs_size
      old_rootfs_usable_size = self.old_rootfs_fs_size
      if rootfs_part_size:
        new_rootfs_usable_size = rootfs_part_size
        old_rootfs_usable_size = rootfs_part_size
      elif self.payload_type == _TYPE_DELTA and self.minor_version in (None, 1):
        new_rootfs_usable_size = _OLD_DELTA_USABLE_PART_SIZE
        old_rootfs_usable_size = _OLD_DELTA_USABLE_PART_SIZE
      # Make sure deprecated values are not present in the payload.
      for field in ('install_operations', 'kernel_install_operations'):
        self._CheckRepeatedElemNotPresent(self.payload.manifest, field,
                                          'manifest')
      for field in ('old_kernel_info', 'old_rootfs_info',
                    'new_kernel_info', 'new_rootfs_info'):
        self._CheckElemNotPresent(self.payload.manifest, field, 'manifest')

      # Part 3: Examine rootfs operations.
      # TODO(garnold)(chromium:243559) only default to the filesystem size if
      # no explicit size provided *and* the partition size is not embedded in
      # the payload; see issue for more details.
      report.AddSection('rootfs operations')
      total_blob_size = self._CheckOperations(
          self.payload.manifest.install_operations, report,
          'install_operations', self.old_rootfs_fs_size,
          self.new_rootfs_fs_size, old_rootfs_usable_size,
          new_rootfs_usable_size, 0, False)
      total_blob_size = 0
      for part, operations in ((p.partition_name, p.operations)
                               for p in self.payload.manifest.partitions):
        report.AddSection('%s operations' % part)

      # Part 4: Examine kernel operations.
      # TODO(garnold)(chromium:243559) as above.
      report.AddSection('kernel operations')
      total_blob_size += self._CheckOperations(
          self.payload.manifest.kernel_install_operations, report,
          'kernel_install_operations', self.old_kernel_fs_size,
          self.new_kernel_fs_size,
          kernel_part_size if kernel_part_size else self.old_kernel_fs_size,
          kernel_part_size if kernel_part_size else self.new_kernel_fs_size,
          total_blob_size, True)
        new_fs_usable_size = self.new_fs_sizes[part]
        old_fs_usable_size = self.old_fs_sizes[part]

        if part_sizes is not None and part_sizes.get(part, None):
          new_fs_usable_size = old_fs_usable_size = part_sizes[part]

        # TODO(chromium:243559) only default to the filesystem size if no
        # explicit size provided *and* the partition size is not embedded in the
        # payload; see issue for more details.
        total_blob_size += self._CheckOperations(
            operations, report, '%s_install_operations' % part,
            self.old_fs_sizes[part], self.new_fs_sizes[part],
            old_fs_usable_size, new_fs_usable_size, total_blob_size)

      # Check: Operations data reach the end of the payload file.
      used_payload_size = self.payload.data_offset + total_blob_size
      # Major versions 2 and higher have a signature at the end, so it should be
      # considered in the total size of the image.
      if self.sigs_size:
        used_payload_size += self.sigs_size

      if used_payload_size != payload_file_size:
        raise error.PayloadError(
            'Used payload size (%d) different from actual file size (%d).' %
            (used_payload_size, payload_file_size))

      # Part 5: Handle payload signatures message.
      # Part 4: Handle payload signatures message.
      if self.check_payload_sig and self.sigs_size:
        self._CheckSignatures(report, pubkey_file_name)

      # Part 6: Summary.
      # Part 5: Summary.
      report.AddSection('summary')
      report.AddField('update type', self.payload_type)
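
For reference, a minimal sketch of driving the checker through the new Run() keyword arguments. Only Run()'s signature comes from this change; the Payload wiring, file name, and partition labels below are illustrative assumptions:

  import update_payload
  from update_payload import checker

  with open('payload.bin', 'rb') as payload_file:
    payload = update_payload.Payload(payload_file)
    payload.Init()

    # Partition labels key the new part_sizes mapping; sizes are in bytes.
    checker.PayloadChecker(payload).Run(
        part_sizes={'root': 4 * 1024 * 1024 * 1024,
                    'kernel': 64 * 1024 * 1024})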