Add 'update-payload-extractor/' from commit '4632cf0a0a6db27cce20c25cb40fc469a2c8e9aa'

https://github.com/gmrt/update_payload_extractor

git-subtree-dir: update-payload-extractor
git-subtree-mainline: 9a231bd70b
git-subtree-split: 4632cf0a0a
Change-Id: I9ae25d32a7e9aa6664309e8b916811844d0cac50
This commit is contained in:
Kevin F. Haggerty
2020-03-15 15:06:30 -06:00
17 changed files with 5471 additions and 0 deletions

View File

@@ -0,0 +1,63 @@
#!/usr/bin/env python2
import argparse
import errno
import os
import update_payload
from update_payload import applier
def list_content(payload_file_name):
  """Print the name and new size of every partition in a payload file."""
  with open(payload_file_name, 'rb') as fileobj:
    payload = update_payload.Payload(fileobj)
    payload.Init()
    for partition in payload.manifest.partitions:
      size = partition.new_partition_info.size
      print("{} ({} bytes)".format(partition.partition_name, size))
def extract(payload_file_name, output_dir="output", partition_names=None):
  """Write each selected partition image from a full payload to output_dir.

  Args:
    payload_file_name: path to the payload.bin file
    output_dir: directory to write partition images into
    partition_names: optional collection of partition names to extract;
                     when falsy, every partition is extracted
  """
  # Create the output directory, tolerating its prior existence.
  try:
    os.makedirs(output_dir)
  except OSError as err:
    if err.errno != errno.EEXIST:
      raise

  with open(payload_file_name, 'rb') as fileobj:
    payload = update_payload.Payload(fileobj)
    payload.Init()

    # Only full payloads can be applied without source partitions.
    if payload.IsDelta():
      print("Delta payloads are not supported")
      exit(1)

    helper = applier.PayloadApplier(payload)
    for partition in payload.manifest.partitions:
      name = partition.partition_name
      if partition_names and name not in partition_names:
        continue
      print("Extracting {}".format(name))
      destination = os.path.join(output_dir, name)
      helper._ApplyToPartition(
          partition.operations, name, 'install_operations',
          destination, partition.new_partition_info)
if __name__ == '__main__':
  # Command-line entry point: list or extract partitions from a payload.bin.
  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument("payload", metavar="payload.bin",
                          help="Path to the payload.bin")
  arg_parser.add_argument("--output_dir", default="output",
                          help="Output directory")
  arg_parser.add_argument("--partitions", type=str, nargs='+',
                          help="Name of the partitions to extract")
  arg_parser.add_argument("--list_partitions", action="store_true",
                          help="List the partitions included in the payload.bin")
  options = arg_parser.parse_args()

  if options.list_partitions:
    list_content(options.payload)
  else:
    extract(options.payload, options.output_dir, options.partitions)

View File

@@ -0,0 +1,22 @@
#
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Library for processing, verifying and applying Chrome OS update payloads."""
# Just raise the interface classes to the root namespace.
from update_payload.checker import CHECKS_TO_DISABLE
from update_payload.error import PayloadError
from update_payload.payload import Payload

View File

@@ -0,0 +1,667 @@
#
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Applying a Chrome OS update payload.
This module is used internally by the main Payload class for applying an update
payload. The interface for invoking the applier is as follows:
applier = PayloadApplier(payload)
applier.Run(...)
"""
from __future__ import print_function
import array
import bz2
import hashlib
import itertools
# Not everywhere we can have the lzma library so we ignore it if we didn't have
# it because it is not going to be used. For example, 'cros flash' uses
# devserver code which eventually loads this file, but the lzma library is not
# included in the client test devices, and it is not necessary to do so. But
# lzma is not used in 'cros flash' so it should be fine. Python 3.x include
# lzma, but for backward compatibility with Python 2.7, backports-lzma is
# needed.
try:
import lzma
except ImportError:
try:
from backports import lzma
except ImportError:
pass
import os
import shutil
import subprocess
import sys
import tempfile
from update_payload import common
from update_payload.error import PayloadError
#
# Helper functions.
#
def _VerifySha256(file_obj, expected_hash, name, length=-1):
  """Verifies the SHA256 hash of a file.

  Args:
    file_obj: file object to read
    expected_hash: the hash digest we expect to be getting
    name: name string of this hash, for error reporting
    length: precise length of data to verify (optional)

  Raises:
    PayloadError if computed hash doesn't match expected one, or if fails to
    read the specified length of data.
  """
  hasher = hashlib.sha256()
  block_length = 1024 * 1024
  # sys.maxsize replaces the Python-2-only sys.maxint as the "no limit"
  # sentinel; it exists on both Python 2.6+ and Python 3.
  max_length = length if length >= 0 else sys.maxsize

  while max_length > 0:
    read_length = min(max_length, block_length)
    data = file_obj.read(read_length)
    if not data:
      break
    max_length -= len(data)
    hasher.update(data)

  if length >= 0 and max_length > 0:
    raise PayloadError(
        'insufficient data (%d instead of %d) when verifying %s' %
        (length - max_length, length, name))

  actual_hash = hasher.digest()
  if actual_hash != expected_hash:
    raise PayloadError('%s hash (%s) not as expected (%s)' %
                       (name, common.FormatSha256(actual_hash),
                        common.FormatSha256(expected_hash)))
def _ReadExtents(file_obj, extents, block_size, max_length=-1):
  """Reads data from file as defined by extent sequence.

  This tries to be efficient by not copying data as it is read in chunks.

  Args:
    file_obj: file object
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    max_length: maximum length to read (optional; -1 means unlimited)

  Returns:
    A character array containing the concatenated read data.
  """
  # NOTE(review): array('c') and sys.maxint are Python-2-only (removed in
  # Python 3); the '\0' fill below is likewise a Py2 byte-string char.
  data = array.array('c')
  if max_length < 0:
    max_length = sys.maxint
  for ex in extents:
    if max_length == 0:
      break
    read_length = min(max_length, ex.num_blocks * block_size)

    # Fill with zeros or read from file, depending on the type of extent.
    # A pseudo-extent marks data that has no backing blocks (e.g. signatures).
    if ex.start_block == common.PSEUDO_EXTENT_MARKER:
      data.extend(itertools.repeat('\0', read_length))
    else:
      file_obj.seek(ex.start_block * block_size)
      data.fromfile(file_obj, read_length)

    max_length -= read_length

  return data
def _WriteExtents(file_obj, data, extents, block_size, base_name):
  """Writes data to file as defined by extent sequence.

  This tries to be efficient by not copy data as it is written in chunks.

  Args:
    file_obj: file object
    data: data to write
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    base_name: name string of extent sequence for error reporting

  Raises:
    PayloadError when things don't add up.
  """
  data_offset = 0
  data_length = len(data)
  for ex, ex_name in common.ExtentIter(extents, base_name):
    if not data_length:
      raise PayloadError('%s: more write extents than data' % ex_name)
    write_length = min(data_length, ex.num_blocks * block_size)

    # Only do actual writing if this is not a pseudo-extent.
    if ex.start_block != common.PSEUDO_EXTENT_MARKER:
      file_obj.seek(ex.start_block * block_size)
      # buffer() is Python-2-only; it gives a zero-copy view of the slice
      # (memoryview is the Python 3 equivalent).
      data_view = buffer(data, data_offset, write_length)
      file_obj.write(data_view)

    data_offset += write_length
    data_length -= write_length

  if data_length:
    raise PayloadError('%s: more data than write extents' % base_name)
def _ExtentsToBspatchArg(extents, block_size, base_name, data_length=-1):
  """Translates an extent sequence into a bspatch-compatible string argument.

  Args:
    extents: sequence of block extents (offset and length)
    block_size: size of each block
    base_name: name string of extent sequence for error reporting
    data_length: the actual total length of the data in bytes (optional)

  Returns:
    A tuple consisting of (i) a string of the form
    "off_1:len_1,...,off_n:len_n", (ii) an offset where zero padding is needed
    for filling the last extent, (iii) the length of the padding (zero means no
    padding is needed and the extents cover the full length of data).

  Raises:
    PayloadError if data_length is too short or too long.
  """
  arg = ''
  pad_off = pad_len = 0
  # sys.maxsize replaces the Python-2-only sys.maxint; available on both
  # Python 2.6+ and Python 3, it plays the same "unlimited" sentinel role.
  if data_length < 0:
    data_length = sys.maxsize
  for ex, ex_name in common.ExtentIter(extents, base_name):
    if not data_length:
      raise PayloadError('%s: more extents than total data length' % ex_name)

    # Pseudo-extents (no backing blocks) are encoded with offset -1.
    is_pseudo = ex.start_block == common.PSEUDO_EXTENT_MARKER
    start_byte = -1 if is_pseudo else ex.start_block * block_size
    num_bytes = ex.num_blocks * block_size
    if data_length < num_bytes:
      # We're only padding a real extent.
      if not is_pseudo:
        pad_off = start_byte + data_length
        pad_len = num_bytes - data_length
      num_bytes = data_length

    # '(arg and ",")' prepends a comma separator for all but the first extent.
    arg += '%s%d:%d' % (arg and ',', start_byte, num_bytes)
    data_length -= num_bytes

  if data_length:
    raise PayloadError('%s: extents not covering full data length' % base_name)

  return arg, pad_off, pad_len
#
# Payload application.
#
class PayloadApplier(object):
  """Applying an update payload.

  This is a short-lived object whose purpose is to isolate the logic used for
  applying an update payload.
  """

  def __init__(self, payload, bsdiff_in_place=True, bspatch_path=None,
               puffpatch_path=None, truncate_to_expected_size=True):
    """Initialize the applier.

    Args:
      payload: the payload object to check
      bsdiff_in_place: whether to perform BSDIFF operation in-place (optional)
      bspatch_path: path to the bspatch binary (optional)
      puffpatch_path: path to the puffpatch binary (optional)
      truncate_to_expected_size: whether to truncate the resulting partitions
                                 to their expected sizes, as specified in the
                                 payload (optional)
    """
    assert payload.is_init, 'uninitialized update payload'
    self.payload = payload
    self.block_size = payload.manifest.block_size
    self.minor_version = payload.manifest.minor_version
    self.bsdiff_in_place = bsdiff_in_place
    # Fall back to locating the tools on PATH when no explicit path is given.
    self.bspatch_path = bspatch_path or 'bspatch'
    self.puffpatch_path = puffpatch_path or 'puffin'
    self.truncate_to_expected_size = truncate_to_expected_size

  def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size):
    """Applies a REPLACE{,_BZ,_XZ} operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      out_data: the data to be written
      part_file: the partition file object
      part_size: the size of the partition

    Raises:
      PayloadError if something goes wrong.
    """
    block_size = self.block_size
    data_length = len(out_data)

    # Decompress data if needed.
    if op.type == common.OpType.REPLACE_BZ:
      out_data = bz2.decompress(out_data)
      data_length = len(out_data)
    elif op.type == common.OpType.REPLACE_XZ:
      # pylint: disable=no-member
      out_data = lzma.decompress(out_data)
      data_length = len(out_data)

    # Write data to blocks specified in dst extents.
    data_start = 0
    for ex, ex_name in common.ExtentIter(op.dst_extents,
                                         '%s.dst_extents' % op_name):
      start_block = ex.start_block
      num_blocks = ex.num_blocks
      count = num_blocks * block_size

      # Make sure it's not a fake (signature) operation.
      if start_block != common.PSEUDO_EXTENT_MARKER:
        data_end = data_start + count

        # Make sure we're not running past partition boundary.
        if (start_block + num_blocks) * block_size > part_size:
          raise PayloadError(
              '%s: extent (%s) exceeds partition size (%d)' %
              (ex_name, common.FormatExtent(ex, block_size),
               part_size))

        # Make sure that we have enough data to write.
        # NOTE(review): the format string below is never given arguments, so
        # this error message is emitted with a literal '%s' — confirm and fix.
        if data_end >= data_length + block_size:
          raise PayloadError(
              '%s: more dst blocks than data (even with padding)')

        # Pad with zeros if necessary (Py2 byte-string '\0' padding).
        if data_end > data_length:
          padding = data_end - data_length
          out_data += '\0' * padding

        # NOTE(review): this seek of the payload file appears to have no
        # effect here (nothing reads or writes the payload file before it is
        # next repositioned) — confirm before removing.
        self.payload.payload_file.seek(start_block * block_size)
        part_file.seek(start_block * block_size)
        part_file.write(out_data[data_start:data_end])

      # Pseudo-extents still consume their share of the data stream.
      data_start += count

    # Make sure we wrote all data.
    if data_start < data_length:
      raise PayloadError('%s: wrote fewer bytes (%d) than expected (%d)' %
                         (op_name, data_start, data_length))

  def _ApplyMoveOperation(self, op, op_name, part_file):
    """Applies a MOVE operation.

    Note that this operation must read the whole block data from the input and
    only then dump it, due to our in-place update semantics; otherwise, it
    might clobber data midway through.

    Args:
      op: the operation object
      op_name: name string for error reporting
      part_file: the partition file object

    Raises:
      PayloadError if something goes wrong.
    """
    block_size = self.block_size

    # Gather input raw data from src extents.
    in_data = _ReadExtents(part_file, op.src_extents, block_size)

    # Dump extracted data to dst extents.
    _WriteExtents(part_file, in_data, op.dst_extents, block_size,
                  '%s.dst_extents' % op_name)

  def _ApplyZeroOperation(self, op, op_name, part_file):
    """Applies a ZERO operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      part_file: the partition file object

    Raises:
      PayloadError if something goes wrong.
    """
    block_size = self.block_size
    base_name = '%s.dst_extents' % op_name

    # Iterate over the extents and write zero.
    # pylint: disable=unused-variable
    for ex, ex_name in common.ExtentIter(op.dst_extents, base_name):
      # Only do actual writing if this is not a pseudo-extent.
      if ex.start_block != common.PSEUDO_EXTENT_MARKER:
        part_file.seek(ex.start_block * block_size)
        part_file.write('\0' * (ex.num_blocks * block_size))

  def _ApplySourceCopyOperation(self, op, op_name, old_part_file,
                                new_part_file):
    """Applies a SOURCE_COPY operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      old_part_file: the old partition file object
      new_part_file: the new partition file object

    Raises:
      PayloadError if something goes wrong.
    """
    if not old_part_file:
      raise PayloadError(
          '%s: no source partition file provided for operation type (%d)' %
          (op_name, op.type))

    block_size = self.block_size

    # Gather input raw data from src extents.
    in_data = _ReadExtents(old_part_file, op.src_extents, block_size)

    # Dump extracted data to dst extents.
    _WriteExtents(new_part_file, in_data, op.dst_extents, block_size,
                  '%s.dst_extents' % op_name)

  def _BytesInExtents(self, extents, base_name):
    """Counts the length of extents in bytes.

    Args:
      extents: The list of Extents.
      base_name: For error reporting.

    Returns:
      The number of bytes in extents.
    """
    length = 0
    # pylint: disable=unused-variable
    for ex, ex_name in common.ExtentIter(extents, base_name):
      length += ex.num_blocks * self.block_size
    return length

  def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file,
                          new_part_file):
    """Applies a SOURCE_BSDIFF, BROTLI_BSDIFF or PUFFDIFF operation.

    Args:
      op: the operation object
      op_name: name string for error reporting
      patch_data: the binary patch content
      old_part_file: the source partition file object
      new_part_file: the target partition file object

    Raises:
      PayloadError if something goes wrong.
    """
    if not old_part_file:
      raise PayloadError(
          '%s: no source partition file provided for operation type (%d)' %
          (op_name, op.type))

    block_size = self.block_size

    # Dump patch data to file.
    with tempfile.NamedTemporaryFile(delete=False) as patch_file:
      patch_file_name = patch_file.name
      patch_file.write(patch_data)

    # Fast path: when both partitions are real files we can hand /dev/fd
    # descriptors plus extent strings directly to the external tool.
    # NOTE(review): old_part_file cannot be falsy here (checked above), so
    # the '(not old_part_file)' clause is vestigial.
    if (hasattr(new_part_file, 'fileno') and
        ((not old_part_file) or hasattr(old_part_file, 'fileno'))):
      # Construct input and output extents argument for bspatch.
      # NOTE(review): the "%s.src_extents"/"%s.dst_extents" literals passed to
      # _BytesInExtents are never %-formatted with op_name, so error messages
      # from that path show the raw placeholder — confirm and fix upstream.
      in_extents_arg, _, _ = _ExtentsToBspatchArg(
          op.src_extents, block_size, '%s.src_extents' % op_name,
          data_length=op.src_length if op.src_length else
          self._BytesInExtents(op.src_extents, "%s.src_extents"))
      out_extents_arg, pad_off, pad_len = _ExtentsToBspatchArg(
          op.dst_extents, block_size, '%s.dst_extents' % op_name,
          data_length=op.dst_length if op.dst_length else
          self._BytesInExtents(op.dst_extents, "%s.dst_extents"))

      new_file_name = '/dev/fd/%d' % new_part_file.fileno()
      # Diff from source partition.
      old_file_name = '/dev/fd/%d' % old_part_file.fileno()

      if op.type in (common.OpType.BSDIFF, common.OpType.SOURCE_BSDIFF,
                     common.OpType.BROTLI_BSDIFF):
        # Invoke bspatch on partition file with extents args.
        bspatch_cmd = [self.bspatch_path, old_file_name, new_file_name,
                       patch_file_name, in_extents_arg, out_extents_arg]
        subprocess.check_call(bspatch_cmd)
      elif op.type == common.OpType.PUFFDIFF:
        # Invoke puffpatch on partition file with extents args.
        puffpatch_cmd = [self.puffpatch_path,
                         "--operation=puffpatch",
                         "--src_file=%s" % old_file_name,
                         "--dst_file=%s" % new_file_name,
                         "--patch_file=%s" % patch_file_name,
                         "--src_extents=%s" % in_extents_arg,
                         "--dst_extents=%s" % out_extents_arg]
        subprocess.check_call(puffpatch_cmd)
      else:
        raise PayloadError("Unknown operation %s", op.type)

      # Pad with zeros past the total output length.
      if pad_len:
        new_part_file.seek(pad_off)
        new_part_file.write('\0' * pad_len)
    else:
      # Slow path: stage the source data and the patched output in temp files.
      # Gather input raw data and write to a temp file.
      input_part_file = old_part_file if old_part_file else new_part_file
      in_data = _ReadExtents(input_part_file, op.src_extents, block_size,
                             max_length=op.src_length if op.src_length else
                             self._BytesInExtents(op.src_extents,
                                                  "%s.src_extents"))
      with tempfile.NamedTemporaryFile(delete=False) as in_file:
        in_file_name = in_file.name
        in_file.write(in_data)

      # Allocate temporary output file.
      with tempfile.NamedTemporaryFile(delete=False) as out_file:
        out_file_name = out_file.name

      if op.type in (common.OpType.BSDIFF, common.OpType.SOURCE_BSDIFF,
                     common.OpType.BROTLI_BSDIFF):
        # Invoke bspatch.
        bspatch_cmd = [self.bspatch_path, in_file_name, out_file_name,
                       patch_file_name]
        subprocess.check_call(bspatch_cmd)
      elif op.type == common.OpType.PUFFDIFF:
        # Invoke puffpatch.
        puffpatch_cmd = [self.puffpatch_path,
                         "--operation=puffpatch",
                         "--src_file=%s" % in_file_name,
                         "--dst_file=%s" % out_file_name,
                         "--patch_file=%s" % patch_file_name]
        subprocess.check_call(puffpatch_cmd)
      else:
        raise PayloadError("Unknown operation %s", op.type)

      # Read output.
      with open(out_file_name, 'rb') as out_file:
        out_data = out_file.read()
        if len(out_data) != op.dst_length:
          raise PayloadError(
              '%s: actual patched data length (%d) not as expected (%d)' %
              (op_name, len(out_data), op.dst_length))

      # Write output back to partition, with padding.
      unaligned_out_len = len(out_data) % block_size
      if unaligned_out_len:
        out_data += '\0' * (block_size - unaligned_out_len)
      _WriteExtents(new_part_file, out_data, op.dst_extents, block_size,
                    '%s.dst_extents' % op_name)

      # Delete input/output files.
      os.remove(in_file_name)
      os.remove(out_file_name)

    # Delete patch file.
    os.remove(patch_file_name)

  def _ApplyOperations(self, operations, base_name, old_part_file,
                       new_part_file, part_size):
    """Applies a sequence of update operations to a partition.

    This assumes an in-place update semantics for MOVE and BSDIFF, namely all
    reads are performed first, then the data is processed and written back to
    the same file.

    Args:
      operations: the sequence of operations
      base_name: the name of the operation sequence
      old_part_file: the old partition file object, open for reading/writing
      new_part_file: the new partition file object, open for reading/writing
      part_size: the partition size

    Raises:
      PayloadError if anything goes wrong while processing the payload.
    """
    for op, op_name in common.OperationIter(operations, base_name):
      # Read data blob.
      data = self.payload.ReadDataBlob(op.data_offset, op.data_length)

      if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ,
                     common.OpType.REPLACE_XZ):
        self._ApplyReplaceOperation(op, op_name, data, new_part_file, part_size)
      elif op.type == common.OpType.MOVE:
        self._ApplyMoveOperation(op, op_name, new_part_file)
      elif op.type == common.OpType.ZERO:
        self._ApplyZeroOperation(op, op_name, new_part_file)
      elif op.type == common.OpType.BSDIFF:
        # Plain BSDIFF is in-place: source and target are the same file.
        self._ApplyDiffOperation(op, op_name, data, new_part_file,
                                 new_part_file)
      elif op.type == common.OpType.SOURCE_COPY:
        self._ApplySourceCopyOperation(op, op_name, old_part_file,
                                       new_part_file)
      elif op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.PUFFDIFF,
                       common.OpType.BROTLI_BSDIFF):
        self._ApplyDiffOperation(op, op_name, data, old_part_file,
                                 new_part_file)
      else:
        raise PayloadError('%s: unknown operation type (%d)' %
                           (op_name, op.type))

  def _ApplyToPartition(self, operations, part_name, base_name,
                        new_part_file_name, new_part_info,
                        old_part_file_name=None, old_part_info=None):
    """Applies an update to a partition.

    Args:
      operations: the sequence of update operations to apply
      part_name: the name of the partition, for error reporting
      base_name: the name of the operation sequence
      new_part_file_name: file name to write partition data to
      new_part_info: size and expected hash of dest partition
      old_part_file_name: file name of source partition (optional)
      old_part_info: size and expected hash of source partition (optional)

    Raises:
      PayloadError if anything goes wrong with the update.
    """
    # Do we have a source partition?
    if old_part_file_name:
      # Verify the source partition.
      with open(old_part_file_name, 'rb') as old_part_file:
        _VerifySha256(old_part_file, old_part_info.hash,
                      'old ' + part_name, length=old_part_info.size)
      new_part_file_mode = 'r+b'
      if self.minor_version == common.INPLACE_MINOR_PAYLOAD_VERSION:
        # Copy the src partition to the dst one; make sure we don't truncate it.
        shutil.copyfile(old_part_file_name, new_part_file_name)
      elif (self.minor_version == common.SOURCE_MINOR_PAYLOAD_VERSION or
            self.minor_version == common.OPSRCHASH_MINOR_PAYLOAD_VERSION or
            self.minor_version == common.BROTLI_BSDIFF_MINOR_PAYLOAD_VERSION or
            self.minor_version == common.PUFFDIFF_MINOR_PAYLOAD_VERSION):
        # In minor version >= 2, we don't want to copy the partitions, so
        # instead just make the new partition file.
        open(new_part_file_name, 'w').close()
      else:
        raise PayloadError("Unknown minor version: %d" % self.minor_version)
    else:
      # We need to create/truncate the dst partition file.
      new_part_file_mode = 'w+b'

    # Apply operations.
    with open(new_part_file_name, new_part_file_mode) as new_part_file:
      old_part_file = (open(old_part_file_name, 'r+b')
                       if old_part_file_name else None)
      try:
        self._ApplyOperations(operations, base_name, old_part_file,
                              new_part_file, new_part_info.size)
      finally:
        if old_part_file:
          old_part_file.close()

      # Truncate the result, if so instructed.
      if self.truncate_to_expected_size:
        new_part_file.seek(0, 2)
        if new_part_file.tell() > new_part_info.size:
          new_part_file.seek(new_part_info.size)
          new_part_file.truncate()

    # Verify the resulting partition.
    with open(new_part_file_name, 'rb') as new_part_file:
      _VerifySha256(new_part_file, new_part_info.hash,
                    'new ' + part_name, length=new_part_info.size)

  def Run(self, new_kernel_part, new_rootfs_part, old_kernel_part=None,
          old_rootfs_part=None):
    """Applier entry point, invoking all update operations.

    Args:
      new_kernel_part: name of dest kernel partition file
      new_rootfs_part: name of dest rootfs partition file
      old_kernel_part: name of source kernel partition file (optional)
      old_rootfs_part: name of source rootfs partition file (optional)

    Raises:
      PayloadError if payload application failed.
    """
    self.payload.ResetFile()

    # Make sure the arguments are sane and match the payload.
    if not (new_kernel_part and new_rootfs_part):
      raise PayloadError('missing dst {kernel,rootfs} partitions')

    if not (old_kernel_part or old_rootfs_part):
      if not self.payload.IsFull():
        raise PayloadError('trying to apply a non-full update without src '
                           '{kernel,rootfs} partitions')
    elif old_kernel_part and old_rootfs_part:
      if not self.payload.IsDelta():
        raise PayloadError('trying to apply a non-delta update onto src '
                           '{kernel,rootfs} partitions')
    else:
      raise PayloadError('not all src partitions provided')

    # Apply update to rootfs.
    self._ApplyToPartition(
        self.payload.manifest.install_operations, 'rootfs',
        'install_operations', new_rootfs_part,
        self.payload.manifest.new_rootfs_info, old_rootfs_part,
        self.payload.manifest.old_rootfs_info)

    # Apply update to kernel update.
    self._ApplyToPartition(
        self.payload.manifest.kernel_install_operations, 'kernel',
        'kernel_install_operations', new_kernel_part,
        self.payload.manifest.new_kernel_info, old_kernel_part,
        self.payload.manifest.old_kernel_info)

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,218 @@
#
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utilities for update payload processing."""
from __future__ import print_function
from update_payload import update_metadata_pb2
from update_payload.error import PayloadError
#
# Constants.
#
# UINT64_MAX. Written as (1 << 64) - 1 instead of the Python-2-only 1L
# literal: the value is identical on Python 2 (auto-promotes to long) and
# the expression also parses on Python 3.
PSEUDO_EXTENT_MARKER = (1 << 64) - 1

# ASN.1 prefix bytes prepended to a raw digest when building/verifying RSA
# signature blobs (the trailing \x04\x20 is an OCTET STRING of 32 bytes,
# i.e. a SHA-256 digest).
SIG_ASN1_HEADER = (
    '\x30\x31\x30\x0d\x06\x09\x60\x86'
    '\x48\x01\x65\x03\x04\x02\x01\x05'
    '\x00\x04\x20'
)

# Payload major versions.
CHROMEOS_MAJOR_PAYLOAD_VERSION = 1
BRILLO_MAJOR_PAYLOAD_VERSION = 2

# Payload minor versions; each one enables additional operation types.
INPLACE_MINOR_PAYLOAD_VERSION = 1
SOURCE_MINOR_PAYLOAD_VERSION = 2
OPSRCHASH_MINOR_PAYLOAD_VERSION = 3
BROTLI_BSDIFF_MINOR_PAYLOAD_VERSION = 4
PUFFDIFF_MINOR_PAYLOAD_VERSION = 5
#
# Payload operation types.
#
class OpType(object):
  """Container for operation type constants."""
  # Mirror the InstallOperation enum from the update_metadata protobuf so
  # callers can reference operation types without importing the pb2 module.
  _CLASS = update_metadata_pb2.InstallOperation
  REPLACE = _CLASS.REPLACE
  REPLACE_BZ = _CLASS.REPLACE_BZ
  MOVE = _CLASS.MOVE
  BSDIFF = _CLASS.BSDIFF
  SOURCE_COPY = _CLASS.SOURCE_COPY
  SOURCE_BSDIFF = _CLASS.SOURCE_BSDIFF
  ZERO = _CLASS.ZERO
  DISCARD = _CLASS.DISCARD
  REPLACE_XZ = _CLASS.REPLACE_XZ
  PUFFDIFF = _CLASS.PUFFDIFF
  BROTLI_BSDIFF = _CLASS.BROTLI_BSDIFF
  # Tuple of every known operation type, for membership checks.
  ALL = (REPLACE, REPLACE_BZ, MOVE, BSDIFF, SOURCE_COPY, SOURCE_BSDIFF, ZERO,
         DISCARD, REPLACE_XZ, PUFFDIFF, BROTLI_BSDIFF)
  # Human-readable names, used when formatting operation names in messages.
  NAMES = {
      REPLACE: 'REPLACE',
      REPLACE_BZ: 'REPLACE_BZ',
      MOVE: 'MOVE',
      BSDIFF: 'BSDIFF',
      SOURCE_COPY: 'SOURCE_COPY',
      SOURCE_BSDIFF: 'SOURCE_BSDIFF',
      ZERO: 'ZERO',
      DISCARD: 'DISCARD',
      REPLACE_XZ: 'REPLACE_XZ',
      PUFFDIFF: 'PUFFDIFF',
      BROTLI_BSDIFF: 'BROTLI_BSDIFF',
  }

  def __init__(self):
    pass
#
# Checked and hashed reading of data.
#
def IntPackingFmtStr(size, is_unsigned):
  """Returns an integer format string for use by the struct module.

  Args:
    size: the integer size in bytes (2, 4 or 8)
    is_unsigned: whether it is signed or not

  Returns:
    A format string for packing/unpacking integer values; assumes network byte
    order (big-endian).

  Raises:
    PayloadError if something is wrong with the arguments.
  """
  # Determine the base conversion format.
  if size == 2:
    fmt = 'h'
  elif size == 4:
    fmt = 'i'
  elif size == 8:
    fmt = 'q'
  else:
    # Fixed typo in the error message ("unsupport" -> "unsupported").
    raise PayloadError('unsupported numeric field size (%s)' % size)

  # Signed or unsigned? struct uses uppercase codes for unsigned.
  if is_unsigned:
    fmt = fmt.upper()

  # Make it network byte order (big-endian).
  fmt = '!' + fmt

  return fmt
def Read(file_obj, length, offset=None, hasher=None):
  """Reads binary data from a file.

  Args:
    file_obj: an open file object
    length: the length of the data to read
    offset: an offset to seek to prior to reading; this is an absolute offset
            from either the beginning (non-negative) or end (negative) of the
            file. (optional)
    hasher: a hashing object to pass the read data through (optional)

  Returns:
    A string containing the read data.

  Raises:
    PayloadError if a read error occurred or not enough data was read.
  """
  if offset is not None:
    if offset >= 0:
      file_obj.seek(offset)
    else:
      # Negative offsets are relative to the end of the file (whence=2).
      file_obj.seek(offset, 2)

  try:
    data = file_obj.read(length)
  # 'except IOError as e' replaces the Python-2-only 'except IOError, e'
  # comma syntax; the 'as' form is valid on Python 2.6+ and Python 3.
  except IOError as e:
    raise PayloadError('error reading from file (%s): %s' % (file_obj.name, e))

  if len(data) != length:
    raise PayloadError(
        'reading from file (%s) too short (%d instead of %d bytes)' %
        (file_obj.name, len(data), length))

  if hasher:
    hasher.update(data)

  return data
#
# Formatting functions.
#
def FormatExtent(ex, block_size=0):
  """Returns a 'start->end' string for an extent, scaled when a block size
  is given (e.g. '2->5 * 4096')."""
  first, last = ex.start_block, ex.start_block + ex.num_blocks
  if not block_size:
    return '%d->%d' % (first, last)
  return '%d->%d * %d' % (first, last, block_size)
def FormatSha256(digest):
  """Returns a canonical string representation of a SHA256 digest.

  Uses base64.b64encode instead of the Python-2-only str.encode('base64')
  codec. For a 32-byte digest both yield the same 44-character base64 string
  (the codec's trailing newline was what .strip() used to remove).
  """
  import base64  # Local import keeps the module's import header unchanged.
  return base64.b64encode(digest)
#
# Useful iterators.
#
def _ObjNameIter(items, base_name, reverse=False, name_format_func=None):
  """A generic (item, name) tuple iterator.

  Args:
    items: the sequence of objects to iterate on
    base_name: the base name for all objects
    reverse: whether iteration should be in reverse order
    name_format_func: a function to apply to the name string

  Yields:
    (item, name) pairs, where name is base_name + '[i]' with a one-based
    position index, optionally post-processed by name_format_func.
  """
  if reverse:
    position, step = len(items), -1
    sequence = reversed(items)
  else:
    position, step = 1, 1
    sequence = items
  for obj in sequence:
    label = '%s[%d]' % (base_name, position)
    if name_format_func:
      label = name_format_func(obj, label)
    yield (obj, label)
    position += step


def _OperationNameFormatter(op, op_name):
  # Decorate the positional name with the operation's type name, e.g.
  # "ops[3](REPLACE_BZ)"; unknown types render as '?'.
  return '%s(%s)' % (op_name, OpType.NAMES.get(op.type, '?'))


def OperationIter(operations, base_name, reverse=False):
  """An (item, name) iterator for update operations."""
  return _ObjNameIter(operations, base_name, reverse=reverse,
                      name_format_func=_OperationNameFormatter)


def ExtentIter(extents, base_name, reverse=False):
  """An (item, name) iterator for operation extents."""
  return _ObjNameIter(extents, base_name, reverse=reverse)


def SignatureIter(sigs, base_name, reverse=False):
  """An (item, name) iterator for signatures."""
  return _ObjNameIter(sigs, base_name, reverse=reverse)

View File

@@ -0,0 +1,21 @@
#
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Payload handling errors."""
class PayloadError(Exception):
  """An update payload general processing error.

  Raised throughout the update_payload package for any failure while parsing,
  verifying or applying an update payload; callers catch this one type.
  """

View File

@@ -0,0 +1,109 @@
#
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Various formatting functions."""
def NumToPercent(num, total, min_precision=1, max_precision=5):
  """Returns the percentage (string) of |num| out of |total|.

  If the percentage includes a fraction, it will be computed down to the least
  precision that yields a non-zero and ranging between |min_precision| and
  |max_precision|. Values are always rounded down. All arithmetic operations
  are integer built-ins. Examples (using default precision):

    (1, 1) => 100%
    (3, 10) => 30%
    (3, 9) => 33.3%
    (3, 900) => 0.3%
    (3, 9000000) => 0.00003%
    (3, 900000000) => 0%
    (5, 2) => 250%

  Args:
    num: the value of the part
    total: the value of the whole
    min_precision: minimum precision for fractional percentage
    max_precision: maximum precision for fractional percentage

  Returns:
    Percentage string, or None if percent cannot be computed (i.e. total is
    zero).
  """
  if total == 0:
    return None

  percent = 0
  precision = min(min_precision, max_precision)
  factor = 10 ** precision
  while precision <= max_precision:
    # Floor division (//) keeps all arithmetic integral: identical to the
    # Python 2 classic '/' on ints, and correct (not float) on Python 3.
    percent = num * 100 * factor // total
    if percent:
      break
    factor *= 10
    precision += 1

  # Strip trailing zeros from the fraction, reducing precision accordingly.
  whole, frac = divmod(percent, factor)
  while frac and not frac % 10:
    frac //= 10
    precision -= 1

  return '%d%s%%' % (whole, '.%0*d' % (precision, frac) if frac else '')
def BytesToHumanReadable(size, precision=1, decimal=False):
  """Returns a human readable representation of a given |size|.

  The returned string includes unit notations in either binary (KiB, MiB, etc)
  or decimal (kB, MB, etc), based on the value of |decimal|. The chosen unit is
  the largest that yields a whole (or mixed) number. It may contain up to
  |precision| fractional digits. Values are always rounded down. Largest unit
  is an exabyte. All arithmetic operations are integer built-ins. Examples
  (using default precision and binary units):

    4096 => 4 KiB
    5000 => 4.8 KiB
    500000 => 488.2 KiB
    5000000 => 4.7 MiB

  Args:
    size: the size in bytes
    precision: the number of digits past the decimal point
    decimal: whether to compute/present decimal or binary units

  Returns:
    Readable size string, or None if no conversion is applicable (i.e. size is
    less than the smallest unit).
  """
  constants = (
      (('KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB'), 1024),
      (('kB', 'MB', 'GB', 'TB', 'PB', 'EB'), 1000)
  )
  suffixes, base = constants[decimal]
  exp, magnitude = 0, 1
  while exp < len(suffixes):
    next_magnitude = magnitude * base
    if size < next_magnitude:
      break
    exp += 1
    magnitude = next_magnitude

  if exp != 0:
    # Floor division (//) keeps the arithmetic integral under both Python 2
    # and Python 3; plain '/' would yield floats on Python 3.
    whole = size // magnitude
    frac = (size % magnitude) * (10 ** precision) // magnitude
    while frac and not frac % 10:
      frac //= 10
    return '%d%s %s' % (whole, '.%d' % frac if frac else '', suffixes[exp - 1])
  # Size is below the smallest unit: no conversion (implicit None), as
  # documented above.

View File

@@ -0,0 +1,89 @@
#!/usr/bin/python2
#
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for format_utils.py."""
import unittest
from update_payload import format_utils
class NumToPercentTest(unittest.TestCase):
  """Tests number conversion to percentage format."""

  def testHundredPercent(self):
    self.assertEqual(format_utils.NumToPercent(1, 1), '100%')

  def testOverHundredPercent(self):
    self.assertEqual(format_utils.NumToPercent(5, 2), '250%')

  def testWholePercent(self):
    self.assertEqual(format_utils.NumToPercent(3, 10), '30%')

  def testDefaultMinPrecision(self):
    # Default min_precision is one fractional digit.
    self.assertEqual(format_utils.NumToPercent(3, 9), '33.3%')
    self.assertEqual(format_utils.NumToPercent(3, 900), '0.3%')

  def testDefaultMaxPrecision(self):
    # Default max_precision is five digits; smaller ratios collapse to '0%'.
    self.assertEqual(format_utils.NumToPercent(3, 9000000), '0.00003%')
    self.assertEqual(format_utils.NumToPercent(3, 90000000), '0%')

  def testCustomMinPrecision(self):
    self.assertEqual(format_utils.NumToPercent(3, 9, min_precision=3),
                     '33.333%')
    self.assertEqual(format_utils.NumToPercent(3, 9, min_precision=0),
                     '33%')

  def testCustomMaxPrecision(self):
    self.assertEqual(format_utils.NumToPercent(3, 900, max_precision=1),
                     '0.3%')
    self.assertEqual(format_utils.NumToPercent(3, 9000, max_precision=1),
                     '0%')
class BytesToHumanReadableTest(unittest.TestCase):
  """Tests number conversion to human readable format."""

  def testBaseTwo(self):
    self.assertEqual(format_utils.BytesToHumanReadable(0x1000), '4 KiB')
    self.assertEqual(format_utils.BytesToHumanReadable(0x400000), '4 MiB')
    self.assertEqual(format_utils.BytesToHumanReadable(0x100000000), '4 GiB')
    self.assertEqual(format_utils.BytesToHumanReadable(0x40000000000), '4 TiB')

  def testDecimal(self):
    # decimal=True selects base-1000 units (kB/MB/GB).
    self.assertEqual(format_utils.BytesToHumanReadable(5000, decimal=True),
                     '5 kB')
    self.assertEqual(format_utils.BytesToHumanReadable(5000000, decimal=True),
                     '5 MB')
    self.assertEqual(format_utils.BytesToHumanReadable(5000000000,
                                                       decimal=True),
                     '5 GB')

  def testDefaultPrecision(self):
    self.assertEqual(format_utils.BytesToHumanReadable(5000), '4.8 KiB')
    self.assertEqual(format_utils.BytesToHumanReadable(500000), '488.2 KiB')
    self.assertEqual(format_utils.BytesToHumanReadable(5000000), '4.7 MiB')

  def testCustomPrecision(self):
    self.assertEqual(format_utils.BytesToHumanReadable(5000, precision=3),
                     '4.882 KiB')
    self.assertEqual(format_utils.BytesToHumanReadable(500000, precision=0),
                     '488 KiB')
    self.assertEqual(format_utils.BytesToHumanReadable(5000000, precision=5),
                     '4.76837 MiB')
# Run the unit tests when this module is executed directly.
if __name__ == '__main__':
  unittest.main()

View File

@@ -0,0 +1,129 @@
#
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Histogram generation tools."""
from collections import defaultdict
from update_payload import format_utils
class Histogram(object):
  """A histogram generating object.

  This object serves the sole purpose of formatting (key, val) pairs as an
  ASCII histogram, including bars and percentage markers, and taking care of
  label alignment, scaling, etc. In addition to the standard __init__
  interface, two static methods are provided for conveniently converting data
  in different formats into a histogram. Histogram generation is exported via
  its __str__ method, and looks as follows:

    Yes |################    | 5 (83.3%)
    No  |###                 | 1 (16.6%)

  TODO(garnold) we may want to add actual methods for adding data or tweaking
  the output layout and formatting. For now, though, this is fine.
  """

  def __init__(self, data, scale=20, formatter=None):
    """Initialize a histogram object.

    Args:
      data: list of (key, count) pairs constituting the histogram
      scale: number of characters used to indicate 100%
      formatter: function used for formatting raw histogram values
    """
    self.data = data
    self.scale = scale
    self.formatter = formatter or str
    # Tolerate an empty data set: default the label width to zero rather than
    # letting max() raise ValueError on an empty sequence.
    self.max_key_len = max([len(str(key)) for key, _ in self.data] or [0])
    self.total = sum(count for _, count in self.data)

  @staticmethod
  def FromCountDict(count_dict, scale=20, formatter=None, key_names=None):
    """Takes a dictionary of counts and returns a histogram object.

    This simply converts a mapping from names to counts into a list of (key,
    count) pairs, optionally translating keys into name strings, then
    generating and returning a histogram for them. This is a useful convenience
    call for clients that update a dictionary of counters as they (say) scan a
    data stream.

    Args:
      count_dict: dictionary mapping keys to occurrence counts
      scale: number of characters used to indicate 100%
      formatter: function used for formatting raw histogram values
      key_names: dictionary mapping keys to name strings

    Returns:
      A histogram object based on the given data.
    """
    namer = None
    if key_names:
      namer = lambda key: key_names[key]
    else:
      namer = lambda key: key

    hist = [(namer(key), count) for key, count in count_dict.items()]
    return Histogram(hist, scale, formatter)

  @staticmethod
  def FromKeyList(key_list, scale=20, formatter=None, key_names=None):
    """Takes a list of (possibly recurring) keys and returns a histogram object.

    This converts the list into a dictionary of counters, then uses
    FromCountDict() to generate the actual histogram. For example:

      ['a', 'a', 'b', 'a', 'b'] --> {'a': 3, 'b': 2} --> ...

    Args:
      key_list: list of (possibly recurring) keys
      scale: number of characters used to indicate 100%
      formatter: function used for formatting raw histogram values
      key_names: dictionary mapping keys to name strings

    Returns:
      A histogram object based on the given data.
    """
    count_dict = defaultdict(int)  # Unset items default to zero
    for key in key_list:
      count_dict[key] += 1
    return Histogram.FromCountDict(count_dict, scale, formatter, key_names)

  def __str__(self):
    hist_lines = []
    hist_bar = '|'

    for key, count in self.data:
      if self.total:
        # Floor division keeps bar_len an int under both Python 2 and 3;
        # '#' * bar_len requires an integer operand.
        bar_len = count * self.scale // self.total
        hist_bar = '|%s|' % ('#' * bar_len).ljust(self.scale)

      line = '%s %s %s' % (
          str(key).ljust(self.max_key_len),
          hist_bar,
          self.formatter(count))
      percent_str = format_utils.NumToPercent(count, self.total)
      if percent_str:
        line += ' (%s)' % percent_str

      hist_lines.append(line)

    return '\n'.join(hist_lines)

  def GetKeys(self):
    """Returns the keys of the histogram."""
    return [key for key, _ in self.data]

View File

@@ -0,0 +1,72 @@
#!/usr/bin/python2
#
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for histogram.py."""
import unittest
from update_payload import format_utils
from update_payload import histogram
class HistogramTest(unittest.TestCase):
  """Tests histogram"""

  @staticmethod
  def AddHumanReadableSize(size):
    # Formatter appending a human-readable size, e.g. '5000 (4.8 KiB)';
    # falls back to the bare number when no unit conversion applies.
    fmt = format_utils.BytesToHumanReadable(size)
    return '%s (%s)' % (size, fmt) if fmt else str(size)

  def CompareToExpectedDefault(self, actual_str):
    # Expected rendering with the default scale (20 characters == 100%).
    expected_str = (
        'Yes |################    | 5 (83.3%)\n'
        'No  |###                 | 1 (16.6%)'
    )
    self.assertEqual(actual_str, expected_str)

  def testExampleHistogram(self):
    self.CompareToExpectedDefault(str(histogram.Histogram(
        [('Yes', 5), ('No', 1)])))

  def testFromCountDict(self):
    self.CompareToExpectedDefault(str(histogram.Histogram.FromCountDict(
        {'Yes': 5, 'No': 1})))

  def testFromKeyList(self):
    self.CompareToExpectedDefault(str(histogram.Histogram.FromKeyList(
        ['Yes', 'Yes', 'No', 'Yes', 'Yes', 'Yes'])))

  def testCustomScale(self):
    expected_str = (
        'Yes |#### | 5 (83.3%)\n'
        'No  |     | 1 (16.6%)'
    )
    actual_str = str(histogram.Histogram([('Yes', 5), ('No', 1)], scale=5))
    self.assertEqual(actual_str, expected_str)

  def testCustomFormatter(self):
    expected_str = (
        'Yes |################    | 5000 (4.8 KiB) (83.3%)\n'
        'No  |###                 | 1000 (16.6%)'
    )
    actual_str = str(histogram.Histogram(
        [('Yes', 5000), ('No', 1000)], formatter=self.AddHumanReadableSize))
    self.assertEqual(actual_str, expected_str)
# Run the unit tests when this module is executed directly.
if __name__ == '__main__':
  unittest.main()

View File

@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAvtGHtqO21Uhy2wGz9fluIpIUR8G7dZoCZhZukGkm4mlfgL71
xPSArjx02/w/FhYxOusV6/XQeKgL3i8cni3HCkCOurZLpi2L5Ver6qrxKFh6WBVZ
0Dj7N6P/Mf5jZdhfvVyweLlsNK8Ypeb+RazfrsXhd4cy3dBMxouGwH7R7QQXTFCo
Cc8kgJBTxILl3jfvY8OrNKgYiCETa7tQdFkP0bfPwH9cAXuMjHXiZatim0tF+ivp
kM2v/6LTxtD6Rq1wks/N6CHi8efrRaviFp7c0mNmBNFaV54cHEUW2SlNIiRun7L0
1nAz/D8kuoHfx4E3Mtj0DbvngZJMX/X+rJQ5cQIDAQABAoIBADmE2X7hbJxwAUcp
BUExFdTP6dMTf9lcOjrhqiRXvgPjtYkOhvD+rsdWq/cf2zhiKibTdEEzUMr+BM3N
r7eyntvlR+DaUIVgF1pjigvryVPbD837aZ5NftRv194PC5FInttq1Dsf0ZEz8p8X
uS/xg1+ggG1SUK/yOSJkLpNZ5xelbclQJ9bnJST8PR8XbEieA83xt5M2DcooPzq0
/99m/daA5hmSWs6n8sFrIZDQxDhLyyW4J72jjoNTE87eCpwK855yXMelpEPDZNQi
nB3x5Y/bGbl81PInqL2q14lekrVYdYZ7bOBVlsmyvz6f1e4OOE1aaAM+w6ArA4az
6elZQE0CgYEA4GOU6BBu9jLqFdqV9jIkWsgz5ZWINz8PLJPtZzk5I9KO1m+GAUy2
h/1IGGR6qRQR49hMtq4C0lUifxquq0xivzJ87U9oxKC9yEeTxkmDe5csVHsnAtqT
xRgVM7Ysrut5NLU1zm0q3jBmkDu7d99LvscM/3n7eJ6RiYpnA54O6I8CgYEA2bNA
34PTvxBS2deRoxKQNlVU14FtirE+q0+k0wcE85wr7wIMpR13al8T1TpE8J1yvvZM
92HMGFGfYNDB46b8VfJ5AxEUFwdruec6sTVVfkMZMOqM/A08yiaLzQ1exDxNwaja
fLuG5FAVRD/2g7fLBcsmosyNgcgNr1XA8Q/nvf8CgYEAwaSOg7py19rWcqehlMZu
4z00tCNYWzz7LmA2l0clzYlPJTU3MvXt6+ujhRFpXXJpgfRPN7Nx0ewQihoPtNqF
uTSr5OwLoOyK+0Tx/UPByS2L3xgscWUJ8yQ2X9sOMqIZhmf/mDZTsU2ZpU03GlrE
dk43JF4zq0NEm6qp/dAwU3cCgYEAvECl+KKmmLIk8vvWlI2Y52Mi2rixYR2kc7+L
aHDJd1+1HhlHlgDFItbU765Trz5322phZArN0rnCeJYNFC9yRWBIBL7gAIoKPdgW
iOb15xlez04EXHGV/7kVa1wEdu0u0CiTxwjivMwDl+E36u8kQP5LirwYIgI800H0
doCqhUECgYEAjvA38OS7hy56Q4LQtmHFBuRIn4E5SrIGMwNIH6TGbEKQix3ajTCQ
0fSoLDGTkU6dH+T4v0WheveN2a2Kofqm0UQx5V2rfnY/Ut1fAAWgL/lsHLDnzPUZ
bvTOANl8TbT49xAfNXTaGWe7F7nYz+bK0UDif1tJNDLQw7USD5I8lbQ=
-----END RSA PRIVATE KEY-----

View File

@@ -0,0 +1,9 @@
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvtGHtqO21Uhy2wGz9flu
IpIUR8G7dZoCZhZukGkm4mlfgL71xPSArjx02/w/FhYxOusV6/XQeKgL3i8cni3H
CkCOurZLpi2L5Ver6qrxKFh6WBVZ0Dj7N6P/Mf5jZdhfvVyweLlsNK8Ypeb+Razf
rsXhd4cy3dBMxouGwH7R7QQXTFCoCc8kgJBTxILl3jfvY8OrNKgYiCETa7tQdFkP
0bfPwH9cAXuMjHXiZatim0tF+ivpkM2v/6LTxtD6Rq1wks/N6CHi8efrRaviFp7c
0mNmBNFaV54cHEUW2SlNIiRun7L01nAz/D8kuoHfx4E3Mtj0DbvngZJMX/X+rJQ5
cQIDAQAB
-----END PUBLIC KEY-----

View File

@@ -0,0 +1,336 @@
#
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tools for reading, verifying and applying Chrome OS update payloads."""
from __future__ import print_function
import hashlib
import struct
from update_payload import applier
from update_payload import checker
from update_payload import common
from update_payload import update_metadata_pb2
from update_payload.error import PayloadError
#
# Helper functions.
#
def _ReadInt(file_obj, size, is_unsigned, hasher=None):
  """Reads a binary-encoded integer from a file.

  It will do the correct conversion based on the reported size and whether or
  not a signed number is expected. Assumes a network (big-endian) byte
  ordering.

  Args:
    file_obj: a file object
    size: the integer size in bytes (2, 4 or 8)
    is_unsigned: whether it is signed or not
    hasher: an optional hasher to pass the value through

  Returns:
    An "unpacked" (Python) integer value.

  Raises:
    PayloadError if a read error occurred.
  """
  # Obtain the struct format for this width/signedness, read exactly |size|
  # bytes (feeding the hasher, if any), then unpack the single value.
  packing_fmt = common.IntPackingFmtStr(size, is_unsigned)
  raw_bytes = common.Read(file_obj, size, hasher=hasher)
  return struct.unpack(packing_fmt, raw_bytes)[0]
#
# Update payload.
#
class Payload(object):
  """Chrome OS update payload processor."""

  class _PayloadHeader(object):
    """Update payload header struct."""

    # Header constants; sizes are in bytes.
    _MAGIC = 'CrAU'
    _VERSION_SIZE = 8
    _MANIFEST_LEN_SIZE = 8
    _METADATA_SIGNATURE_LEN_SIZE = 4

    def __init__(self):
      # All fields are populated by ReadFromPayload().
      self.version = None
      self.manifest_len = None
      self.metadata_signature_len = None
      self.size = None

    def ReadFromPayload(self, payload_file, hasher=None):
      """Reads the payload header from a file.

      Reads the payload header from the |payload_file| and updates the |hasher|
      if one is passed. The parsed header is stored in the _PayloadHeader
      instance attributes.

      Args:
        payload_file: a file object
        hasher: an optional hasher to pass the value through

      Returns:
        None.

      Raises:
        PayloadError if a read error occurred or the header is invalid.
      """
      # Verify magic
      magic = common.Read(payload_file, len(self._MAGIC), hasher=hasher)
      if magic != self._MAGIC:
        raise PayloadError('invalid payload magic: %s' % magic)

      self.version = _ReadInt(payload_file, self._VERSION_SIZE, True,
                              hasher=hasher)
      self.manifest_len = _ReadInt(payload_file, self._MANIFEST_LEN_SIZE, True,
                                   hasher=hasher)
      self.size = (len(self._MAGIC) + self._VERSION_SIZE +
                   self._MANIFEST_LEN_SIZE)
      self.metadata_signature_len = 0

      # Brillo (major version 2) payloads carry an additional metadata
      # signature length field in the header.
      if self.version == common.BRILLO_MAJOR_PAYLOAD_VERSION:
        self.size += self._METADATA_SIGNATURE_LEN_SIZE
        self.metadata_signature_len = _ReadInt(
            payload_file, self._METADATA_SIGNATURE_LEN_SIZE, True,
            hasher=hasher)

  def __init__(self, payload_file, payload_file_offset=0):
    """Initialize the payload object.

    Args:
      payload_file: update payload file object open for reading
      payload_file_offset: the offset of the actual payload
    """
    self.payload_file = payload_file
    self.payload_file_offset = payload_file_offset
    # The following are populated by Init().
    self.manifest_hasher = None
    self.is_init = False
    self.header = None
    self.manifest = None
    self.data_offset = None
    self.metadata_signature = None
    self.metadata_size = None

  def _ReadHeader(self):
    """Reads and returns the payload header.

    Returns:
      A payload header object.

    Raises:
      PayloadError if a read error occurred.
    """
    header = self._PayloadHeader()
    header.ReadFromPayload(self.payload_file, self.manifest_hasher)
    return header

  def _ReadManifest(self):
    """Reads and returns the payload manifest.

    Returns:
      A string containing the payload manifest in binary form.

    Raises:
      PayloadError if a read error occurred.
    """
    if not self.header:
      raise PayloadError('payload header not present')

    return common.Read(self.payload_file, self.header.manifest_len,
                       hasher=self.manifest_hasher)

  def _ReadMetadataSignature(self):
    """Reads and returns the metadata signatures.

    Returns:
      A string containing the metadata signatures protobuf in binary form or
      an empty string if no metadata signature found in the payload.

    Raises:
      PayloadError if a read error occurred.
    """
    if not self.header:
      raise PayloadError('payload header not present')

    # The metadata signature blob sits right after the manifest.
    return common.Read(
        self.payload_file, self.header.metadata_signature_len,
        offset=self.payload_file_offset + self.header.size +
        self.header.manifest_len)

  def ReadDataBlob(self, offset, length):
    """Reads and returns a single data blob from the update payload.

    Args:
      offset: offset to the beginning of the blob from the end of the manifest
      length: the blob's length

    Returns:
      A string containing the raw blob data.

    Raises:
      PayloadError if a read error occurred.
    """
    return common.Read(self.payload_file, length,
                       offset=self.payload_file_offset + self.data_offset +
                       offset)

  def Init(self):
    """Initializes the payload object.

    This is a prerequisite for any other public API call.

    Raises:
      PayloadError if object already initialized or fails to initialize
      correctly.
    """
    if self.is_init:
      raise PayloadError('payload object already initialized')

    self.manifest_hasher = hashlib.sha256()

    # Read the file header.
    self.payload_file.seek(self.payload_file_offset)
    self.header = self._ReadHeader()

    # Read the manifest.
    manifest_raw = self._ReadManifest()
    self.manifest = update_metadata_pb2.DeltaArchiveManifest()
    self.manifest.ParseFromString(manifest_raw)

    # Read the metadata signature (if any).
    metadata_signature_raw = self._ReadMetadataSignature()
    if metadata_signature_raw:
      self.metadata_signature = update_metadata_pb2.Signatures()
      self.metadata_signature.ParseFromString(metadata_signature_raw)

    # Data blobs start right after the (header + manifest + metadata
    # signature) prefix.
    self.metadata_size = self.header.size + self.header.manifest_len
    self.data_offset = self.metadata_size + self.header.metadata_signature_len

    self.is_init = True

  def Describe(self):
    """Emits the payload embedded description data to standard output."""
    def _DescribeImageInfo(description, image_info):
      """Display info about the image."""
      def _DisplayIndentedValue(name, value):
        print('  {:<14} {}'.format(name+':', value))

      print('%s:' % description)
      _DisplayIndentedValue('Channel', image_info.channel)
      _DisplayIndentedValue('Board', image_info.board)
      _DisplayIndentedValue('Version', image_info.version)
      _DisplayIndentedValue('Key', image_info.key)

      # Only print build channel/version when they differ from the image's.
      if image_info.build_channel != image_info.channel:
        _DisplayIndentedValue('Build channel', image_info.build_channel)

      if image_info.build_version != image_info.version:
        _DisplayIndentedValue('Build version', image_info.build_version)

    if self.manifest.HasField('old_image_info'):
      _DescribeImageInfo('Old Image', self.manifest.old_image_info)

    if self.manifest.HasField('new_image_info'):
      _DescribeImageInfo('New Image', self.manifest.new_image_info)

  def _AssertInit(self):
    """Raises an exception if the object was not initialized."""
    if not self.is_init:
      raise PayloadError('payload object not initialized')

  def ResetFile(self):
    """Resets the offset of the payload file to right past the manifest."""
    self.payload_file.seek(self.payload_file_offset + self.data_offset)

  def IsDelta(self):
    """Returns True iff the payload appears to be a delta."""
    self._AssertInit()
    # A payload is a delta if any partition carries "old" (source) info.
    return (self.manifest.HasField('old_kernel_info') or
            self.manifest.HasField('old_rootfs_info') or
            any(partition.HasField('old_partition_info')
                for partition in self.manifest.partitions))

  def IsFull(self):
    """Returns True iff the payload appears to be a full."""
    return not self.IsDelta()

  def Check(self, pubkey_file_name=None, metadata_sig_file=None,
            report_out_file=None, assert_type=None, block_size=0,
            rootfs_part_size=0, kernel_part_size=0, allow_unhashed=False,
            disabled_tests=()):
    """Checks the payload integrity.

    Args:
      pubkey_file_name: public key used for signature verification
      metadata_sig_file: metadata signature, if verification is desired
      report_out_file: file object to dump the report to
      assert_type: assert that payload is either 'full' or 'delta'
      block_size: expected filesystem / payload block size
      rootfs_part_size: the size of (physical) rootfs partitions in bytes
      kernel_part_size: the size of (physical) kernel partitions in bytes
      allow_unhashed: allow unhashed operation blobs
      disabled_tests: list of tests to disable

    Raises:
      PayloadError if payload verification failed.
    """
    self._AssertInit()

    # Create a short-lived payload checker object and run it.
    helper = checker.PayloadChecker(
        self, assert_type=assert_type, block_size=block_size,
        allow_unhashed=allow_unhashed, disabled_tests=disabled_tests)
    helper.Run(pubkey_file_name=pubkey_file_name,
               metadata_sig_file=metadata_sig_file,
               rootfs_part_size=rootfs_part_size,
               kernel_part_size=kernel_part_size,
               report_out_file=report_out_file)

  def Apply(self, new_kernel_part, new_rootfs_part, old_kernel_part=None,
            old_rootfs_part=None, bsdiff_in_place=True, bspatch_path=None,
            puffpatch_path=None, truncate_to_expected_size=True):
    """Applies the update payload.

    Args:
      new_kernel_part: name of dest kernel partition file
      new_rootfs_part: name of dest rootfs partition file
      old_kernel_part: name of source kernel partition file (optional)
      old_rootfs_part: name of source rootfs partition file (optional)
      bsdiff_in_place: whether to perform BSDIFF operations in-place (optional)
      bspatch_path: path to the bspatch binary (optional)
      puffpatch_path: path to the puffpatch binary (optional)
      truncate_to_expected_size: whether to truncate the resulting partitions
                                 to their expected sizes, as specified in the
                                 payload (optional)

    Raises:
      PayloadError if payload application failed.
    """
    self._AssertInit()

    # Create a short-lived payload applier object and run it.
    helper = applier.PayloadApplier(
        self, bsdiff_in_place=bsdiff_in_place, bspatch_path=bspatch_path,
        puffpatch_path=puffpatch_path,
        truncate_to_expected_size=truncate_to_expected_size)
    helper.Run(new_kernel_part, new_rootfs_part,
               old_kernel_part=old_kernel_part,
               old_rootfs_part=old_rootfs_part)

View File

@@ -0,0 +1,369 @@
#
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utilities for unit testing."""
from __future__ import print_function
import cStringIO
import hashlib
import os
import struct
import subprocess
from update_payload import common
from update_payload import payload
from update_payload import update_metadata_pb2
class TestError(Exception):
  """An error that occurred during testing of update payload code."""
# Private/public RSA keys used for testing; located alongside this module.
_PRIVKEY_FILE_NAME = os.path.join(os.path.dirname(__file__),
                                  'payload-test-key.pem')
_PUBKEY_FILE_NAME = os.path.join(os.path.dirname(__file__),
                                 'payload-test-key.pub')
def KiB(count):
  """Returns |count| kibibytes expressed in bytes."""
  return count * (1 << 10)
def MiB(count):
  """Returns |count| mebibytes expressed in bytes."""
  return count * (1 << 20)
def GiB(count):
  """Returns |count| gibibytes expressed in bytes."""
  return count * (1 << 30)
def _WriteInt(file_obj, size, is_unsigned, val):
  """Writes a binary-encoded integer to a file.

  It will do the correct conversion based on the reported size and whether or
  not a signed number is expected. Assumes a network (big-endian) byte
  ordering.

  Args:
    file_obj: a file object
    size: the integer size in bytes (2, 4 or 8)
    is_unsigned: whether it is signed or not
    val: integer value to encode

  Raises:
    PayloadError if a write error occurred.
  """
  try:
    file_obj.write(struct.pack(common.IntPackingFmtStr(size, is_unsigned), val))
  except IOError as e:
    # 'except ... as ...' is valid on Python 2.6+ and required on Python 3;
    # the original 'except IOError, e' comma form is a Python-2-only syntax.
    raise payload.PayloadError('error writing to file (%s): %s' %
                               (file_obj.name, e))
def _SetMsgField(msg, field_name, val):
"""Sets or clears a field in a protobuf message."""
if val is None:
msg.ClearField(field_name)
else:
setattr(msg, field_name, val)
def SignSha256(data, privkey_file_name):
  """Signs the data's SHA256 hash with an RSA private key.

  Args:
    data: the data whose SHA256 hash we want to sign
    privkey_file_name: private key used for signing data

  Returns:
    The signature string, prepended with an ASN1 header.

  Raises:
    TestError if something goes wrong.
  """
  # Prefix the digest with the ASN1 header openssl expects for raw signing.
  hash_to_sign = common.SIG_ASN1_HEADER + hashlib.sha256(data).digest()
  cmd = ['openssl', 'rsautl', '-sign', '-inkey', privkey_file_name]
  try:
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    signature, _ = proc.communicate(input=hash_to_sign)
  except Exception as e:
    raise TestError('signing subprocess failed: %s' % e)

  return signature
class SignaturesGenerator(object):
  """Generates a payload signatures data block."""

  def __init__(self):
    # Backing Signatures protobuf; AddSig() appends entries to it.
    self.sigs = update_metadata_pb2.Signatures()

  def AddSig(self, version, data):
    """Adds a signature to the signature sequence.

    Args:
      version: signature version (None means do not assign)
      data: signature binary data (None means do not assign)
    """
    new_sig = self.sigs.signatures.add()
    if version is not None:
      new_sig.version = version
    if data is not None:
      new_sig.data = data

  def ToBinary(self):
    """Returns the binary representation of the signature block."""
    return self.sigs.SerializeToString()
class PayloadGenerator(object):
  """Generates an update payload allowing low-level control.

  Attributes:
    manifest: the protobuf containing the payload manifest
    version: the payload version identifier
    block_size: the block size pertaining to update operations
  """

  def __init__(self, version=1):
    self.manifest = update_metadata_pb2.DeltaArchiveManifest()
    self.version = version
    self.block_size = 0

  @staticmethod
  def _WriteExtent(ex, val):
    """Fills in an Extent message from a (start_block, num_blocks) pair."""
    start_block, num_blocks = val
    _SetMsgField(ex, 'start_block', start_block)
    _SetMsgField(ex, 'num_blocks', num_blocks)

  @staticmethod
  def _AddValuesToRepeatedField(repeated_field, values, write_func):
    """Adds values to a repeated message field."""
    if values:
      for val in values:
        new_item = repeated_field.add()
        write_func(new_item, val)

  @staticmethod
  def _AddExtents(extents_field, values):
    """Adds extents to an extents field."""
    PayloadGenerator._AddValuesToRepeatedField(
        extents_field, values, PayloadGenerator._WriteExtent)

  def SetBlockSize(self, block_size):
    """Sets the payload's block size."""
    self.block_size = block_size
    _SetMsgField(self.manifest, 'block_size', block_size)

  def SetPartInfo(self, is_kernel, is_new, part_size, part_hash):
    """Set the partition info entry.

    Args:
      is_kernel: whether this is kernel partition info
      is_new: whether to set old (False) or new (True) info
      part_size: the partition size (in fact, filesystem size)
      part_hash: the partition hash
    """
    if is_kernel:
      part_info = (self.manifest.new_kernel_info if is_new
                   else self.manifest.old_kernel_info)
    else:
      part_info = (self.manifest.new_rootfs_info if is_new
                   else self.manifest.old_rootfs_info)
    _SetMsgField(part_info, 'size', part_size)
    _SetMsgField(part_info, 'hash', part_hash)

  def AddOperation(self, is_kernel, op_type, data_offset=None,
                   data_length=None, src_extents=None, src_length=None,
                   dst_extents=None, dst_length=None, data_sha256_hash=None):
    """Adds an InstallOperation entry."""
    # None-valued arguments leave the corresponding proto field unset
    # (via _SetMsgField's clear-on-None semantics).
    operations = (self.manifest.kernel_install_operations if is_kernel
                  else self.manifest.install_operations)

    op = operations.add()
    op.type = op_type

    _SetMsgField(op, 'data_offset', data_offset)
    _SetMsgField(op, 'data_length', data_length)

    self._AddExtents(op.src_extents, src_extents)
    _SetMsgField(op, 'src_length', src_length)

    self._AddExtents(op.dst_extents, dst_extents)
    _SetMsgField(op, 'dst_length', dst_length)

    _SetMsgField(op, 'data_sha256_hash', data_sha256_hash)

  def SetSignatures(self, sigs_offset, sigs_size):
    """Set the payload's signature block descriptors."""
    _SetMsgField(self.manifest, 'signatures_offset', sigs_offset)
    _SetMsgField(self.manifest, 'signatures_size', sigs_size)

  def SetMinorVersion(self, minor_version):
    """Set the payload's minor version field."""
    _SetMsgField(self.manifest, 'minor_version', minor_version)

  def _WriteHeaderToFile(self, file_obj, manifest_len):
    """Writes a payload header to a file."""
    # We need to access protected members in Payload for writing the header.
    # pylint: disable=W0212
    file_obj.write(payload.Payload._PayloadHeader._MAGIC)
    _WriteInt(file_obj, payload.Payload._PayloadHeader._VERSION_SIZE, True,
              self.version)
    _WriteInt(file_obj, payload.Payload._PayloadHeader._MANIFEST_LEN_SIZE, True,
              manifest_len)

  def WriteToFile(self, file_obj, manifest_len=-1, data_blobs=None,
                  sigs_data=None, padding=None):
    """Writes the payload content to a file.

    Args:
      file_obj: a file object open for writing
      manifest_len: manifest len to dump (otherwise computed automatically)
      data_blobs: a list of data blobs to be concatenated to the payload
      sigs_data: a binary Signatures message to be concatenated to the payload
      padding: stuff to dump past the normal data blobs provided (optional)
    """
    manifest = self.manifest.SerializeToString()
    if manifest_len < 0:
      manifest_len = len(manifest)
    self._WriteHeaderToFile(file_obj, manifest_len)
    file_obj.write(manifest)
    if data_blobs:
      for data_blob in data_blobs:
        file_obj.write(data_blob)
    if sigs_data:
      file_obj.write(sigs_data)
    if padding:
      file_obj.write(padding)
class EnhancedPayloadGenerator(PayloadGenerator):
  """Payload generator with automatic handling of data blobs.

  Attributes:
    data_blobs: a list of blobs, in the order they were added
    curr_offset: the currently consumed offset of blobs added to the payload
  """

  def __init__(self):
    super(EnhancedPayloadGenerator, self).__init__()
    # Blobs appended so far, in payload order.
    self.data_blobs = []
    # Next free byte offset within the payload's data section.
    self.curr_offset = 0

  def AddData(self, data_blob):
    """Adds a (possibly orphan) data blob.

    Args:
      data_blob: the raw blob to append after those already added

    Returns:
      A (data_length, data_offset) pair: the blob's size and its starting
      offset within the payload's data section.
    """
    data_length = len(data_blob)
    data_offset = self.curr_offset
    self.curr_offset += data_length
    self.data_blobs.append(data_blob)
    return data_length, data_offset

  def AddOperationWithData(self, is_kernel, op_type, src_extents=None,
                           src_length=None, dst_extents=None, dst_length=None,
                           data_blob=None, do_hash_data_blob=True):
    """Adds an install operation and associated data blob.

    This takes care of obtaining a hash of the data blob (if so instructed)
    and appending it to the internally maintained list of blobs, including the
    necessary offset/length accounting.

    Args:
      is_kernel: whether this is a kernel (True) or rootfs (False) operation
      op_type: one of REPLACE, REPLACE_BZ, REPLACE_XZ, MOVE or BSDIFF
      src_extents: list of (start, length) pairs indicating src block ranges
      src_length: size of the src data in bytes (needed for BSDIFF)
      dst_extents: list of (start, length) pairs indicating dst block ranges
      dst_length: size of the dst data in bytes (needed for BSDIFF)
      data_blob: a data blob associated with this operation
      do_hash_data_blob: whether or not to compute and add a data blob hash
    """
    data_offset = data_length = data_sha256_hash = None
    if data_blob is not None:
      if do_hash_data_blob:
        data_sha256_hash = hashlib.sha256(data_blob).digest()
      # Register the blob first so the operation can reference its location.
      data_length, data_offset = self.AddData(data_blob)

    self.AddOperation(is_kernel, op_type, data_offset=data_offset,
                      data_length=data_length, src_extents=src_extents,
                      src_length=src_length, dst_extents=dst_extents,
                      dst_length=dst_length, data_sha256_hash=data_sha256_hash)

  def WriteToFileWithData(self, file_obj, sigs_data=None,
                          privkey_file_name=None,
                          do_add_pseudo_operation=False,
                          is_pseudo_in_kernel=False, padding=None):
    """Writes the payload content to a file, optionally signing the content.

    Args:
      file_obj: a file object open for writing
      sigs_data: signatures blob to be appended to the payload (optional;
                 payload signature fields assumed to be preset by the caller)
      privkey_file_name: key used for signing the payload (optional; used only
                         if explicit signatures blob not provided)
      do_add_pseudo_operation: whether a pseudo-operation should be added to
                               account for the signature blob
      is_pseudo_in_kernel: whether the pseudo-operation should be added to
                           kernel (True) or rootfs (False) operations
      padding: stuff to dump past the normal data blobs provided (optional)

    Raises:
      TestError: if arguments are inconsistent or something goes wrong.
    """
    sigs_len = len(sigs_data) if sigs_data else 0

    # Do we need to generate a genuine signatures blob?
    do_generate_sigs_data = sigs_data is None and privkey_file_name

    if do_generate_sigs_data:
      # First, sign some arbitrary data to obtain the size of a signature blob.
      # The final signature has the same size since the key is the same.
      fake_sig = SignSha256('fake-payload-data', privkey_file_name)
      fake_sigs_gen = SignaturesGenerator()
      fake_sigs_gen.AddSig(1, fake_sig)
      sigs_len = len(fake_sigs_gen.ToBinary())

    # Update the payload with proper signature attributes; the signatures blob
    # is placed right after all data blobs added so far (curr_offset).
    self.SetSignatures(self.curr_offset, sigs_len)

    # Add a pseudo-operation to account for the signature blob, if requested.
    if do_add_pseudo_operation:
      if not self.block_size:
        raise TestError('cannot add pseudo-operation without knowing the '
                        'payload block size')
      # Round the block count up (Python 2 '/' on ints is floor division).
      self.AddOperation(
          is_pseudo_in_kernel, common.OpType.REPLACE,
          data_offset=self.curr_offset, data_length=sigs_len,
          dst_extents=[(common.PSEUDO_EXTENT_MARKER,
                        (sigs_len + self.block_size - 1) / self.block_size)])

    if do_generate_sigs_data:
      # Once all payload fields are updated, dump and sign it. The payload is
      # written twice: first to an in-memory buffer to compute the signature,
      # then (below) to the real output with the signature appended.
      temp_payload_file = cStringIO.StringIO()
      self.WriteToFile(temp_payload_file, data_blobs=self.data_blobs)
      sig = SignSha256(temp_payload_file.getvalue(), privkey_file_name)
      sigs_gen = SignaturesGenerator()
      sigs_gen.AddSig(1, sig)
      sigs_data = sigs_gen.ToBinary()
      assert len(sigs_data) == sigs_len, 'signature blob lengths mismatch'

    # Dump the whole thing, complete with data and signature blob, to a file.
    self.WriteToFile(file_obj, data_blobs=self.data_blobs, sigs_data=sigs_data,
                     padding=padding)

View File

@@ -0,0 +1,9 @@
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1Bg9BnjWhX3jJyECeXqF
O28nkYTF1NHWLlFHgzAGg+ysva22BL3S5LlsNejnYVg/xzx3izvAQyOF3I1TJVOy
2fH1DoZOWyKuckMyUrFQbO6OV1VIvPUPKckHadWcXSsHj2lBdDPH9xRDEBsXeztf
nAGBD8GlAyTU7iH+Bf+xzyK9k4BmITf4Nx4xWhRZ6gm2Fc2SEP3x5N5fohkLv5ZP
kFr0fj5wUK+0XF95rkGFBLIq2XACS3dmxMFToFl1HMM1HonUg9TAH+3dVH93zue1
y81mkTuGnNX+zYya5ov2kD8zW1V10iTOSJfOlho5T8FpKbG37o3yYcUiyMHKO1Iv
PQIDAQAB
-----END PUBLIC KEY-----

View File

@@ -0,0 +1,631 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: update_metadata.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
# File descriptor for update_metadata.proto. The serialized_pb blob is the
# wire-encoded FileDescriptorProto; the serialized_start/serialized_end values
# on the descriptors below are byte offsets into this blob.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='update_metadata.proto',
  package='chromeos_update_engine',
  serialized_pb='\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xe6\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\r\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\r\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xa5\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x08\n\x04MOVE\x10\x02\x12\n\n\x06\x42SDIFF\x10\x03\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x0c\n\x08PUFFDIFF\x10\t\x12\x11\n\rBROTLI_BSDIFF\x10\n\"\xa6\x03\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\"\xc4\x05\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdateB\x02H\x03')
# Enum descriptor for InstallOperation.Type: the kinds of install operations a
# payload may contain (full replacements, diffs, zero/discard, etc.).
_INSTALLOPERATION_TYPE = _descriptor.EnumDescriptor(
  name='Type',
  full_name='chromeos_update_engine.InstallOperation.Type',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='REPLACE', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='REPLACE_BZ', index=1, number=1,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='MOVE', index=2, number=2,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='BSDIFF', index=3, number=3,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='SOURCE_COPY', index=4, number=4,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='SOURCE_BSDIFF', index=5, number=5,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ZERO', index=6, number=6,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='DISCARD', index=7, number=7,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='REPLACE_XZ', index=8, number=8,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='PUFFDIFF', index=9, number=9,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='BROTLI_BSDIFF', index=10, number=10,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=712,
  serialized_end=877,
)
# Message descriptor for Extent: a contiguous block range (start + count).
_EXTENT = _descriptor.Descriptor(
  name='Extent',
  full_name='chromeos_update_engine.Extent',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='start_block', full_name='chromeos_update_engine.Extent.start_block', index=0,
      number=1, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='num_blocks', full_name='chromeos_update_engine.Extent.num_blocks', index=1,
      number=2, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=49,
  serialized_end=98,
)
# Message descriptor for the nested Signatures.Signature message
# (signature version plus the raw signature bytes).
_SIGNATURES_SIGNATURE = _descriptor.Descriptor(
  name='Signature',
  full_name='chromeos_update_engine.Signatures.Signature',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='version', full_name='chromeos_update_engine.Signatures.Signature.version', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='data', full_name='chromeos_update_engine.Signatures.Signature.data', index=1,
      number=2, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value="",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=180,
  serialized_end=222,
)
# Message descriptor for Signatures: a repeated list of Signature entries.
_SIGNATURES = _descriptor.Descriptor(
  name='Signatures',
  full_name='chromeos_update_engine.Signatures',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='signatures', full_name='chromeos_update_engine.Signatures.signatures', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_SIGNATURES_SIGNATURE, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=100,
  serialized_end=222,
)
# Message descriptor for PartitionInfo: a partition image's size and hash.
_PARTITIONINFO = _descriptor.Descriptor(
  name='PartitionInfo',
  full_name='chromeos_update_engine.PartitionInfo',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='size', full_name='chromeos_update_engine.PartitionInfo.size', index=0,
      number=1, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='hash', full_name='chromeos_update_engine.PartitionInfo.hash', index=1,
      number=2, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value="",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=224,
  serialized_end=267,
)
# Message descriptor for ImageInfo: build identification strings for an image.
_IMAGEINFO = _descriptor.Descriptor(
  name='ImageInfo',
  full_name='chromeos_update_engine.ImageInfo',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='board', full_name='chromeos_update_engine.ImageInfo.board', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='key', full_name='chromeos_update_engine.ImageInfo.key', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='channel', full_name='chromeos_update_engine.ImageInfo.channel', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='version', full_name='chromeos_update_engine.ImageInfo.version', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='build_channel', full_name='chromeos_update_engine.ImageInfo.build_channel', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='build_version', full_name='chromeos_update_engine.ImageInfo.build_version', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=269,
  serialized_end=388,
)
# Message descriptor for InstallOperation: one payload operation (its type,
# data blob location/hash, and source/destination extents).
_INSTALLOPERATION = _descriptor.Descriptor(
  name='InstallOperation',
  full_name='chromeos_update_engine.InstallOperation',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='type', full_name='chromeos_update_engine.InstallOperation.type', index=0,
      number=1, type=14, cpp_type=8, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='data_offset', full_name='chromeos_update_engine.InstallOperation.data_offset', index=1,
      number=2, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='data_length', full_name='chromeos_update_engine.InstallOperation.data_length', index=2,
      number=3, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='src_extents', full_name='chromeos_update_engine.InstallOperation.src_extents', index=3,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='src_length', full_name='chromeos_update_engine.InstallOperation.src_length', index=4,
      number=5, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='dst_extents', full_name='chromeos_update_engine.InstallOperation.dst_extents', index=5,
      number=6, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='dst_length', full_name='chromeos_update_engine.InstallOperation.dst_length', index=6,
      number=7, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='data_sha256_hash', full_name='chromeos_update_engine.InstallOperation.data_sha256_hash', index=7,
      number=8, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value="",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='src_sha256_hash', full_name='chromeos_update_engine.InstallOperation.src_sha256_hash', index=8,
      number=9, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value="",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _INSTALLOPERATION_TYPE,
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=391,
  serialized_end=877,
)
# Message descriptor for PartitionUpdate: per-partition update description
# used by major-version-2 payloads (name, postinstall info, operations).
_PARTITIONUPDATE = _descriptor.Descriptor(
  name='PartitionUpdate',
  full_name='chromeos_update_engine.PartitionUpdate',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='partition_name', full_name='chromeos_update_engine.PartitionUpdate.partition_name', index=0,
      number=1, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='run_postinstall', full_name='chromeos_update_engine.PartitionUpdate.run_postinstall', index=1,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='postinstall_path', full_name='chromeos_update_engine.PartitionUpdate.postinstall_path', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='filesystem_type', full_name='chromeos_update_engine.PartitionUpdate.filesystem_type', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='new_partition_signature', full_name='chromeos_update_engine.PartitionUpdate.new_partition_signature', index=4,
      number=5, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='old_partition_info', full_name='chromeos_update_engine.PartitionUpdate.old_partition_info', index=5,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='new_partition_info', full_name='chromeos_update_engine.PartitionUpdate.new_partition_info', index=6,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='operations', full_name='chromeos_update_engine.PartitionUpdate.operations', index=7,
      number=8, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='postinstall_optional', full_name='chromeos_update_engine.PartitionUpdate.postinstall_optional', index=8,
      number=9, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=880,
  serialized_end=1302,
)
# Message descriptor for DeltaArchiveManifest: the payload's top-level
# manifest (operations, block size, signature location, partition info).
_DELTAARCHIVEMANIFEST = _descriptor.Descriptor(
  name='DeltaArchiveManifest',
  full_name='chromeos_update_engine.DeltaArchiveManifest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.install_operations', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='kernel_install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.kernel_install_operations', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='block_size', full_name='chromeos_update_engine.DeltaArchiveManifest.block_size', index=2,
      number=3, type=13, cpp_type=3, label=1,
      has_default_value=True, default_value=4096,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='signatures_offset', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_offset', index=3,
      number=4, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='signatures_size', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_size', index=4,
      number=5, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='old_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_kernel_info', index=5,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='new_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_kernel_info', index=6,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='old_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_rootfs_info', index=7,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='new_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_rootfs_info', index=8,
      number=9, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='old_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_image_info', index=9,
      number=10, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='new_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_image_info', index=10,
      number=11, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='minor_version', full_name='chromeos_update_engine.DeltaArchiveManifest.minor_version', index=11,
      number=12, type=13, cpp_type=3, label=1,
      has_default_value=True, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='partitions', full_name='chromeos_update_engine.DeltaArchiveManifest.partitions', index=12,
      number=13, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=1305,
  serialized_end=2013,
)
# Resolve the cross-references the flat declarations above could not express:
# nested-message/enum containment, message- and enum-typed fields, and
# registration of each top-level message type on the file DESCRIPTOR.
_SIGNATURES_SIGNATURE.containing_type = _SIGNATURES;
_SIGNATURES.fields_by_name['signatures'].message_type = _SIGNATURES_SIGNATURE
_INSTALLOPERATION.fields_by_name['type'].enum_type = _INSTALLOPERATION_TYPE
_INSTALLOPERATION.fields_by_name['src_extents'].message_type = _EXTENT
_INSTALLOPERATION.fields_by_name['dst_extents'].message_type = _EXTENT
_INSTALLOPERATION_TYPE.containing_type = _INSTALLOPERATION;
_PARTITIONUPDATE.fields_by_name['new_partition_signature'].message_type = _SIGNATURES_SIGNATURE
_PARTITIONUPDATE.fields_by_name['old_partition_info'].message_type = _PARTITIONINFO
_PARTITIONUPDATE.fields_by_name['new_partition_info'].message_type = _PARTITIONINFO
_PARTITIONUPDATE.fields_by_name['operations'].message_type = _INSTALLOPERATION
_DELTAARCHIVEMANIFEST.fields_by_name['install_operations'].message_type = _INSTALLOPERATION
_DELTAARCHIVEMANIFEST.fields_by_name['kernel_install_operations'].message_type = _INSTALLOPERATION
_DELTAARCHIVEMANIFEST.fields_by_name['old_kernel_info'].message_type = _PARTITIONINFO
_DELTAARCHIVEMANIFEST.fields_by_name['new_kernel_info'].message_type = _PARTITIONINFO
_DELTAARCHIVEMANIFEST.fields_by_name['old_rootfs_info'].message_type = _PARTITIONINFO
_DELTAARCHIVEMANIFEST.fields_by_name['new_rootfs_info'].message_type = _PARTITIONINFO
_DELTAARCHIVEMANIFEST.fields_by_name['old_image_info'].message_type = _IMAGEINFO
_DELTAARCHIVEMANIFEST.fields_by_name['new_image_info'].message_type = _IMAGEINFO
_DELTAARCHIVEMANIFEST.fields_by_name['partitions'].message_type = _PARTITIONUPDATE
DESCRIPTOR.message_types_by_name['Extent'] = _EXTENT
DESCRIPTOR.message_types_by_name['Signatures'] = _SIGNATURES
DESCRIPTOR.message_types_by_name['PartitionInfo'] = _PARTITIONINFO
DESCRIPTOR.message_types_by_name['ImageInfo'] = _IMAGEINFO
DESCRIPTOR.message_types_by_name['InstallOperation'] = _INSTALLOPERATION
DESCRIPTOR.message_types_by_name['PartitionUpdate'] = _PARTITIONUPDATE
DESCRIPTOR.message_types_by_name['DeltaArchiveManifest'] = _DELTAARCHIVEMANIFEST
class Extent(_message.Message):
  """Generated message class for chromeos_update_engine.Extent.

  The Python 2 metaclass builds all fields/accessors from DESCRIPTOR.
  """
  __metaclass__ = _reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _EXTENT

  # @@protoc_insertion_point(class_scope:chromeos_update_engine.Extent)
class Signatures(_message.Message):
  """Generated message class for chromeos_update_engine.Signatures."""
  __metaclass__ = _reflection.GeneratedProtocolMessageType

  class Signature(_message.Message):
    """Generated nested message class for Signatures.Signature."""
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SIGNATURES_SIGNATURE

    # @@protoc_insertion_point(class_scope:chromeos_update_engine.Signatures.Signature)

  DESCRIPTOR = _SIGNATURES

  # @@protoc_insertion_point(class_scope:chromeos_update_engine.Signatures)
class PartitionInfo(_message.Message):
  """Generated message class for chromeos_update_engine.PartitionInfo."""
  __metaclass__ = _reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _PARTITIONINFO

  # @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionInfo)
class ImageInfo(_message.Message):
  """Generated message class for chromeos_update_engine.ImageInfo."""
  __metaclass__ = _reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _IMAGEINFO

  # @@protoc_insertion_point(class_scope:chromeos_update_engine.ImageInfo)
class InstallOperation(_message.Message):
  """Generated message class for chromeos_update_engine.InstallOperation."""
  __metaclass__ = _reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _INSTALLOPERATION

  # @@protoc_insertion_point(class_scope:chromeos_update_engine.InstallOperation)
class PartitionUpdate(_message.Message):
  """Generated message class for chromeos_update_engine.PartitionUpdate."""
  __metaclass__ = _reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _PARTITIONUPDATE

  # @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionUpdate)
class DeltaArchiveManifest(_message.Message):
  """Generated message class for chromeos_update_engine.DeltaArchiveManifest."""
  __metaclass__ = _reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _DELTAARCHIVEMANIFEST

  # @@protoc_insertion_point(class_scope:chromeos_update_engine.DeltaArchiveManifest)
# Attach the file-level options parsed from their serialized form.
# 'H\003' is FileOptions field 9 (optimize_for) with value 3 (LITE_RUNTIME).
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), 'H\003')
# @@protoc_insertion_point(module_scope)