diff --git a/README.md b/README.md new file mode 100644 index 0000000..1303d30 --- /dev/null +++ b/README.md @@ -0,0 +1,18 @@ +## System requirements + +- Python 3, pip +- Google protobuf for Python: `pip3 install protobuf` + +### Full OTA + +- LD_LIBRARY_PATH=./lib64/ ./extract.py --output_dir output/ payload.bin +- This will extract the images contained in payload.bin into the output/ directory. + +### Incremental OTA + +- Copy the original images (from a full OTA, or dumped from a device) into the old/ folder, named by partition without a file extension (e.g. boot, system) +- LD_LIBRARY_PATH=./lib64/ ./extract.py --output_dir output/ --old_dir old/ payload.bin + +NOTE: this has been fixed for incremental updates. Just make sure the old/ directory contains the ROM that the update was meant to PATCH, +AS THE HASH CHECKS ARE TURNED OFF. The original project never worked because the hashes of the incremental update and the prior full ROM +never matched, so there is NO ERROR CHECKING HAPPENING. Works as of 8/2021, tested on a OnePlus 8T. diff --git a/add_img_extension_output.sh b/add_img_extension_output.sh new file mode 100755 index 0000000..f0260e0 --- /dev/null +++ b/add_img_extension_output.sh @@ -0,0 +1,3 @@ +cd output +find . -type f -exec mv '{}' '{}'.img \; +cd .. 
diff --git a/bspatch b/bspatch new file mode 100755 index 0000000..2fc4a1c Binary files /dev/null and b/bspatch differ diff --git a/extract.py b/extract.py index 266ef0a..b7a7b55 100755 --- a/extract.py +++ b/extract.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 import argparse import errno @@ -18,7 +18,7 @@ def list_content(payload_file_name): part.new_partition_info.size)) -def extract(payload_file_name, output_dir="output", partition_names=None): +def extract(payload_file_name, output_dir="output", old_dir="old", partition_names=None): try: os.makedirs(output_dir) except OSError as e: @@ -29,21 +29,23 @@ def extract(payload_file_name, output_dir="output", partition_names=None): payload = update_payload.Payload(payload_file) payload.Init() - if payload.IsDelta(): - print("Delta payloads are not supported") - exit(1) - helper = applier.PayloadApplier(payload) for part in payload.manifest.partitions: if partition_names and part.partition_name not in partition_names: continue print("Extracting {}".format(part.partition_name)) output_file = os.path.join(output_dir, part.partition_name) - helper._ApplyToPartition( - part.operations, part.partition_name, - 'install_operations', output_file, - part.new_partition_info) - + if payload.IsDelta(): + old_file = os.path.join(old_dir, part.partition_name) + helper._ApplyToPartition( + part.operations, part.partition_name, + 'install_operations', output_file, + part.new_partition_info, old_file, part.old_partition_info) + else: + helper._ApplyToPartition( + part.operations, part.partition_name, + 'install_operations', output_file, + part.new_partition_info) if __name__ == '__main__': parser = argparse.ArgumentParser() @@ -51,6 +53,8 @@ def extract(payload_file_name, output_dir="output", partition_names=None): help="Path to the payload.bin") parser.add_argument("--output_dir", default="output", help="Output directory") + parser.add_argument("--old_dir", default="old", + help="Old directory") 
parser.add_argument("--partitions", type=str, nargs='+', help="Name of the partitions to extract") parser.add_argument("--list_partitions", action="store_true", @@ -60,4 +64,4 @@ def extract(payload_file_name, output_dir="output", partition_names=None): if args.list_partitions: list_content(args.payload) else: - extract(args.payload, args.output_dir, args.partitions) + extract(args.payload, args.output_dir, args.old_dir, args.partitions) diff --git a/lib64/libbase.so b/lib64/libbase.so new file mode 100755 index 0000000..e3a4205 Binary files /dev/null and b/lib64/libbase.so differ diff --git a/lib64/libbrillo.so b/lib64/libbrillo.so new file mode 100755 index 0000000..92f7d2a Binary files /dev/null and b/lib64/libbrillo.so differ diff --git a/lib64/libc++.so b/lib64/libc++.so new file mode 100755 index 0000000..55121bf Binary files /dev/null and b/lib64/libc++.so differ diff --git a/lib64/libchrome.so b/lib64/libchrome.so new file mode 100755 index 0000000..bda3b0d Binary files /dev/null and b/lib64/libchrome.so differ diff --git a/lib64/libevent-host.so b/lib64/libevent-host.so new file mode 100755 index 0000000..c535095 Binary files /dev/null and b/lib64/libevent-host.so differ diff --git a/lib64/liblog.so b/lib64/liblog.so new file mode 100755 index 0000000..f5d73fd Binary files /dev/null and b/lib64/liblog.so differ diff --git a/lib64/libprotobuf-cpp-lite.so b/lib64/libprotobuf-cpp-lite.so new file mode 100755 index 0000000..9224ecd Binary files /dev/null and b/lib64/libprotobuf-cpp-lite.so differ diff --git a/puffin b/puffin new file mode 100755 index 0000000..ba651e4 Binary files /dev/null and b/puffin differ diff --git a/remove_img_extension_old.sh b/remove_img_extension_old.sh new file mode 100755 index 0000000..b625ea0 --- /dev/null +++ b/remove_img_extension_old.sh @@ -0,0 +1,3 @@ +cd old +for i in ./*.img; do mv -i "$i" "${i%.img}"; done +cd .. 
diff --git a/update_payload/__init__.py b/update_payload/__init__.py index 8ee95e2..6e77678 100644 --- a/update_payload/__init__.py +++ b/update_payload/__init__.py @@ -17,6 +17,8 @@ """Library for processing, verifying and applying Chrome OS update payloads.""" # Just raise the interface classes to the root namespace. +from __future__ import absolute_import + from update_payload.checker import CHECKS_TO_DISABLE from update_payload.error import PayloadError from update_payload.payload import Payload diff --git a/update_payload/applier.py b/update_payload/applier.py index 9582b3d..314f980 100644 --- a/update_payload/applier.py +++ b/update_payload/applier.py @@ -24,12 +24,12 @@ """ +from __future__ import absolute_import from __future__ import print_function import array import bz2 import hashlib -import itertools # Not everywhere we can have the lzma library so we ignore it if we didn't have # it because it is not going to be used. For example, 'cros flash' uses # devserver code which eventually loads this file, but the lzma library is not @@ -45,7 +45,6 @@ except ImportError: pass import os -import shutil import subprocess import sys import tempfile @@ -53,13 +52,12 @@ from update_payload import common from update_payload.error import PayloadError - # # Helper functions. # def _VerifySha256(file_obj, expected_hash, name, length=-1): """Verifies the SHA256 hash of a file. - + Args: file_obj: file object to read expected_hash: the hash digest we expect to be getting @@ -70,9 +68,10 @@ def _VerifySha256(file_obj, expected_hash, name, length=-1): PayloadError if computed hash doesn't match expected one, or if fails to read the specified length of data. 
""" + hasher = hashlib.sha256() block_length = 1024 * 1024 - max_length = length if length >= 0 else sys.maxint + max_length = length if length >= 0 else sys.maxsize while max_length > 0: read_length = min(max_length, block_length) @@ -108,20 +107,16 @@ def _ReadExtents(file_obj, extents, block_size, max_length=-1): Returns: A character array containing the concatenated read data. """ - data = array.array('c') + data = array.array('B') if max_length < 0: - max_length = sys.maxint + max_length = sys.maxsize for ex in extents: if max_length == 0: break read_length = min(max_length, ex.num_blocks * block_size) - # Fill with zeros or read from file, depending on the type of extent. - if ex.start_block == common.PSEUDO_EXTENT_MARKER: - data.extend(itertools.repeat('\0', read_length)) - else: - file_obj.seek(ex.start_block * block_size) - data.fromfile(file_obj, read_length) + file_obj.seek(ex.start_block * block_size) + data.fromfile(file_obj, read_length) max_length -= read_length @@ -149,12 +144,8 @@ def _WriteExtents(file_obj, data, extents, block_size, base_name): if not data_length: raise PayloadError('%s: more write extents than data' % ex_name) write_length = min(data_length, ex.num_blocks * block_size) - - # Only do actual writing if this is not a pseudo-extent. 
- if ex.start_block != common.PSEUDO_EXTENT_MARKER: - file_obj.seek(ex.start_block * block_size) - data_view = buffer(data, data_offset, write_length) - file_obj.write(data_view) + file_obj.seek(ex.start_block * block_size) + file_obj.write(data[data_offset:(data_offset + write_length)]) data_offset += write_length data_length -= write_length @@ -184,20 +175,17 @@ def _ExtentsToBspatchArg(extents, block_size, base_name, data_length=-1): arg = '' pad_off = pad_len = 0 if data_length < 0: - data_length = sys.maxint + data_length = sys.maxsize for ex, ex_name in common.ExtentIter(extents, base_name): if not data_length: raise PayloadError('%s: more extents than total data length' % ex_name) - is_pseudo = ex.start_block == common.PSEUDO_EXTENT_MARKER - start_byte = -1 if is_pseudo else ex.start_block * block_size + start_byte = ex.start_block * block_size num_bytes = ex.num_blocks * block_size if data_length < num_bytes: # We're only padding a real extent. - if not is_pseudo: - pad_off = start_byte + data_length - pad_len = num_bytes - data_length - + pad_off = start_byte + data_length + pad_len = num_bytes - data_length num_bytes = data_length arg += '%s%d:%d' % (arg and ',', start_byte, num_bytes) @@ -219,8 +207,8 @@ class PayloadApplier(object): applying an update payload. """ - def __init__(self, payload, bsdiff_in_place=True, bspatch_path=None, - puffpatch_path=None, truncate_to_expected_size=True): + def __init__(self, payload, bsdiff_in_place=True, bspatch_path="./bspatch", + puffpatch_path="./puffin", truncate_to_expected_size=True): """Initialize the applier. Args: @@ -274,30 +262,28 @@ def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size): num_blocks = ex.num_blocks count = num_blocks * block_size - # Make sure it's not a fake (signature) operation. - if start_block != common.PSEUDO_EXTENT_MARKER: - data_end = data_start + count + data_end = data_start + count - # Make sure we're not running past partition boundary. 
- if (start_block + num_blocks) * block_size > part_size: - raise PayloadError( - '%s: extent (%s) exceeds partition size (%d)' % - (ex_name, common.FormatExtent(ex, block_size), - part_size)) + # Make sure we're not running past partition boundary. + if (start_block + num_blocks) * block_size > part_size: + raise PayloadError( + '%s: extent (%s) exceeds partition size (%d)' % + (ex_name, common.FormatExtent(ex, block_size), + part_size)) - # Make sure that we have enough data to write. - if data_end >= data_length + block_size: - raise PayloadError( - '%s: more dst blocks than data (even with padding)') + # Make sure that we have enough data to write. + if data_end >= data_length + block_size: + raise PayloadError( + '%s: more dst blocks than data (even with padding)') - # Pad with zeros if necessary. - if data_end > data_length: - padding = data_end - data_length - out_data += '\0' * padding + # Pad with zeros if necessary. + if data_end > data_length: + padding = data_end - data_length + out_data += b'\0' * padding - self.payload.payload_file.seek(start_block * block_size) - part_file.seek(start_block * block_size) - part_file.write(out_data[data_start:data_end]) + self.payload.payload_file.seek(start_block * block_size) + part_file.seek(start_block * block_size) + part_file.write(out_data[data_start:data_end]) data_start += count @@ -306,30 +292,6 @@ def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size): raise PayloadError('%s: wrote fewer bytes (%d) than expected (%d)' % (op_name, data_start, data_length)) - def _ApplyMoveOperation(self, op, op_name, part_file): - """Applies a MOVE operation. - - Note that this operation must read the whole block data from the input and - only then dump it, due to our in-place update semantics; otherwise, it - might clobber data midway through. 
- - Args: - op: the operation object - op_name: name string for error reporting - part_file: the partition file object - - Raises: - PayloadError if something goes wrong. - """ - block_size = self.block_size - - # Gather input raw data from src extents. - in_data = _ReadExtents(part_file, op.src_extents, block_size) - - # Dump extracted data to dst extents. - _WriteExtents(part_file, in_data, op.dst_extents, block_size, - '%s.dst_extents' % op_name) - def _ApplyZeroOperation(self, op, op_name, part_file): """Applies a ZERO operation. @@ -347,10 +309,8 @@ def _ApplyZeroOperation(self, op, op_name, part_file): # Iterate over the extents and write zero. # pylint: disable=unused-variable for ex, ex_name in common.ExtentIter(op.dst_extents, base_name): - # Only do actual writing if this is not a pseudo-extent. - if ex.start_block != common.PSEUDO_EXTENT_MARKER: - part_file.seek(ex.start_block * block_size) - part_file.write('\0' * (ex.num_blocks * block_size)) + part_file.seek(ex.start_block * block_size) + part_file.write(b'\0' * (ex.num_blocks * block_size)) def _ApplySourceCopyOperation(self, op, op_name, old_part_file, new_part_file): @@ -435,16 +395,23 @@ def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file, data_length=op.dst_length if op.dst_length else self._BytesInExtents(op.dst_extents, "%s.dst_extents")) - new_file_name = '/dev/fd/%d' % new_part_file.fileno() + new_file_name = new_part_file.name # Diff from source partition. - old_file_name = '/dev/fd/%d' % old_part_file.fileno() + old_file_name = old_part_file.name + + # In python3, file descriptors(fd) are not passed to child processes by + # default. To pass the fds to the child processes, we need to set the flag + # 'inheritable' in the fds and make the subprocess calls with the argument + # close_fds set to False. 
+ if sys.version_info.major >= 3: + os.set_inheritable(new_part_file.fileno(), True) + os.set_inheritable(old_part_file.fileno(), True) - if op.type in (common.OpType.BSDIFF, common.OpType.SOURCE_BSDIFF, - common.OpType.BROTLI_BSDIFF): + if op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.BROTLI_BSDIFF): # Invoke bspatch on partition file with extents args. bspatch_cmd = [self.bspatch_path, old_file_name, new_file_name, patch_file_name, in_extents_arg, out_extents_arg] - subprocess.check_call(bspatch_cmd) + subprocess.check_call(bspatch_cmd, close_fds=False) elif op.type == common.OpType.PUFFDIFF: # Invoke puffpatch on partition file with extents args. puffpatch_cmd = [self.puffpatch_path, @@ -454,14 +421,14 @@ def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file, "--patch_file=%s" % patch_file_name, "--src_extents=%s" % in_extents_arg, "--dst_extents=%s" % out_extents_arg] - subprocess.check_call(puffpatch_cmd) + subprocess.check_call(puffpatch_cmd, close_fds=False) else: - raise PayloadError("Unknown operation %s", op.type) + raise PayloadError("Unknown operation %s" % op.type) # Pad with zeros past the total output length. if pad_len: new_part_file.seek(pad_off) - new_part_file.write('\0' * pad_len) + new_part_file.write(b'\0' * pad_len) else: # Gather input raw data and write to a temp file. input_part_file = old_part_file if old_part_file else new_part_file @@ -477,8 +444,7 @@ def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file, with tempfile.NamedTemporaryFile(delete=False) as out_file: out_file_name = out_file.name - if op.type in (common.OpType.BSDIFF, common.OpType.SOURCE_BSDIFF, - common.OpType.BROTLI_BSDIFF): + if op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.BROTLI_BSDIFF): # Invoke bspatch. 
bspatch_cmd = [self.bspatch_path, in_file_name, out_file_name, patch_file_name] @@ -492,7 +458,7 @@ def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file, "--patch_file=%s" % patch_file_name] subprocess.check_call(puffpatch_cmd) else: - raise PayloadError("Unknown operation %s", op.type) + raise PayloadError("Unknown operation %s" % op.type) # Read output. with open(out_file_name, 'rb') as out_file: @@ -505,7 +471,7 @@ def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file, # Write output back to partition, with padding. unaligned_out_len = len(out_data) % block_size if unaligned_out_len: - out_data += '\0' * (block_size - unaligned_out_len) + out_data += b'\0' * (block_size - unaligned_out_len) _WriteExtents(new_part_file, out_data, op.dst_extents, block_size, '%s.dst_extents' % op_name) @@ -520,10 +486,6 @@ def _ApplyOperations(self, operations, base_name, old_part_file, new_part_file, part_size): """Applies a sequence of update operations to a partition. - This assumes an in-place update semantics for MOVE and BSDIFF, namely all - reads are performed first, then the data is processed and written back to - the same file. 
- Args: operations: the sequence of operations base_name: the name of the operation sequence @@ -541,13 +503,8 @@ def _ApplyOperations(self, operations, base_name, old_part_file, if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ, common.OpType.REPLACE_XZ): self._ApplyReplaceOperation(op, op_name, data, new_part_file, part_size) - elif op.type == common.OpType.MOVE: - self._ApplyMoveOperation(op, op_name, new_part_file) elif op.type == common.OpType.ZERO: self._ApplyZeroOperation(op, op_name, new_part_file) - elif op.type == common.OpType.BSDIFF: - self._ApplyDiffOperation(op, op_name, data, new_part_file, - new_part_file) elif op.type == common.OpType.SOURCE_COPY: self._ApplySourceCopyOperation(op, op_name, old_part_file, new_part_file) @@ -579,22 +536,12 @@ def _ApplyToPartition(self, operations, part_name, base_name, # Do we have a source partition? if old_part_file_name: # Verify the source partition. - with open(old_part_file_name, 'rb') as old_part_file: - _VerifySha256(old_part_file, old_part_info.hash, - 'old ' + part_name, length=old_part_info.size) + # with open(old_part_file_name, 'rb') as old_part_file: + # _VerifySha256(old_part_file, old_part_info.hash, + # 'old ' + part_name, length=old_part_info.size) new_part_file_mode = 'r+b' - if self.minor_version == common.INPLACE_MINOR_PAYLOAD_VERSION: - # Copy the src partition to the dst one; make sure we don't truncate it. - shutil.copyfile(old_part_file_name, new_part_file_name) - elif (self.minor_version == common.SOURCE_MINOR_PAYLOAD_VERSION or - self.minor_version == common.OPSRCHASH_MINOR_PAYLOAD_VERSION or - self.minor_version == common.BROTLI_BSDIFF_MINOR_PAYLOAD_VERSION or - self.minor_version == common.PUFFDIFF_MINOR_PAYLOAD_VERSION): - # In minor version >= 2, we don't want to copy the partitions, so - # instead just make the new partition file. 
- open(new_part_file_name, 'w').close() - else: - raise PayloadError("Unknown minor version: %d" % self.minor_version) + open(new_part_file_name, 'w').close() + else: # We need to create/truncate the dst partition file. new_part_file_mode = 'w+b' @@ -618,50 +565,58 @@ def _ApplyToPartition(self, operations, part_name, base_name, new_part_file.truncate() # Verify the resulting partition. - with open(new_part_file_name, 'rb') as new_part_file: - _VerifySha256(new_part_file, new_part_info.hash, - 'new ' + part_name, length=new_part_info.size) + # with open(new_part_file_name, 'rb') as new_part_file: + # _VerifySha256(new_part_file, new_part_info.hash, + # 'new ' + part_name, length=new_part_info.size) - def Run(self, new_kernel_part, new_rootfs_part, old_kernel_part=None, - old_rootfs_part=None): + def Run(self, new_parts, old_parts=None): """Applier entry point, invoking all update operations. Args: - new_kernel_part: name of dest kernel partition file - new_rootfs_part: name of dest rootfs partition file - old_kernel_part: name of source kernel partition file (optional) - old_rootfs_part: name of source rootfs partition file (optional) + new_parts: map of partition name to dest partition file + old_parts: map of partition name to source partition file (optional) Raises: PayloadError if payload application failed. """ + if old_parts is None: + old_parts = {} + self.payload.ResetFile() + new_part_info = {} + old_part_info = {} + install_operations = [] + + manifest = self.payload.manifest + for part in manifest.partitions: + name = part.partition_name + new_part_info[name] = part.new_partition_info + old_part_info[name] = part.old_partition_info + install_operations.append((name, part.operations)) + + part_names = set(new_part_info.keys()) # Equivalently, old_part_info.keys() + # Make sure the arguments are sane and match the payload. 
- if not (new_kernel_part and new_rootfs_part): - raise PayloadError('missing dst {kernel,rootfs} partitions') - - if not (old_kernel_part or old_rootfs_part): - if not self.payload.IsFull(): - raise PayloadError('trying to apply a non-full update without src ' - '{kernel,rootfs} partitions') - elif old_kernel_part and old_rootfs_part: - if not self.payload.IsDelta(): - raise PayloadError('trying to apply a non-delta update onto src ' - '{kernel,rootfs} partitions') + new_part_names = set(new_parts.keys()) + if new_part_names != part_names: + raise PayloadError('missing dst partition(s) %s' % + ', '.join(part_names - new_part_names)) + + old_part_names = set(old_parts.keys()) + if part_names - old_part_names: + if self.payload.IsDelta(): + raise PayloadError('trying to apply a delta update without src ' + 'partition(s) %s' % + ', '.join(part_names - old_part_names)) + elif old_part_names == part_names: + if self.payload.IsFull(): + raise PayloadError('trying to apply a full update onto src partitions') else: raise PayloadError('not all src partitions provided') - # Apply update to rootfs. - self._ApplyToPartition( - self.payload.manifest.install_operations, 'rootfs', - 'install_operations', new_rootfs_part, - self.payload.manifest.new_rootfs_info, old_rootfs_part, - self.payload.manifest.old_rootfs_info) - - # Apply update to kernel update. - self._ApplyToPartition( - self.payload.manifest.kernel_install_operations, 'kernel', - 'kernel_install_operations', new_kernel_part, - self.payload.manifest.new_kernel_info, old_kernel_part, - self.payload.manifest.old_kernel_info) + for name, operations in install_operations: + # Apply update to partition. 
+ self._ApplyToPartition( + operations, name, '%s_install_operations' % name, new_parts[name], + new_part_info[name], old_parts.get(name, None), old_part_info[name]) diff --git a/update_payload/checker.py b/update_payload/checker.py index e241b0b..4c65516 100644 --- a/update_payload/checker.py +++ b/update_payload/checker.py @@ -24,31 +24,32 @@ checker.Run(...) """ +from __future__ import absolute_import from __future__ import print_function import array import base64 +import collections import hashlib import itertools import os import subprocess +from six.moves import range + from update_payload import common from update_payload import error from update_payload import format_utils from update_payload import histogram from update_payload import update_metadata_pb2 - # # Constants. # -_CHECK_DST_PSEUDO_EXTENTS = 'dst-pseudo-extents' _CHECK_MOVE_SAME_SRC_DST_BLOCK = 'move-same-src-dst-block' _CHECK_PAYLOAD_SIG = 'payload-sig' CHECKS_TO_DISABLE = ( - _CHECK_DST_PSEUDO_EXTENTS, _CHECK_MOVE_SAME_SRC_DST_BLOCK, _CHECK_PAYLOAD_SIG, ) @@ -65,14 +66,13 @@ # Supported minor version map to payload types allowed to be using them. _SUPPORTED_MINOR_VERSIONS = { 0: (_TYPE_FULL,), - 1: (_TYPE_DELTA,), 2: (_TYPE_DELTA,), 3: (_TYPE_DELTA,), 4: (_TYPE_DELTA,), 5: (_TYPE_DELTA,), + 6: (_TYPE_DELTA,), } -_OLD_DELTA_USABLE_PART_SIZE = 2 * 1024 * 1024 * 1024 # # Helper functions. @@ -321,8 +321,6 @@ def __init__(self, payload, assert_type=None, block_size=0, self.allow_unhashed = allow_unhashed # Disable specific tests. - self.check_dst_pseudo_extents = ( - _CHECK_DST_PSEUDO_EXTENTS not in disabled_tests) self.check_move_same_src_dst_block = ( _CHECK_MOVE_SAME_SRC_DST_BLOCK not in disabled_tests) self.check_payload_sig = _CHECK_PAYLOAD_SIG not in disabled_tests @@ -330,15 +328,12 @@ def __init__(self, payload, assert_type=None, block_size=0, # Reset state; these will be assigned when the manifest is checked. 
self.sigs_offset = 0 self.sigs_size = 0 - self.old_rootfs_fs_size = 0 - self.old_kernel_fs_size = 0 - self.new_rootfs_fs_size = 0 - self.new_kernel_fs_size = 0 + self.old_part_info = {} + self.new_part_info = {} + self.new_fs_sizes = collections.defaultdict(int) + self.old_fs_sizes = collections.defaultdict(int) self.minor_version = None - # TODO(*): When fixing crbug.com/794404, the major version should be - # correclty handled in update_payload scripts. So stop forcing - # major_verions=1 here and set it to the correct value. - self.major_version = 1 + self.major_version = None @staticmethod def _CheckElem(msg, name, report, is_mandatory, is_submsg, convert=str, @@ -368,22 +363,56 @@ def _CheckElem(msg, name, report, is_mandatory, is_submsg, convert=str, Raises: error.PayloadError if a mandatory element is missing. """ + element_result = collections.namedtuple('element_result', ['msg', 'report']) + if not msg.HasField(name): if is_mandatory: raise error.PayloadError('%smissing mandatory %s %r.' % (msg_name + ' ' if msg_name else '', 'sub-message' if is_submsg else 'field', name)) - return None, None + return element_result(None, None) value = getattr(msg, name) if is_submsg: - return value, report and report.AddSubReport(name) + return element_result(value, report and report.AddSubReport(name)) else: if report: report.AddField(name, convert(value), linebreak=linebreak, indent=indent) - return value, None + return element_result(value, None) + + @staticmethod + def _CheckRepeatedElemNotPresent(msg, field_name, msg_name): + """Checks that a repeated element is not specified in the message. + + Args: + msg: The message containing the element. + field_name: The name of the element. + msg_name: The name of the message object (for error reporting). + + Raises: + error.PayloadError if the repeated element is present or non-empty. + """ + if getattr(msg, field_name, None): + raise error.PayloadError('%sfield %r not empty.' 
% + (msg_name + ' ' if msg_name else '', field_name)) + + @staticmethod + def _CheckElemNotPresent(msg, field_name, msg_name): + """Checks that an element is not specified in the message. + + Args: + msg: The message containing the element. + field_name: The name of the element. + msg_name: The name of the message object (for error reporting). + + Raises: + error.PayloadError if the repeated element is present. + """ + if msg.HasField(field_name): + raise error.PayloadError('%sfield %r exists.' % + (msg_name + ' ' if msg_name else '', field_name)) @staticmethod def _CheckMandatoryField(msg, field_name, report, msg_name, convert=str, @@ -432,6 +461,22 @@ def _CheckPresentIff(val1, val2, name1, name2, obj_name): (present, missing, ' in ' + obj_name if obj_name else '')) + @staticmethod + def _CheckPresentIffMany(vals, name, obj_name): + """Checks that a set of vals and names imply every other element. + + Args: + vals: The set of values to be compared. + name: The name of the objects holding the corresponding value. + obj_name: Name of the object containing these values. + + Raises: + error.PayloadError if assertion does not hold. + """ + if any(vals) and not all(vals): + raise error.PayloadError('%r is not present in all values%s.' % + (name, ' in ' + obj_name if obj_name else '')) + @staticmethod def _Run(cmd, send_data=None): """Runs a subprocess, returns its output. @@ -544,13 +589,12 @@ def _CheckManifestMinorVersion(self, report): raise error.PayloadError('Unsupported minor version: %d' % self.minor_version) - def _CheckManifest(self, report, rootfs_part_size=0, kernel_part_size=0): + def _CheckManifest(self, report, part_sizes=None): """Checks the payload manifest. Args: report: A report object to add to. - rootfs_part_size: Size of the rootfs partition in bytes. - kernel_part_size: Size of the kernel partition in bytes. + part_sizes: Map of partition label to partition size in bytes. 
Returns: A tuple consisting of the partition block size used during the update @@ -559,6 +603,9 @@ def _CheckManifest(self, report, rootfs_part_size=0, kernel_part_size=0): Raises: error.PayloadError if any of the checks fail. """ + self.major_version = self.payload.header.version + + part_sizes = part_sizes or collections.defaultdict(int) manifest = self.payload.manifest report.AddSection('manifest') @@ -577,39 +624,45 @@ def _CheckManifest(self, report, rootfs_part_size=0, kernel_part_size=0): self._CheckPresentIff(self.sigs_offset, self.sigs_size, 'signatures_offset', 'signatures_size', 'manifest') - # Check: old_kernel_info <==> old_rootfs_info. - oki_msg, oki_report = self._CheckOptionalSubMsg(manifest, - 'old_kernel_info', report) - ori_msg, ori_report = self._CheckOptionalSubMsg(manifest, - 'old_rootfs_info', report) - self._CheckPresentIff(oki_msg, ori_msg, 'old_kernel_info', - 'old_rootfs_info', 'manifest') - if oki_msg: # equivalently, ori_msg + for part in manifest.partitions: + name = part.partition_name + self.old_part_info[name] = self._CheckOptionalSubMsg( + part, 'old_partition_info', report) + self.new_part_info[name] = self._CheckMandatorySubMsg( + part, 'new_partition_info', report, 'manifest.partitions') + + # Check: Old-style partition infos should not be specified. + for _, part in common.CROS_PARTITIONS: + self._CheckElemNotPresent(manifest, 'old_%s_info' % part, 'manifest') + self._CheckElemNotPresent(manifest, 'new_%s_info' % part, 'manifest') + + # Check: If old_partition_info is specified anywhere, it must be + # specified everywhere. + old_part_msgs = [part.msg for part in self.old_part_info.values() if part] + self._CheckPresentIffMany(old_part_msgs, 'old_partition_info', + 'manifest.partitions') + + is_delta = any(part and part.msg for part in self.old_part_info.values()) + if is_delta: # Assert/mark delta payload. 
if self.payload_type == _TYPE_FULL: raise error.PayloadError( 'Apparent full payload contains old_{kernel,rootfs}_info.') self.payload_type = _TYPE_DELTA - # Check: {size, hash} present in old_{kernel,rootfs}_info. - self.old_kernel_fs_size = self._CheckMandatoryField( - oki_msg, 'size', oki_report, 'old_kernel_info') - self._CheckMandatoryField(oki_msg, 'hash', oki_report, 'old_kernel_info', - convert=common.FormatSha256) - self.old_rootfs_fs_size = self._CheckMandatoryField( - ori_msg, 'size', ori_report, 'old_rootfs_info') - self._CheckMandatoryField(ori_msg, 'hash', ori_report, 'old_rootfs_info', - convert=common.FormatSha256) + for part, (msg, part_report) in self.old_part_info.items(): + # Check: {size, hash} present in old_{kernel,rootfs}_info. + field = 'old_%s_info' % part + self.old_fs_sizes[part] = self._CheckMandatoryField(msg, 'size', + part_report, field) + self._CheckMandatoryField(msg, 'hash', part_report, field, + convert=common.FormatSha256) - # Check: old_{kernel,rootfs} size must fit in respective partition. - if kernel_part_size and self.old_kernel_fs_size > kernel_part_size: - raise error.PayloadError( - 'Old kernel content (%d) exceed partition size (%d).' % - (self.old_kernel_fs_size, kernel_part_size)) - if rootfs_part_size and self.old_rootfs_fs_size > rootfs_part_size: - raise error.PayloadError( - 'Old rootfs content (%d) exceed partition size (%d).' % - (self.old_rootfs_fs_size, rootfs_part_size)) + # Check: old_{kernel,rootfs} size must fit in respective partition. + if self.old_fs_sizes[part] > part_sizes[part] > 0: + raise error.PayloadError( + 'Old %s content (%d) exceed partition size (%d).' % + (part, self.old_fs_sizes[part], part_sizes[part])) else: # Assert/mark full payload. 
if self.payload_type == _TYPE_DELTA: @@ -617,31 +670,19 @@ def _CheckManifest(self, report, rootfs_part_size=0, kernel_part_size=0): 'Apparent delta payload missing old_{kernel,rootfs}_info.') self.payload_type = _TYPE_FULL - # Check: new_kernel_info present; contains {size, hash}. - nki_msg, nki_report = self._CheckMandatorySubMsg( - manifest, 'new_kernel_info', report, 'manifest') - self.new_kernel_fs_size = self._CheckMandatoryField( - nki_msg, 'size', nki_report, 'new_kernel_info') - self._CheckMandatoryField(nki_msg, 'hash', nki_report, 'new_kernel_info', - convert=common.FormatSha256) - - # Check: new_rootfs_info present; contains {size, hash}. - nri_msg, nri_report = self._CheckMandatorySubMsg( - manifest, 'new_rootfs_info', report, 'manifest') - self.new_rootfs_fs_size = self._CheckMandatoryField( - nri_msg, 'size', nri_report, 'new_rootfs_info') - self._CheckMandatoryField(nri_msg, 'hash', nri_report, 'new_rootfs_info', - convert=common.FormatSha256) - - # Check: new_{kernel,rootfs} size must fit in respective partition. - if kernel_part_size and self.new_kernel_fs_size > kernel_part_size: - raise error.PayloadError( - 'New kernel content (%d) exceed partition size (%d).' % - (self.new_kernel_fs_size, kernel_part_size)) - if rootfs_part_size and self.new_rootfs_fs_size > rootfs_part_size: - raise error.PayloadError( - 'New rootfs content (%d) exceed partition size (%d).' % - (self.new_rootfs_fs_size, rootfs_part_size)) + # Check: new_{kernel,rootfs}_info present; contains {size, hash}. + for part, (msg, part_report) in self.new_part_info.items(): + field = 'new_%s_info' % part + self.new_fs_sizes[part] = self._CheckMandatoryField(msg, 'size', + part_report, field) + self._CheckMandatoryField(msg, 'hash', part_report, field, + convert=common.FormatSha256) + + # Check: new_{kernel,rootfs} size must fit in respective partition. + if self.new_fs_sizes[part] > part_sizes[part] > 0: + raise error.PayloadError( + 'New %s content (%d) exceed partition size (%d).' 
% + (part, self.new_fs_sizes[part], part_sizes[part])) # Check: minor_version makes sense for the payload type. This check should # run after the payload type has been set. @@ -667,8 +708,7 @@ def _CheckLength(self, length, total_blocks, op_name, length_name): self._CheckBlocksFitLength(length, total_blocks, self.block_size, '%s: %s' % (op_name, length_name)) - def _CheckExtents(self, extents, usable_size, block_counters, name, - allow_pseudo=False, allow_signature=False): + def _CheckExtents(self, extents, usable_size, block_counters, name): """Checks a sequence of extents. Args: @@ -676,8 +716,6 @@ def _CheckExtents(self, extents, usable_size, block_counters, name, usable_size: The usable size of the partition to which the extents apply. block_counters: Array of counters corresponding to the number of blocks. name: The name of the extent block. - allow_pseudo: Whether or not pseudo block numbers are allowed. - allow_signature: Whether or not the extents are used for a signature. Returns: The total number of blocks in the extents. @@ -698,20 +736,15 @@ def _CheckExtents(self, extents, usable_size, block_counters, name, if num_blocks == 0: raise error.PayloadError('%s: extent length is zero.' % ex_name) - if start_block != common.PSEUDO_EXTENT_MARKER: - # Check: Make sure we're within the partition limit. - if usable_size and end_block * self.block_size > usable_size: - raise error.PayloadError( - '%s: extent (%s) exceeds usable partition size (%d).' % - (ex_name, common.FormatExtent(ex, self.block_size), usable_size)) + # Check: Make sure we're within the partition limit. + if usable_size and end_block * self.block_size > usable_size: + raise error.PayloadError( + '%s: extent (%s) exceeds usable partition size (%d).' % + (ex_name, common.FormatExtent(ex, self.block_size), usable_size)) - # Record block usage. 
- for i in xrange(start_block, end_block): - block_counters[i] += 1 - elif not (allow_pseudo or (allow_signature and len(extents) == 1)): - # Pseudo-extents must be allowed explicitly, or otherwise be part of a - # signature operation (in which case there has to be exactly one). - raise error.PayloadError('%s: unexpected pseudo-extent.' % ex_name) + # Record block usage. + for i in range(start_block, end_block): + block_counters[i] += 1 total_num_blocks += num_blocks @@ -729,6 +762,11 @@ def _CheckReplaceOperation(self, op, data_length, total_dst_blocks, op_name): Raises: error.PayloadError if any check fails. """ + # Check: total_dst_blocks is not a floating point. + if isinstance(total_dst_blocks, float): + raise error.PayloadError('%s: contains invalid data type of ' + 'total_dst_blocks.' % op_name) + # Check: Does not contain src extents. if op.src_extents: raise error.PayloadError('%s: contains src_extents.' % op_name) @@ -742,96 +780,13 @@ def _CheckReplaceOperation(self, op, data_length, total_dst_blocks, op_name): self.block_size, op_name + '.data_length', 'dst') else: - # Check: data_length must be smaller than the alotted dst blocks. + # Check: data_length must be smaller than the allotted dst blocks. if data_length >= total_dst_blocks * self.block_size: raise error.PayloadError( '%s: data_length (%d) must be less than allotted dst block ' 'space (%d * %d).' % (op_name, data_length, total_dst_blocks, self.block_size)) - def _CheckMoveOperation(self, op, data_offset, total_src_blocks, - total_dst_blocks, op_name): - """Specific checks for MOVE operations. - - Args: - op: The operation object from the manifest. - data_offset: The offset of a data blob for the operation. - total_src_blocks: Total number of blocks in src_extents. - total_dst_blocks: Total number of blocks in dst_extents. - op_name: Operation name for error reporting. - - Raises: - error.PayloadError if any check fails. - """ - # Check: No data_{offset,length}. 
- if data_offset is not None: - raise error.PayloadError('%s: contains data_{offset,length}.' % op_name) - - # Check: total_src_blocks == total_dst_blocks. - if total_src_blocks != total_dst_blocks: - raise error.PayloadError( - '%s: total src blocks (%d) != total dst blocks (%d).' % - (op_name, total_src_blocks, total_dst_blocks)) - - # Check: For all i, i-th src block index != i-th dst block index. - i = 0 - src_extent_iter = iter(op.src_extents) - dst_extent_iter = iter(op.dst_extents) - src_extent = dst_extent = None - src_idx = src_num = dst_idx = dst_num = 0 - while i < total_src_blocks: - # Get the next source extent, if needed. - if not src_extent: - try: - src_extent = src_extent_iter.next() - except StopIteration: - raise error.PayloadError('%s: ran out of src extents (%d/%d).' % - (op_name, i, total_src_blocks)) - src_idx = src_extent.start_block - src_num = src_extent.num_blocks - - # Get the next dest extent, if needed. - if not dst_extent: - try: - dst_extent = dst_extent_iter.next() - except StopIteration: - raise error.PayloadError('%s: ran out of dst extents (%d/%d).' % - (op_name, i, total_dst_blocks)) - dst_idx = dst_extent.start_block - dst_num = dst_extent.num_blocks - - # Check: start block is not 0. See crbug/480751; there are still versions - # of update_engine which fail when seeking to 0 in PReadAll and PWriteAll, - # so we need to fail payloads that try to MOVE to/from block 0. - if src_idx == 0 or dst_idx == 0: - raise error.PayloadError( - '%s: MOVE operation cannot have extent with start block 0' % - op_name) - - if self.check_move_same_src_dst_block and src_idx == dst_idx: - raise error.PayloadError( - '%s: src/dst block number %d is the same (%d).' 
% - (op_name, i, src_idx)) - - advance = min(src_num, dst_num) - i += advance - - src_idx += advance - src_num -= advance - if src_num == 0: - src_extent = None - - dst_idx += advance - dst_num -= advance - if dst_num == 0: - dst_extent = None - - # Make sure we've exhausted all src/dst extents. - if src_extent: - raise error.PayloadError('%s: excess src blocks.' % op_name) - if dst_extent: - raise error.PayloadError('%s: excess dst blocks.' % op_name) - def _CheckZeroOperation(self, op, op_name): """Specific checks for ZERO operations. @@ -851,7 +806,7 @@ def _CheckZeroOperation(self, op, op_name): raise error.PayloadError('%s: contains data_offset.' % op_name) def _CheckAnyDiffOperation(self, op, data_length, total_dst_blocks, op_name): - """Specific checks for BSDIFF, SOURCE_BSDIFF, PUFFDIFF and BROTLI_BSDIFF + """Specific checks for SOURCE_BSDIFF, PUFFDIFF and BROTLI_BSDIFF operations. Args: @@ -867,7 +822,7 @@ def _CheckAnyDiffOperation(self, op, data_length, total_dst_blocks, op_name): if data_length is None: raise error.PayloadError('%s: missing data_{offset,length}.' % op_name) - # Check: data_length is strictly smaller than the alotted dst blocks. + # Check: data_length is strictly smaller than the allotted dst blocks. if data_length >= total_dst_blocks * self.block_size: raise error.PayloadError( '%s: data_length (%d) must be smaller than allotted dst space ' @@ -876,8 +831,7 @@ def _CheckAnyDiffOperation(self, op, data_length, total_dst_blocks, op_name): total_dst_blocks * self.block_size)) # Check the existence of src_length and dst_length for legacy bsdiffs. - if (op.type == common.OpType.BSDIFF or - (op.type == common.OpType.SOURCE_BSDIFF and self.minor_version <= 3)): + if op.type == common.OpType.SOURCE_BSDIFF and self.minor_version <= 3: if not op.HasField('src_length') or not op.HasField('dst_length'): raise error.PayloadError('%s: require {src,dst}_length.' 
% op_name) else: @@ -926,21 +880,19 @@ def _CheckAnySourceOperation(self, op, total_src_blocks, op_name): if self.minor_version >= 3 and op.src_sha256_hash is None: raise error.PayloadError('%s: source hash missing.' % op_name) - def _CheckOperation(self, op, op_name, is_last, old_block_counters, - new_block_counters, old_usable_size, new_usable_size, - prev_data_offset, allow_signature, blob_hash_counts): + def _CheckOperation(self, op, op_name, old_block_counters, new_block_counters, + old_usable_size, new_usable_size, prev_data_offset, + blob_hash_counts): """Checks a single update operation. Args: op: The operation object. op_name: Operation name string for error reporting. - is_last: Whether this is the last operation in the sequence. old_block_counters: Arrays of block read counters. new_block_counters: Arrays of block write counters. old_usable_size: The overall usable size for src data in bytes. new_usable_size: The overall usable size for dst data in bytes. prev_data_offset: Offset of last used data bytes. - allow_signature: Whether this may be a signature operation. blob_hash_counts: Counters for hashed/unhashed blobs. Returns: @@ -952,14 +904,10 @@ def _CheckOperation(self, op, op_name, is_last, old_block_counters, # Check extents. total_src_blocks = self._CheckExtents( op.src_extents, old_usable_size, old_block_counters, - op_name + '.src_extents', allow_pseudo=True) - allow_signature_in_extents = (allow_signature and is_last and - op.type == common.OpType.REPLACE) + op_name + '.src_extents') total_dst_blocks = self._CheckExtents( op.dst_extents, new_usable_size, new_block_counters, - op_name + '.dst_extents', - allow_pseudo=(not self.check_dst_pseudo_extents), - allow_signature=allow_signature_in_extents) + op_name + '.dst_extents') # Check: data_offset present <==> data_length present. 
data_offset = self._CheckOptionalField(op, 'data_offset', None) @@ -995,9 +943,7 @@ def _CheckOperation(self, op, op_name, is_last, old_block_counters, (op_name, common.FormatSha256(op.data_sha256_hash), common.FormatSha256(actual_hash.digest()))) elif data_offset is not None: - if allow_signature_in_extents: - blob_hash_counts['signature'] += 1 - elif self.allow_unhashed: + if self.allow_unhashed: blob_hash_counts['unhashed'] += 1 else: raise error.PayloadError('%s: unhashed operation not allowed.' % @@ -1011,18 +957,11 @@ def _CheckOperation(self, op, op_name, is_last, old_block_counters, (op_name, data_offset, prev_data_offset)) # Type-specific checks. - if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ): - self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name) - elif op.type == common.OpType.REPLACE_XZ and (self.minor_version >= 3 or - self.major_version >= 2): + if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ, + common.OpType.REPLACE_XZ): self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name) - elif op.type == common.OpType.MOVE and self.minor_version == 1: - self._CheckMoveOperation(op, data_offset, total_src_blocks, - total_dst_blocks, op_name) elif op.type == common.OpType.ZERO and self.minor_version >= 4: self._CheckZeroOperation(op, op_name) - elif op.type == common.OpType.BSDIFF and self.minor_version == 1: - self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name) elif op.type == common.OpType.SOURCE_COPY and self.minor_version >= 2: self._CheckSourceCopyOperation(data_offset, total_src_blocks, total_dst_blocks, op_name) @@ -1044,7 +983,7 @@ def _CheckOperation(self, op, op_name, is_last, old_block_counters, def _SizeToNumBlocks(self, size): """Returns the number of blocks needed to contain a given byte size.""" - return (size + self.block_size - 1) / self.block_size + return (size + self.block_size - 1) // self.block_size def _AllocBlockCounters(self, total_size): """Returns a 
freshly initialized array of block counters. @@ -1064,7 +1003,7 @@ def _AllocBlockCounters(self, total_size): def _CheckOperations(self, operations, report, base_name, old_fs_size, new_fs_size, old_usable_size, new_usable_size, - prev_data_offset, allow_signature): + prev_data_offset): """Checks a sequence of update operations. Args: @@ -1076,7 +1015,6 @@ def _CheckOperations(self, operations, report, base_name, old_fs_size, old_usable_size: The overall usable size of the old partition in bytes. new_usable_size: The overall usable size of the new partition in bytes. prev_data_offset: Offset of last used data bytes. - allow_signature: Whether this sequence may contain signature operations. Returns: The total data blob size used. @@ -1091,9 +1029,7 @@ def _CheckOperations(self, operations, report, base_name, old_fs_size, common.OpType.REPLACE: 0, common.OpType.REPLACE_BZ: 0, common.OpType.REPLACE_XZ: 0, - common.OpType.MOVE: 0, common.OpType.ZERO: 0, - common.OpType.BSDIFF: 0, common.OpType.SOURCE_COPY: 0, common.OpType.SOURCE_BSDIFF: 0, common.OpType.PUFFDIFF: 0, @@ -1104,8 +1040,6 @@ def _CheckOperations(self, operations, report, base_name, old_fs_size, common.OpType.REPLACE: 0, common.OpType.REPLACE_BZ: 0, common.OpType.REPLACE_XZ: 0, - # MOVE operations don't have blobs. - common.OpType.BSDIFF: 0, # SOURCE_COPY operations don't have blobs. common.OpType.SOURCE_BSDIFF: 0, common.OpType.PUFFDIFF: 0, @@ -1116,8 +1050,6 @@ def _CheckOperations(self, operations, report, base_name, old_fs_size, 'hashed': 0, 'unhashed': 0, } - if allow_signature: - blob_hash_counts['signature'] = 0 # Allocate old and new block counters. old_block_counters = (self._AllocBlockCounters(old_usable_size) @@ -1130,16 +1062,14 @@ def _CheckOperations(self, operations, report, base_name, old_fs_size, op_num += 1 # Check: Type is valid. - if op.type not in op_counts.keys(): + if op.type not in op_counts: raise error.PayloadError('%s: invalid type (%d).' 
% (op_name, op.type)) op_counts[op.type] += 1 - is_last = op_num == len(operations) curr_data_used = self._CheckOperation( - op, op_name, is_last, old_block_counters, new_block_counters, + op, op_name, old_block_counters, new_block_counters, old_usable_size, new_usable_size, - prev_data_offset + total_data_used, allow_signature, - blob_hash_counts) + prev_data_offset + total_data_used, blob_hash_counts) if curr_data_used: op_blob_totals[op.type] += curr_data_used total_data_used += curr_data_used @@ -1193,18 +1123,17 @@ def _CheckSignatures(self, report, pubkey_file_name): if not sigs.signatures: raise error.PayloadError('Signature block is empty.') - last_ops_section = (self.payload.manifest.kernel_install_operations or - self.payload.manifest.install_operations) - fake_sig_op = last_ops_section[-1] - # Check: signatures_{offset,size} must match the last (fake) operation. - if not (fake_sig_op.type == common.OpType.REPLACE and - self.sigs_offset == fake_sig_op.data_offset and - self.sigs_size == fake_sig_op.data_length): - raise error.PayloadError( - 'Signatures_{offset,size} (%d+%d) does not match last operation ' - '(%d+%d).' % - (self.sigs_offset, self.sigs_size, fake_sig_op.data_offset, - fake_sig_op.data_length)) + # Check that we don't have the signature operation blob at the end (used to + # be for major version 1). + last_partition = self.payload.manifest.partitions[-1] + if last_partition.operations: + last_op = last_partition.operations[-1] + # Check: signatures_{offset,size} must match the last (fake) operation. + if (last_op.type == common.OpType.REPLACE and + last_op.data_offset == self.sigs_offset and + last_op.data_length == self.sigs_size): + raise error.PayloadError('It seems like the last operation is the ' + 'signature blob. This is an invalid payload.') # Compute the checksum of all data up to signature blob. 
# TODO(garnold) we're re-reading the whole data section into a string @@ -1231,17 +1160,16 @@ def _CheckSignatures(self, report, pubkey_file_name): raise error.PayloadError('Unknown signature version (%d).' % sig.version) - def Run(self, pubkey_file_name=None, metadata_sig_file=None, - rootfs_part_size=0, kernel_part_size=0, report_out_file=None): + def Run(self, pubkey_file_name=None, metadata_sig_file=None, metadata_size=0, + part_sizes=None, report_out_file=None): """Checker entry point, invoking all checks. Args: pubkey_file_name: Public key used for signature verification. metadata_sig_file: Metadata signature, if verification is desired. - rootfs_part_size: The size of rootfs partitions in bytes (default: infer - based on payload type and version). - kernel_part_size: The size of kernel partitions in bytes (default: use - reported filesystem size). + metadata_size: Metadata size, if verification is desired. + part_sizes: Mapping of partition label to size in bytes (default: infer + based on payload type and version or filesystem). report_out_file: File object to dump the report to. Raises: @@ -1258,6 +1186,12 @@ def Run(self, pubkey_file_name=None, metadata_sig_file=None, self.payload.ResetFile() try: + # Check metadata_size (if provided). + if metadata_size and self.payload.metadata_size != metadata_size: + raise error.PayloadError('Invalid payload metadata size in payload(%d) ' + 'vs given(%d)' % (self.payload.metadata_size, + metadata_size)) + # Check metadata signature (if provided). if metadata_sig_file: metadata_sig = base64.b64decode(metadata_sig_file.read()) @@ -1268,65 +1202,60 @@ def Run(self, pubkey_file_name=None, metadata_sig_file=None, # Part 1: Check the file header. report.AddSection('header') # Check: Payload version is valid. - if self.payload.header.version != 1: + if self.payload.header.version not in (1, 2): raise error.PayloadError('Unknown payload version (%d).' 
% self.payload.header.version) report.AddField('version', self.payload.header.version) report.AddField('manifest len', self.payload.header.manifest_len) # Part 2: Check the manifest. - self._CheckManifest(report, rootfs_part_size, kernel_part_size) + self._CheckManifest(report, part_sizes) assert self.payload_type, 'payload type should be known by now' - # Infer the usable partition size when validating rootfs operations: - # - If rootfs partition size was provided, use that. - # - Otherwise, if this is an older delta (minor version < 2), stick with - # a known constant size. This is necessary because older deltas may - # exceed the filesystem size when moving data blocks around. - # - Otherwise, use the encoded filesystem size. - new_rootfs_usable_size = self.new_rootfs_fs_size - old_rootfs_usable_size = self.old_rootfs_fs_size - if rootfs_part_size: - new_rootfs_usable_size = rootfs_part_size - old_rootfs_usable_size = rootfs_part_size - elif self.payload_type == _TYPE_DELTA and self.minor_version in (None, 1): - new_rootfs_usable_size = _OLD_DELTA_USABLE_PART_SIZE - old_rootfs_usable_size = _OLD_DELTA_USABLE_PART_SIZE - - # Part 3: Examine rootfs operations. - # TODO(garnold)(chromium:243559) only default to the filesystem size if - # no explicit size provided *and* the partition size is not embedded in - # the payload; see issue for more details. - report.AddSection('rootfs operations') - total_blob_size = self._CheckOperations( - self.payload.manifest.install_operations, report, - 'install_operations', self.old_rootfs_fs_size, - self.new_rootfs_fs_size, old_rootfs_usable_size, - new_rootfs_usable_size, 0, False) - - # Part 4: Examine kernel operations. - # TODO(garnold)(chromium:243559) as above. 
- report.AddSection('kernel operations') - total_blob_size += self._CheckOperations( - self.payload.manifest.kernel_install_operations, report, - 'kernel_install_operations', self.old_kernel_fs_size, - self.new_kernel_fs_size, - kernel_part_size if kernel_part_size else self.old_kernel_fs_size, - kernel_part_size if kernel_part_size else self.new_kernel_fs_size, - total_blob_size, True) + # Make sure deprecated values are not present in the payload. + for field in ('install_operations', 'kernel_install_operations'): + self._CheckRepeatedElemNotPresent(self.payload.manifest, field, + 'manifest') + for field in ('old_kernel_info', 'old_rootfs_info', + 'new_kernel_info', 'new_rootfs_info'): + self._CheckElemNotPresent(self.payload.manifest, field, 'manifest') + + total_blob_size = 0 + for part, operations in ((p.partition_name, p.operations) + for p in self.payload.manifest.partitions): + report.AddSection('%s operations' % part) + + new_fs_usable_size = self.new_fs_sizes[part] + old_fs_usable_size = self.old_fs_sizes[part] + + if part_sizes is not None and part_sizes.get(part, None): + new_fs_usable_size = old_fs_usable_size = part_sizes[part] + + # TODO(chromium:243559) only default to the filesystem size if no + # explicit size provided *and* the partition size is not embedded in the + # payload; see issue for more details. + total_blob_size += self._CheckOperations( + operations, report, '%s_install_operations' % part, + self.old_fs_sizes[part], self.new_fs_sizes[part], + old_fs_usable_size, new_fs_usable_size, total_blob_size) # Check: Operations data reach the end of the payload file. used_payload_size = self.payload.data_offset + total_blob_size + # Major versions 2 and higher have a signature at the end, so it should be + # considered in the total size of the image. + if self.sigs_size: + used_payload_size += self.sigs_size + if used_payload_size != payload_file_size: raise error.PayloadError( 'Used payload size (%d) different from actual file size (%d).' 
% (used_payload_size, payload_file_size)) - # Part 5: Handle payload signatures message. + # Part 4: Handle payload signatures message. if self.check_payload_sig and self.sigs_size: self._CheckSignatures(report, pubkey_file_name) - # Part 6: Summary. + # Part 5: Summary. report.AddSection('summary') report.AddField('update type', self.payload_type) diff --git a/update_payload/checker_unittest.py b/update_payload/checker_unittest.py index f718234..993b785 100755 --- a/update_payload/checker_unittest.py +++ b/update_payload/checker_unittest.py @@ -1,4 +1,4 @@ -#!/usr/bin/python2 +#!/usr/bin/env python # # Copyright (C) 2013 The Android Open Source Project # @@ -17,35 +17,36 @@ """Unit testing checker.py.""" -from __future__ import print_function +# Disable check for function names to avoid errors based on old code +# pylint: disable-msg=invalid-name + +from __future__ import absolute_import import array import collections -import cStringIO import hashlib +import io import itertools import os import unittest -# pylint cannot find mox. -# pylint: disable=F0401 -import mox +from six.moves import zip + +import mock # pylint: disable=import-error from update_payload import checker from update_payload import common from update_payload import test_utils from update_payload import update_metadata_pb2 from update_payload.error import PayloadError -from update_payload.payload import Payload # Avoid name conflicts later. +from update_payload.payload import Payload # Avoid name conflicts later. 
def _OpTypeByName(op_name): - """Returns the type of an operation from itsname.""" + """Returns the type of an operation from its name.""" op_name_to_type = { 'REPLACE': common.OpType.REPLACE, 'REPLACE_BZ': common.OpType.REPLACE_BZ, - 'MOVE': common.OpType.MOVE, - 'BSDIFF': common.OpType.BSDIFF, 'SOURCE_COPY': common.OpType.SOURCE_COPY, 'SOURCE_BSDIFF': common.OpType.SOURCE_BSDIFF, 'ZERO': common.OpType.ZERO, @@ -65,7 +66,7 @@ def _GetPayloadChecker(payload_gen_write_to_file_func, payload_gen_dargs=None, if checker_init_dargs is None: checker_init_dargs = {} - payload_file = cStringIO.StringIO() + payload_file = io.BytesIO() payload_gen_write_to_file_func(payload_file, **payload_gen_dargs) payload_file.seek(0) payload = Payload(payload_file) @@ -75,7 +76,7 @@ def _GetPayloadChecker(payload_gen_write_to_file_func, payload_gen_dargs=None, def _GetPayloadCheckerWithData(payload_gen): """Returns a payload checker from a given payload generator.""" - payload_file = cStringIO.StringIO() + payload_file = io.BytesIO() payload_gen.WriteToFile(payload_file) payload_file.seek(0) payload = Payload(payload_file) @@ -89,7 +90,7 @@ def _GetPayloadCheckerWithData(payload_gen): # pylint: disable=W0212 # Don't bark about missing members of classes you cannot import. # pylint: disable=E1101 -class PayloadCheckerTest(mox.MoxTestBase): +class PayloadCheckerTest(unittest.TestCase): """Tests the PayloadChecker class. In addition to ordinary testFoo() methods, which are automatically invoked by @@ -102,11 +103,42 @@ class PayloadCheckerTest(mox.MoxTestBase): all such tests is done in AddAllParametricTests(). """ + def setUp(self): + """setUp function for unittest testcase""" + self.mock_checks = [] + + def tearDown(self): + """tearDown function for unittest testcase""" + # Verify that all mock functions were called. + for check in self.mock_checks: + check.mock_fn.assert_called_once_with(*check.exp_args, **check.exp_kwargs) + + class MockChecksAtTearDown(object): + """Mock data storage. 
+ + This class stores the mock functions and its arguments to be checked at a + later point. + """ + def __init__(self, mock_fn, *args, **kwargs): + self.mock_fn = mock_fn + self.exp_args = args + self.exp_kwargs = kwargs + + def addPostCheckForMockFunction(self, mock_fn, *args, **kwargs): + """Store a mock function and its arguments to self.mock_checks + + Args: + mock_fn: mock function object + args: expected positional arguments for the mock_fn + kwargs: expected named arguments for the mock_fn + """ + self.mock_checks.append(self.MockChecksAtTearDown(mock_fn, *args, **kwargs)) + def MockPayload(self): """Create a mock payload object, complete with a mock manifest.""" - payload = self.mox.CreateMock(Payload) + payload = mock.create_autospec(Payload) payload.is_init = True - payload.manifest = self.mox.CreateMock( + payload.manifest = mock.create_autospec( update_metadata_pb2.DeltaArchiveManifest) return payload @@ -175,19 +207,20 @@ def SetupAddElemTest(self, is_present, is_submsg, convert=str, subreport = 'fake subreport' # Create a mock message. - msg = self.mox.CreateMock(update_metadata_pb2._message.Message) - msg.HasField(name).AndReturn(is_present) + msg = mock.create_autospec(update_metadata_pb2._message.Message) + self.addPostCheckForMockFunction(msg.HasField, name) + msg.HasField.return_value = is_present setattr(msg, name, val) - # Create a mock report. 
- report = self.mox.CreateMock(checker._PayloadReport) + report = mock.create_autospec(checker._PayloadReport) if is_present: if is_submsg: - report.AddSubReport(name).AndReturn(subreport) + self.addPostCheckForMockFunction(report.AddSubReport, name) + report.AddSubReport.return_value = subreport else: - report.AddField(name, convert(val), linebreak=linebreak, indent=indent) + self.addPostCheckForMockFunction(report.AddField, name, convert(val), + linebreak=linebreak, indent=indent) - self.mox.ReplayAll() return (msg, report, subreport, name, val) def DoAddElemTest(self, is_present, is_mandatory, is_submsg, convert, @@ -213,9 +246,9 @@ def DoAddElemTest(self, is_present, is_mandatory, is_submsg, convert, else: ret_val, ret_subreport = checker.PayloadChecker._CheckElem(*args, **kwargs) - self.assertEquals(val if is_present else None, ret_val) - self.assertEquals(subreport if is_present and is_submsg else None, - ret_subreport) + self.assertEqual(val if is_present else None, ret_val) + self.assertEqual(subreport if is_present and is_submsg else None, + ret_subreport) def DoAddFieldTest(self, is_mandatory, is_present, convert, linebreak, indent): @@ -245,7 +278,7 @@ def DoAddFieldTest(self, is_mandatory, is_present, convert, linebreak, self.assertRaises(PayloadError, tested_func, *args, **kwargs) else: ret_val = tested_func(*args, **kwargs) - self.assertEquals(val if is_present else None, ret_val) + self.assertEqual(val if is_present else None, ret_val) def DoAddSubMsgTest(self, is_mandatory, is_present): """Parametrized testing of _Check{Mandatory,Optional}SubMsg(). 
@@ -269,8 +302,8 @@ def DoAddSubMsgTest(self, is_mandatory, is_present): self.assertRaises(PayloadError, tested_func, *args) else: ret_val, ret_subreport = tested_func(*args) - self.assertEquals(val if is_present else None, ret_val) - self.assertEquals(subreport if is_present else None, ret_subreport) + self.assertEqual(val if is_present else None, ret_val) + self.assertEqual(subreport if is_present else None, ret_subreport) def testCheckPresentIff(self): """Tests _CheckPresentIff().""" @@ -296,15 +329,14 @@ def DoCheckSha256SignatureTest(self, expect_pass, expect_subprocess_call, returned_signed_hash: The signed hash data retuned by openssl. expected_signed_hash: The signed hash data to compare against. """ - try: - # Stub out the subprocess invocation. - self.mox.StubOutWithMock(checker.PayloadChecker, '_Run') + # Stub out the subprocess invocation. + with mock.patch.object(checker.PayloadChecker, '_Run') \ + as mock_payload_checker: if expect_subprocess_call: - checker.PayloadChecker._Run( - mox.IsA(list), send_data=sig_data).AndReturn( - (sig_asn1_header + returned_signed_hash, None)) + mock_payload_checker([], send_data=sig_data) + mock_payload_checker.return_value = ( + sig_asn1_header + returned_signed_hash, None) - self.mox.ReplayAll() if expect_pass: self.assertIsNone(checker.PayloadChecker._CheckSha256Signature( sig_data, 'foo', expected_signed_hash, 'bar')) @@ -312,13 +344,11 @@ def DoCheckSha256SignatureTest(self, expect_pass, expect_subprocess_call, self.assertRaises(PayloadError, checker.PayloadChecker._CheckSha256Signature, sig_data, 'foo', expected_signed_hash, 'bar') - finally: - self.mox.UnsetStubs() def testCheckSha256Signature_Pass(self): """Tests _CheckSha256Signature(); pass case.""" sig_data = 'fake-signature'.ljust(256) - signed_hash = hashlib.sha256('fake-data').digest() + signed_hash = hashlib.sha256(b'fake-data').digest() self.DoCheckSha256SignatureTest(True, True, sig_data, common.SIG_ASN1_HEADER, signed_hash, signed_hash) @@ -326,7 
+356,7 @@ def testCheckSha256Signature_Pass(self): def testCheckSha256Signature_FailBadSignature(self): """Tests _CheckSha256Signature(); fails due to malformed signature.""" sig_data = 'fake-signature' # Malformed (not 256 bytes in length). - signed_hash = hashlib.sha256('fake-data').digest() + signed_hash = hashlib.sha256(b'fake-data').digest() self.DoCheckSha256SignatureTest(False, False, sig_data, common.SIG_ASN1_HEADER, signed_hash, signed_hash) @@ -334,7 +364,7 @@ def testCheckSha256Signature_FailBadSignature(self): def testCheckSha256Signature_FailBadOutputLength(self): """Tests _CheckSha256Signature(); fails due to unexpected output length.""" sig_data = 'fake-signature'.ljust(256) - signed_hash = 'fake-hash' # Malformed (not 32 bytes in length). + signed_hash = b'fake-hash' # Malformed (not 32 bytes in length). self.DoCheckSha256SignatureTest(False, True, sig_data, common.SIG_ASN1_HEADER, signed_hash, signed_hash) @@ -342,16 +372,16 @@ def testCheckSha256Signature_FailBadOutputLength(self): def testCheckSha256Signature_FailBadAsnHeader(self): """Tests _CheckSha256Signature(); fails due to bad ASN1 header.""" sig_data = 'fake-signature'.ljust(256) - signed_hash = hashlib.sha256('fake-data').digest() - bad_asn1_header = 'bad-asn-header'.ljust(len(common.SIG_ASN1_HEADER)) + signed_hash = hashlib.sha256(b'fake-data').digest() + bad_asn1_header = b'bad-asn-header'.ljust(len(common.SIG_ASN1_HEADER)) self.DoCheckSha256SignatureTest(False, True, sig_data, bad_asn1_header, signed_hash, signed_hash) def testCheckSha256Signature_FailBadHash(self): """Tests _CheckSha256Signature(); fails due to bad hash returned.""" sig_data = 'fake-signature'.ljust(256) - expected_signed_hash = hashlib.sha256('fake-data').digest() - returned_signed_hash = hashlib.sha256('bad-fake-data').digest() + expected_signed_hash = hashlib.sha256(b'fake-data').digest() + returned_signed_hash = hashlib.sha256(b'bad-fake-data').digest() self.DoCheckSha256SignatureTest(False, True, sig_data, 
common.SIG_ASN1_HEADER, expected_signed_hash, returned_signed_hash) @@ -429,10 +459,10 @@ def DoCheckManifestTest(self, fail_mismatched_block_size, fail_bad_sigs, payload_gen.SetBlockSize(test_utils.KiB(4)) # Add some operations. - payload_gen.AddOperation(False, common.OpType.MOVE, + payload_gen.AddOperation(common.ROOTFS, common.OpType.SOURCE_COPY, src_extents=[(0, 16), (16, 497)], dst_extents=[(16, 496), (0, 16)]) - payload_gen.AddOperation(True, common.OpType.MOVE, + payload_gen.AddOperation(common.KERNEL, common.OpType.SOURCE_COPY, src_extents=[(0, 8), (8, 8)], dst_extents=[(8, 8), (0, 8)]) @@ -457,21 +487,23 @@ def DoCheckManifestTest(self, fail_mismatched_block_size, fail_bad_sigs, # Add old kernel/rootfs partition info, as required. if fail_mismatched_oki_ori or fail_old_kernel_fs_size or fail_bad_oki: oki_hash = (None if fail_bad_oki - else hashlib.sha256('fake-oki-content').digest()) - payload_gen.SetPartInfo(True, False, old_kernel_fs_size, oki_hash) + else hashlib.sha256(b'fake-oki-content').digest()) + payload_gen.SetPartInfo(common.KERNEL, False, old_kernel_fs_size, + oki_hash) if not fail_mismatched_oki_ori and (fail_old_rootfs_fs_size or fail_bad_ori): ori_hash = (None if fail_bad_ori - else hashlib.sha256('fake-ori-content').digest()) - payload_gen.SetPartInfo(False, False, old_rootfs_fs_size, ori_hash) + else hashlib.sha256(b'fake-ori-content').digest()) + payload_gen.SetPartInfo(common.ROOTFS, False, old_rootfs_fs_size, + ori_hash) # Add new kernel/rootfs partition info. 
payload_gen.SetPartInfo( - True, True, new_kernel_fs_size, - None if fail_bad_nki else hashlib.sha256('fake-nki-content').digest()) + common.KERNEL, True, new_kernel_fs_size, + None if fail_bad_nki else hashlib.sha256(b'fake-nki-content').digest()) payload_gen.SetPartInfo( - False, True, new_rootfs_fs_size, - None if fail_bad_nri else hashlib.sha256('fake-nri-content').digest()) + common.ROOTFS, True, new_rootfs_fs_size, + None if fail_bad_nri else hashlib.sha256(b'fake-nri-content').digest()) # Set the minor version. payload_gen.SetMinorVersion(0) @@ -485,13 +517,16 @@ def DoCheckManifestTest(self, fail_mismatched_block_size, fail_bad_sigs, fail_bad_nki or fail_bad_nri or fail_old_kernel_fs_size or fail_old_rootfs_fs_size or fail_new_kernel_fs_size or fail_new_rootfs_fs_size) + part_sizes = { + common.ROOTFS: rootfs_part_size, + common.KERNEL: kernel_part_size + } + if should_fail: self.assertRaises(PayloadError, payload_checker._CheckManifest, report, - rootfs_part_size, kernel_part_size) + part_sizes) else: - self.assertIsNone(payload_checker._CheckManifest(report, - rootfs_part_size, - kernel_part_size)) + self.assertIsNone(payload_checker._CheckManifest(report, part_sizes)) def testCheckLength(self): """Tests _CheckLength().""" @@ -515,28 +550,11 @@ def testCheckExtents(self): # Passes w/ all real extents. extents = self.NewExtentList((0, 4), (8, 3), (1024, 16)) - self.assertEquals( + self.assertEqual( 23, payload_checker._CheckExtents(extents, (1024 + 16) * block_size, collections.defaultdict(int), 'foo')) - # Passes w/ pseudo-extents (aka sparse holes). - extents = self.NewExtentList((0, 4), (common.PSEUDO_EXTENT_MARKER, 5), - (8, 3)) - self.assertEquals( - 12, - payload_checker._CheckExtents(extents, (1024 + 16) * block_size, - collections.defaultdict(int), 'foo', - allow_pseudo=True)) - - # Passes w/ pseudo-extent due to a signature. 
- extents = self.NewExtentList((common.PSEUDO_EXTENT_MARKER, 2)) - self.assertEquals( - 2, - payload_checker._CheckExtents(extents, (1024 + 16) * block_size, - collections.defaultdict(int), 'foo', - allow_signature=True)) - # Fails, extent missing a start block. extents = self.NewExtentList((-1, 4), (8, 3), (1024, 16)) self.assertRaises( @@ -567,34 +585,34 @@ def testCheckReplaceOperation(self): block_size = payload_checker.block_size data_length = 10000 - op = self.mox.CreateMock( - update_metadata_pb2.InstallOperation) + op = mock.create_autospec(update_metadata_pb2.InstallOperation) op.type = common.OpType.REPLACE # Pass. op.src_extents = [] self.assertIsNone( payload_checker._CheckReplaceOperation( - op, data_length, (data_length + block_size - 1) / block_size, + op, data_length, (data_length + block_size - 1) // block_size, 'foo')) # Fail, src extents founds. op.src_extents = ['bar'] self.assertRaises( PayloadError, payload_checker._CheckReplaceOperation, - op, data_length, (data_length + block_size - 1) / block_size, 'foo') + op, data_length, (data_length + block_size - 1) // block_size, 'foo') # Fail, missing data. op.src_extents = [] self.assertRaises( PayloadError, payload_checker._CheckReplaceOperation, - op, None, (data_length + block_size - 1) / block_size, 'foo') + op, None, (data_length + block_size - 1) // block_size, 'foo') # Fail, length / block number mismatch. 
op.src_extents = ['bar'] self.assertRaises( PayloadError, payload_checker._CheckReplaceOperation, - op, data_length, (data_length + block_size - 1) / block_size + 1, 'foo') + op, data_length, (data_length + block_size - 1) // block_size + 1, + 'foo') def testCheckReplaceBzOperation(self): """Tests _CheckReplaceOperation() where op.type == REPLACE_BZ.""" @@ -602,7 +620,7 @@ def testCheckReplaceBzOperation(self): block_size = payload_checker.block_size data_length = block_size * 3 - op = self.mox.CreateMock( + op = mock.create_autospec( update_metadata_pb2.InstallOperation) op.type = common.OpType.REPLACE_BZ @@ -610,23 +628,30 @@ def testCheckReplaceBzOperation(self): op.src_extents = [] self.assertIsNone( payload_checker._CheckReplaceOperation( - op, data_length, (data_length + block_size - 1) / block_size + 5, + op, data_length, (data_length + block_size - 1) // block_size + 5, 'foo')) # Fail, src extents founds. op.src_extents = ['bar'] self.assertRaises( PayloadError, payload_checker._CheckReplaceOperation, - op, data_length, (data_length + block_size - 1) / block_size + 5, 'foo') + op, data_length, (data_length + block_size - 1) // block_size + 5, + 'foo') # Fail, missing data. op.src_extents = [] self.assertRaises( PayloadError, payload_checker._CheckReplaceOperation, - op, None, (data_length + block_size - 1) / block_size, 'foo') + op, None, (data_length + block_size - 1) // block_size, 'foo') # Fail, too few blocks to justify BZ. op.src_extents = [] + self.assertRaises( + PayloadError, payload_checker._CheckReplaceOperation, + op, data_length, (data_length + block_size - 1) // block_size, 'foo') + + # Fail, total_dst_blocks is a floating point value. 
+ op.src_extents = [] self.assertRaises( PayloadError, payload_checker._CheckReplaceOperation, op, data_length, (data_length + block_size - 1) / block_size, 'foo') @@ -637,7 +662,7 @@ def testCheckReplaceXzOperation(self): block_size = payload_checker.block_size data_length = block_size * 3 - op = self.mox.CreateMock( + op = mock.create_autospec( update_metadata_pb2.InstallOperation) op.type = common.OpType.REPLACE_XZ @@ -645,152 +670,33 @@ def testCheckReplaceXzOperation(self): op.src_extents = [] self.assertIsNone( payload_checker._CheckReplaceOperation( - op, data_length, (data_length + block_size - 1) / block_size + 5, + op, data_length, (data_length + block_size - 1) // block_size + 5, 'foo')) # Fail, src extents founds. op.src_extents = ['bar'] self.assertRaises( PayloadError, payload_checker._CheckReplaceOperation, - op, data_length, (data_length + block_size - 1) / block_size + 5, 'foo') + op, data_length, (data_length + block_size - 1) // block_size + 5, + 'foo') # Fail, missing data. op.src_extents = [] self.assertRaises( PayloadError, payload_checker._CheckReplaceOperation, - op, None, (data_length + block_size - 1) / block_size, 'foo') + op, None, (data_length + block_size - 1) // block_size, 'foo') # Fail, too few blocks to justify XZ. 
op.src_extents = [] self.assertRaises( PayloadError, payload_checker._CheckReplaceOperation, - op, data_length, (data_length + block_size - 1) / block_size, 'foo') - - def testCheckMoveOperation_Pass(self): - """Tests _CheckMoveOperation(); pass case.""" - payload_checker = checker.PayloadChecker(self.MockPayload()) - op = update_metadata_pb2.InstallOperation() - op.type = common.OpType.MOVE - - self.AddToMessage(op.src_extents, - self.NewExtentList((1, 4), (12, 2), (1024, 128))) - self.AddToMessage(op.dst_extents, - self.NewExtentList((16, 128), (512, 6))) - self.assertIsNone( - payload_checker._CheckMoveOperation(op, None, 134, 134, 'foo')) - - def testCheckMoveOperation_FailContainsData(self): - """Tests _CheckMoveOperation(); fails, message contains data.""" - payload_checker = checker.PayloadChecker(self.MockPayload()) - op = update_metadata_pb2.InstallOperation() - op.type = common.OpType.MOVE - - self.AddToMessage(op.src_extents, - self.NewExtentList((1, 4), (12, 2), (1024, 128))) - self.AddToMessage(op.dst_extents, - self.NewExtentList((16, 128), (512, 6))) - self.assertRaises( - PayloadError, payload_checker._CheckMoveOperation, - op, 1024, 134, 134, 'foo') - - def testCheckMoveOperation_FailInsufficientSrcBlocks(self): - """Tests _CheckMoveOperation(); fails, not enough actual src blocks.""" - payload_checker = checker.PayloadChecker(self.MockPayload()) - op = update_metadata_pb2.InstallOperation() - op.type = common.OpType.MOVE + op, data_length, (data_length + block_size - 1) // block_size, 'foo') - self.AddToMessage(op.src_extents, - self.NewExtentList((1, 4), (12, 2), (1024, 127))) - self.AddToMessage(op.dst_extents, - self.NewExtentList((16, 128), (512, 6))) - self.assertRaises( - PayloadError, payload_checker._CheckMoveOperation, - op, None, 134, 134, 'foo') - - def testCheckMoveOperation_FailInsufficientDstBlocks(self): - """Tests _CheckMoveOperation(); fails, not enough actual dst blocks.""" - payload_checker = 
checker.PayloadChecker(self.MockPayload()) - op = update_metadata_pb2.InstallOperation() - op.type = common.OpType.MOVE - - self.AddToMessage(op.src_extents, - self.NewExtentList((1, 4), (12, 2), (1024, 128))) - self.AddToMessage(op.dst_extents, - self.NewExtentList((16, 128), (512, 5))) - self.assertRaises( - PayloadError, payload_checker._CheckMoveOperation, - op, None, 134, 134, 'foo') - - def testCheckMoveOperation_FailExcessSrcBlocks(self): - """Tests _CheckMoveOperation(); fails, too many actual src blocks.""" - payload_checker = checker.PayloadChecker(self.MockPayload()) - op = update_metadata_pb2.InstallOperation() - op.type = common.OpType.MOVE - - self.AddToMessage(op.src_extents, - self.NewExtentList((1, 4), (12, 2), (1024, 128))) - self.AddToMessage(op.dst_extents, - self.NewExtentList((16, 128), (512, 5))) - self.assertRaises( - PayloadError, payload_checker._CheckMoveOperation, - op, None, 134, 134, 'foo') - self.AddToMessage(op.src_extents, - self.NewExtentList((1, 4), (12, 2), (1024, 129))) - self.AddToMessage(op.dst_extents, - self.NewExtentList((16, 128), (512, 6))) - self.assertRaises( - PayloadError, payload_checker._CheckMoveOperation, - op, None, 134, 134, 'foo') - - def testCheckMoveOperation_FailExcessDstBlocks(self): - """Tests _CheckMoveOperation(); fails, too many actual dst blocks.""" - payload_checker = checker.PayloadChecker(self.MockPayload()) - op = update_metadata_pb2.InstallOperation() - op.type = common.OpType.MOVE - - self.AddToMessage(op.src_extents, - self.NewExtentList((1, 4), (12, 2), (1024, 128))) - self.AddToMessage(op.dst_extents, - self.NewExtentList((16, 128), (512, 7))) - self.assertRaises( - PayloadError, payload_checker._CheckMoveOperation, - op, None, 134, 134, 'foo') - - def testCheckMoveOperation_FailStagnantBlocks(self): - """Tests _CheckMoveOperation(); fails, there are blocks that do not move.""" - payload_checker = checker.PayloadChecker(self.MockPayload()) - op = update_metadata_pb2.InstallOperation() - 
op.type = common.OpType.MOVE - - self.AddToMessage(op.src_extents, - self.NewExtentList((1, 4), (12, 2), (1024, 128))) - self.AddToMessage(op.dst_extents, - self.NewExtentList((8, 128), (512, 6))) - self.assertRaises( - PayloadError, payload_checker._CheckMoveOperation, - op, None, 134, 134, 'foo') - - def testCheckMoveOperation_FailZeroStartBlock(self): - """Tests _CheckMoveOperation(); fails, has extent with start block 0.""" - payload_checker = checker.PayloadChecker(self.MockPayload()) - op = update_metadata_pb2.InstallOperation() - op.type = common.OpType.MOVE - - self.AddToMessage(op.src_extents, - self.NewExtentList((0, 4), (12, 2), (1024, 128))) - self.AddToMessage(op.dst_extents, - self.NewExtentList((8, 128), (512, 6))) - self.assertRaises( - PayloadError, payload_checker._CheckMoveOperation, - op, None, 134, 134, 'foo') - - self.AddToMessage(op.src_extents, - self.NewExtentList((1, 4), (12, 2), (1024, 128))) - self.AddToMessage(op.dst_extents, - self.NewExtentList((0, 128), (512, 6))) + # Fail, total_dst_blocks is a floating point value. 
+ op.src_extents = [] self.assertRaises( - PayloadError, payload_checker._CheckMoveOperation, - op, None, 134, 134, 'foo') + PayloadError, payload_checker._CheckReplaceOperation, + op, data_length, (data_length + block_size - 1) / block_size, 'foo') def testCheckAnyDiff(self): """Tests _CheckAnyDiffOperation().""" @@ -829,8 +735,8 @@ def testCheckSourceCopyOperation_FailBlockCountsMismatch(self): self.assertRaises(PayloadError, payload_checker._CheckSourceCopyOperation, None, 0, 1, 'foo') - def DoCheckOperationTest(self, op_type_name, is_last, allow_signature, - allow_unhashed, fail_src_extents, fail_dst_extents, + def DoCheckOperationTest(self, op_type_name, allow_unhashed, + fail_src_extents, fail_dst_extents, fail_mismatched_data_offset_length, fail_missing_dst_extents, fail_src_length, fail_dst_length, fail_data_hash, @@ -838,10 +744,8 @@ def DoCheckOperationTest(self, op_type_name, is_last, allow_signature, """Parametric testing of _CheckOperation(). Args: - op_type_name: 'REPLACE', 'REPLACE_BZ', 'REPLACE_XZ', 'MOVE', 'BSDIFF', + op_type_name: 'REPLACE', 'REPLACE_BZ', 'REPLACE_XZ', 'SOURCE_COPY', 'SOURCE_BSDIFF', BROTLI_BSDIFF or 'PUFFDIFF'. - is_last: Whether we're testing the last operation in a sequence. - allow_signature: Whether we're testing a signature-capable operation. allow_unhashed: Whether we're allowing to not hash the data. fail_src_extents: Tamper with src extents. fail_dst_extents: Tamper with dst extents. 
@@ -866,9 +770,9 @@ def DoCheckOperationTest(self, op_type_name, is_last, allow_signature, old_part_size = test_utils.MiB(4) new_part_size = test_utils.MiB(8) old_block_counters = array.array( - 'B', [0] * ((old_part_size + block_size - 1) / block_size)) + 'B', [0] * ((old_part_size + block_size - 1) // block_size)) new_block_counters = array.array( - 'B', [0] * ((new_part_size + block_size - 1) / block_size)) + 'B', [0] * ((new_part_size + block_size - 1) // block_size)) prev_data_offset = 1876 blob_hash_counts = collections.defaultdict(int) @@ -877,8 +781,7 @@ def DoCheckOperationTest(self, op_type_name, is_last, allow_signature, op.type = op_type total_src_blocks = 0 - if op_type in (common.OpType.MOVE, common.OpType.BSDIFF, - common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF, + if op_type in (common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF, common.OpType.PUFFDIFF, common.OpType.BROTLI_BSDIFF): if fail_src_extents: self.AddToMessage(op.src_extents, @@ -888,10 +791,9 @@ def DoCheckOperationTest(self, op_type_name, is_last, allow_signature, self.NewExtentList((1, 16))) total_src_blocks = 16 + payload_checker.major_version = common.BRILLO_MAJOR_PAYLOAD_VERSION if op_type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ): payload_checker.minor_version = 0 - elif op_type in (common.OpType.MOVE, common.OpType.BSDIFF): - payload_checker.minor_version = 2 if fail_bad_minor_version else 1 elif op_type in (common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF): payload_checker.minor_version = 1 if fail_bad_minor_version else 2 if op_type == common.OpType.REPLACE_XZ: @@ -902,7 +804,7 @@ def DoCheckOperationTest(self, op_type_name, is_last, allow_signature, elif op_type == common.OpType.PUFFDIFF: payload_checker.minor_version = 4 if fail_bad_minor_version else 5 - if op_type not in (common.OpType.MOVE, common.OpType.SOURCE_COPY): + if op_type != common.OpType.SOURCE_COPY: if not fail_mismatched_data_offset_length: op.data_length = 16 * block_size - 8 if 
fail_prev_data_offset: @@ -911,20 +813,16 @@ def DoCheckOperationTest(self, op_type_name, is_last, allow_signature, op.data_offset = prev_data_offset fake_data = 'fake-data'.ljust(op.data_length) - if not (allow_unhashed or (is_last and allow_signature and - op_type == common.OpType.REPLACE)): - if not fail_data_hash: - # Create a valid data blob hash. - op.data_sha256_hash = hashlib.sha256(fake_data).digest() - payload.ReadDataBlob(op.data_offset, op.data_length).AndReturn( - fake_data) + if not allow_unhashed and not fail_data_hash: + # Create a valid data blob hash. + op.data_sha256_hash = hashlib.sha256(fake_data.encode('utf-8')).digest() + payload.ReadDataBlob.return_value = fake_data.encode('utf-8') elif fail_data_hash: # Create an invalid data blob hash. op.data_sha256_hash = hashlib.sha256( - fake_data.replace(' ', '-')).digest() - payload.ReadDataBlob(op.data_offset, op.data_length).AndReturn( - fake_data) + fake_data.replace(' ', '-').encode('utf-8')).digest() + payload.ReadDataBlob.return_value = fake_data.encode('utf-8') total_dst_blocks = 0 if not fail_missing_dst_extents: @@ -939,8 +837,7 @@ def DoCheckOperationTest(self, op_type_name, is_last, allow_signature, if total_src_blocks: if fail_src_length: op.src_length = total_src_blocks * block_size + 8 - elif (op_type in (common.OpType.MOVE, common.OpType.BSDIFF, - common.OpType.SOURCE_BSDIFF) and + elif (op_type == common.OpType.SOURCE_BSDIFF and payload_checker.minor_version <= 3): op.src_length = total_src_blocks * block_size elif fail_src_length: @@ -950,19 +847,17 @@ def DoCheckOperationTest(self, op_type_name, is_last, allow_signature, if total_dst_blocks: if fail_dst_length: op.dst_length = total_dst_blocks * block_size + 8 - elif (op_type in (common.OpType.MOVE, common.OpType.BSDIFF, - common.OpType.SOURCE_BSDIFF) and + elif (op_type == common.OpType.SOURCE_BSDIFF and payload_checker.minor_version <= 3): op.dst_length = total_dst_blocks * block_size - self.mox.ReplayAll() should_fail = 
(fail_src_extents or fail_dst_extents or fail_mismatched_data_offset_length or fail_missing_dst_extents or fail_src_length or fail_dst_length or fail_data_hash or fail_prev_data_offset or fail_bad_minor_version) - args = (op, 'foo', is_last, old_block_counters, new_block_counters, - old_part_size, new_part_size, prev_data_offset, allow_signature, + args = (op, 'foo', old_block_counters, new_block_counters, + old_part_size, new_part_size, prev_data_offset, blob_hash_counts) if should_fail: self.assertRaises(PayloadError, payload_checker._CheckOperation, *args) @@ -1004,8 +899,9 @@ def DoCheckOperationsTest(self, fail_nonexhaustive_full_update): if fail_nonexhaustive_full_update: rootfs_data_length -= block_size - payload_gen.AddOperation(False, rootfs_op_type, - dst_extents=[(0, rootfs_data_length / block_size)], + payload_gen.AddOperation(common.ROOTFS, rootfs_op_type, + dst_extents= + [(0, rootfs_data_length // block_size)], data_offset=0, data_length=rootfs_data_length) @@ -1015,17 +911,17 @@ def DoCheckOperationsTest(self, fail_nonexhaustive_full_update): 'allow_unhashed': True}) payload_checker.payload_type = checker._TYPE_FULL report = checker._PayloadReport() - - args = (payload_checker.payload.manifest.install_operations, report, 'foo', - 0, rootfs_part_size, rootfs_part_size, rootfs_part_size, 0, False) + partition = next((p for p in payload_checker.payload.manifest.partitions + if p.partition_name == common.ROOTFS), None) + args = (partition.operations, report, 'foo', + 0, rootfs_part_size, rootfs_part_size, rootfs_part_size, 0) if fail_nonexhaustive_full_update: self.assertRaises(PayloadError, payload_checker._CheckOperations, *args) else: self.assertEqual(rootfs_data_length, payload_checker._CheckOperations(*args)) - def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_missing_pseudo_op, - fail_mismatched_pseudo_op, fail_sig_missing_fields, + def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_sig_missing_fields, fail_unknown_sig_version, 
fail_incorrect_sig): """Tests _CheckSignatures().""" # Generate a test payload. For this test, we only care about the signature @@ -1036,20 +932,18 @@ def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_missing_pseudo_op, payload_gen.SetBlockSize(block_size) rootfs_part_size = test_utils.MiB(2) kernel_part_size = test_utils.KiB(16) - payload_gen.SetPartInfo(False, True, rootfs_part_size, - hashlib.sha256('fake-new-rootfs-content').digest()) - payload_gen.SetPartInfo(True, True, kernel_part_size, - hashlib.sha256('fake-new-kernel-content').digest()) + payload_gen.SetPartInfo(common.ROOTFS, True, rootfs_part_size, + hashlib.sha256(b'fake-new-rootfs-content').digest()) + payload_gen.SetPartInfo(common.KERNEL, True, kernel_part_size, + hashlib.sha256(b'fake-new-kernel-content').digest()) payload_gen.SetMinorVersion(0) payload_gen.AddOperationWithData( - False, common.OpType.REPLACE, - dst_extents=[(0, rootfs_part_size / block_size)], + common.ROOTFS, common.OpType.REPLACE, + dst_extents=[(0, rootfs_part_size // block_size)], data_blob=os.urandom(rootfs_part_size)) - do_forge_pseudo_op = (fail_missing_pseudo_op or fail_mismatched_pseudo_op) - do_forge_sigs_data = (do_forge_pseudo_op or fail_empty_sigs_blob or - fail_sig_missing_fields or fail_unknown_sig_version - or fail_incorrect_sig) + do_forge_sigs_data = (fail_empty_sigs_blob or fail_sig_missing_fields or + fail_unknown_sig_version or fail_incorrect_sig) sigs_data = None if do_forge_sigs_data: @@ -1058,37 +952,29 @@ def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_missing_pseudo_op, if fail_sig_missing_fields: sig_data = None else: - sig_data = test_utils.SignSha256('fake-payload-content', + sig_data = test_utils.SignSha256(b'fake-payload-content', test_utils._PRIVKEY_FILE_NAME) sigs_gen.AddSig(5 if fail_unknown_sig_version else 1, sig_data) sigs_data = sigs_gen.ToBinary() payload_gen.SetSignatures(payload_gen.curr_offset, len(sigs_data)) - if do_forge_pseudo_op: - assert sigs_data is not None, 
'should have forged signatures blob by now' - sigs_len = len(sigs_data) - payload_gen.AddOperation( - False, common.OpType.REPLACE, - data_offset=payload_gen.curr_offset / 2, - data_length=sigs_len / 2, - dst_extents=[(0, (sigs_len / 2 + block_size - 1) / block_size)]) - # Generate payload (complete w/ signature) and create the test object. payload_checker = _GetPayloadChecker( payload_gen.WriteToFileWithData, payload_gen_dargs={ 'sigs_data': sigs_data, - 'privkey_file_name': test_utils._PRIVKEY_FILE_NAME, - 'do_add_pseudo_operation': not do_forge_pseudo_op}) + 'privkey_file_name': test_utils._PRIVKEY_FILE_NAME}) payload_checker.payload_type = checker._TYPE_FULL report = checker._PayloadReport() # We have to check the manifest first in order to set signature attributes. - payload_checker._CheckManifest(report, rootfs_part_size, kernel_part_size) + payload_checker._CheckManifest(report, { + common.ROOTFS: rootfs_part_size, + common.KERNEL: kernel_part_size + }) - should_fail = (fail_empty_sigs_blob or fail_missing_pseudo_op or - fail_mismatched_pseudo_op or fail_sig_missing_fields or + should_fail = (fail_empty_sigs_blob or fail_sig_missing_fields or fail_unknown_sig_version or fail_incorrect_sig) args = (report, test_utils._PUBKEY_FILE_NAME) if should_fail: @@ -1112,7 +998,6 @@ def DoCheckManifestMinorVersionTest(self, minor_version, payload_type): should_succeed = ( (minor_version == 0 and payload_type == checker._TYPE_FULL) or - (minor_version == 1 and payload_type == checker._TYPE_DELTA) or (minor_version == 2 and payload_type == checker._TYPE_DELTA) or (minor_version == 3 and payload_type == checker._TYPE_DELTA) or (minor_version == 4 and payload_type == checker._TYPE_DELTA) or @@ -1127,8 +1012,8 @@ def DoCheckManifestMinorVersionTest(self, minor_version, payload_type): def DoRunTest(self, rootfs_part_size_provided, kernel_part_size_provided, fail_wrong_payload_type, fail_invalid_block_size, - fail_mismatched_block_size, fail_excess_data, - 
fail_rootfs_part_size_exceeded, + fail_mismatched_metadata_size, fail_mismatched_block_size, + fail_excess_data, fail_rootfs_part_size_exceeded, fail_kernel_part_size_exceeded): """Tests Run().""" # Generate a test payload. For this test, we generate a full update that @@ -1142,10 +1027,10 @@ def DoRunTest(self, rootfs_part_size_provided, kernel_part_size_provided, payload_gen.SetBlockSize(block_size) kernel_filesystem_size = test_utils.KiB(16) rootfs_filesystem_size = test_utils.MiB(2) - payload_gen.SetPartInfo(False, True, rootfs_filesystem_size, - hashlib.sha256('fake-new-rootfs-content').digest()) - payload_gen.SetPartInfo(True, True, kernel_filesystem_size, - hashlib.sha256('fake-new-kernel-content').digest()) + payload_gen.SetPartInfo(common.ROOTFS, True, rootfs_filesystem_size, + hashlib.sha256(b'fake-new-rootfs-content').digest()) + payload_gen.SetPartInfo(common.KERNEL, True, kernel_filesystem_size, + hashlib.sha256(b'fake-new-kernel-content').digest()) payload_gen.SetMinorVersion(0) rootfs_part_size = 0 @@ -1155,8 +1040,8 @@ def DoRunTest(self, rootfs_part_size_provided, kernel_part_size_provided, if fail_rootfs_part_size_exceeded: rootfs_op_size += block_size payload_gen.AddOperationWithData( - False, common.OpType.REPLACE, - dst_extents=[(0, rootfs_op_size / block_size)], + common.ROOTFS, common.OpType.REPLACE, + dst_extents=[(0, rootfs_op_size // block_size)], data_blob=os.urandom(rootfs_op_size)) kernel_part_size = 0 @@ -1166,8 +1051,8 @@ def DoRunTest(self, rootfs_part_size_provided, kernel_part_size_provided, if fail_kernel_part_size_exceeded: kernel_op_size += block_size payload_gen.AddOperationWithData( - True, common.OpType.REPLACE, - dst_extents=[(0, kernel_op_size / block_size)], + common.KERNEL, common.OpType.REPLACE, + dst_extents=[(0, kernel_op_size // block_size)], data_blob=os.urandom(kernel_op_size)) # Generate payload (complete w/ signature) and create the test object. 
@@ -1178,11 +1063,14 @@ def DoRunTest(self, rootfs_part_size_provided, kernel_part_size_provided, else: use_block_size = block_size + # For the unittests 237 is the value that generated for the payload. + metadata_size = 237 + if fail_mismatched_metadata_size: + metadata_size += 1 + kwargs = { 'payload_gen_dargs': { 'privkey_file_name': test_utils._PRIVKEY_FILE_NAME, - 'do_add_pseudo_operation': True, - 'is_pseudo_in_kernel': True, 'padding': os.urandom(1024) if fail_excess_data else None}, 'checker_init_dargs': { 'assert_type': 'delta' if fail_wrong_payload_type else 'full', @@ -1194,23 +1082,27 @@ def DoRunTest(self, rootfs_part_size_provided, kernel_part_size_provided, payload_checker = _GetPayloadChecker(payload_gen.WriteToFileWithData, **kwargs) - kwargs = {'pubkey_file_name': test_utils._PUBKEY_FILE_NAME, - 'rootfs_part_size': rootfs_part_size, - 'kernel_part_size': kernel_part_size} + kwargs2 = { + 'pubkey_file_name': test_utils._PUBKEY_FILE_NAME, + 'metadata_size': metadata_size, + 'part_sizes': { + common.KERNEL: kernel_part_size, + common.ROOTFS: rootfs_part_size}} + should_fail = (fail_wrong_payload_type or fail_mismatched_block_size or - fail_excess_data or + fail_mismatched_metadata_size or fail_excess_data or fail_rootfs_part_size_exceeded or fail_kernel_part_size_exceeded) if should_fail: - self.assertRaises(PayloadError, payload_checker.Run, **kwargs) + self.assertRaises(PayloadError, payload_checker.Run, **kwargs2) else: - self.assertIsNone(payload_checker.Run(**kwargs)) + self.assertIsNone(payload_checker.Run(**kwargs2)) + # This implements a generic API, hence the occasional unused args. 
# pylint: disable=W0613 -def ValidateCheckOperationTest(op_type_name, is_last, allow_signature, - allow_unhashed, fail_src_extents, - fail_dst_extents, +def ValidateCheckOperationTest(op_type_name, allow_unhashed, + fail_src_extents, fail_dst_extents, fail_mismatched_data_offset_length, fail_missing_dst_extents, fail_src_length, fail_dst_length, fail_data_hash, @@ -1227,8 +1119,8 @@ def ValidateCheckOperationTest(op_type_name, is_last, allow_signature, fail_bad_minor_version)): return False - # MOVE and SOURCE_COPY operations don't carry data. - if (op_type in (common.OpType.MOVE, common.OpType.SOURCE_COPY) and ( + # SOURCE_COPY operation does not carry data. + if (op_type == common.OpType.SOURCE_COPY and ( fail_mismatched_data_offset_length or fail_data_hash or fail_prev_data_offset)): return False @@ -1257,14 +1149,14 @@ def AddParametricTests(tested_method_name, arg_space, validate_func=None): (values) associated with them. validate_func: A function used for validating test argument combinations. """ - for value_tuple in itertools.product(*arg_space.itervalues()): - run_dargs = dict(zip(arg_space.iterkeys(), value_tuple)) + for value_tuple in itertools.product(*iter(arg_space.values())): + run_dargs = dict(zip(iter(arg_space.keys()), value_tuple)) if validate_func and not validate_func(**run_dargs): continue run_method_name = 'Do%sTest' % tested_method_name test_method_name = 'test%s' % tested_method_name - for arg_key, arg_val in run_dargs.iteritems(): - if arg_val or type(arg_val) is int: + for arg_key, arg_val in run_dargs.items(): + if arg_val or isinstance(arg_val, int): test_method_name += '__%s=%s' % (arg_key, arg_val) setattr(PayloadCheckerTest, test_method_name, TestMethodBody(run_method_name, run_dargs)) @@ -1311,11 +1203,8 @@ def AddAllParametricTests(): # Add all _CheckOperation() test cases. 
AddParametricTests('CheckOperation', {'op_type_name': ('REPLACE', 'REPLACE_BZ', 'REPLACE_XZ', - 'MOVE', 'BSDIFF', 'SOURCE_COPY', - 'SOURCE_BSDIFF', 'PUFFDIFF', - 'BROTLI_BSDIFF'), - 'is_last': (True, False), - 'allow_signature': (True, False), + 'SOURCE_COPY', 'SOURCE_BSDIFF', + 'PUFFDIFF', 'BROTLI_BSDIFF'), 'allow_unhashed': (True, False), 'fail_src_extents': (True, False), 'fail_dst_extents': (True, False), @@ -1335,15 +1224,13 @@ def AddAllParametricTests(): # Add all _CheckOperations() test cases. AddParametricTests('CheckSignatures', {'fail_empty_sigs_blob': (True, False), - 'fail_missing_pseudo_op': (True, False), - 'fail_mismatched_pseudo_op': (True, False), 'fail_sig_missing_fields': (True, False), 'fail_unknown_sig_version': (True, False), 'fail_incorrect_sig': (True, False)}) # Add all _CheckManifestMinorVersion() test cases. AddParametricTests('CheckManifestMinorVersion', - {'minor_version': (None, 0, 1, 2, 3, 4, 5, 555), + {'minor_version': (None, 0, 2, 3, 4, 5, 555), 'payload_type': (checker._TYPE_FULL, checker._TYPE_DELTA)}) @@ -1353,6 +1240,7 @@ def AddAllParametricTests(): 'kernel_part_size_provided': (True, False), 'fail_wrong_payload_type': (True, False), 'fail_invalid_block_size': (True, False), + 'fail_mismatched_metadata_size': (True, False), 'fail_mismatched_block_size': (True, False), 'fail_excess_data': (True, False), 'fail_rootfs_part_size_exceeded': (True, False), diff --git a/update_payload/common.py b/update_payload/common.py index 4e7b2e3..b934cf8 100644 --- a/update_payload/common.py +++ b/update_payload/common.py @@ -16,8 +16,11 @@ """Utilities for update payload processing.""" +from __future__ import absolute_import from __future__ import print_function +import base64 + from update_payload import update_metadata_pb2 from update_payload.error import PayloadError @@ -25,23 +28,25 @@ # # Constants. 
# -PSEUDO_EXTENT_MARKER = (1L << 64) - 1 # UINT64_MAX - SIG_ASN1_HEADER = ( - '\x30\x31\x30\x0d\x06\x09\x60\x86' - '\x48\x01\x65\x03\x04\x02\x01\x05' - '\x00\x04\x20' + b'\x30\x31\x30\x0d\x06\x09\x60\x86' + b'\x48\x01\x65\x03\x04\x02\x01\x05' + b'\x00\x04\x20' ) -CHROMEOS_MAJOR_PAYLOAD_VERSION = 1 BRILLO_MAJOR_PAYLOAD_VERSION = 2 -INPLACE_MINOR_PAYLOAD_VERSION = 1 SOURCE_MINOR_PAYLOAD_VERSION = 2 OPSRCHASH_MINOR_PAYLOAD_VERSION = 3 BROTLI_BSDIFF_MINOR_PAYLOAD_VERSION = 4 PUFFDIFF_MINOR_PAYLOAD_VERSION = 5 +KERNEL = 'kernel' +ROOTFS = 'root' +# Tuple of (name in system, name in protobuf). +CROS_PARTITIONS = ((KERNEL, KERNEL), (ROOTFS, 'rootfs')) + + # # Payload operation types. # @@ -50,8 +55,6 @@ class OpType(object): _CLASS = update_metadata_pb2.InstallOperation REPLACE = _CLASS.REPLACE REPLACE_BZ = _CLASS.REPLACE_BZ - MOVE = _CLASS.MOVE - BSDIFF = _CLASS.BSDIFF SOURCE_COPY = _CLASS.SOURCE_COPY SOURCE_BSDIFF = _CLASS.SOURCE_BSDIFF ZERO = _CLASS.ZERO @@ -59,13 +62,11 @@ class OpType(object): REPLACE_XZ = _CLASS.REPLACE_XZ PUFFDIFF = _CLASS.PUFFDIFF BROTLI_BSDIFF = _CLASS.BROTLI_BSDIFF - ALL = (REPLACE, REPLACE_BZ, MOVE, BSDIFF, SOURCE_COPY, SOURCE_BSDIFF, ZERO, + ALL = (REPLACE, REPLACE_BZ, SOURCE_COPY, SOURCE_BSDIFF, ZERO, DISCARD, REPLACE_XZ, PUFFDIFF, BROTLI_BSDIFF) NAMES = { REPLACE: 'REPLACE', REPLACE_BZ: 'REPLACE_BZ', - MOVE: 'MOVE', - BSDIFF: 'BSDIFF', SOURCE_COPY: 'SOURCE_COPY', SOURCE_BSDIFF: 'SOURCE_BSDIFF', ZERO: 'ZERO', @@ -141,7 +142,7 @@ def Read(file_obj, length, offset=None, hasher=None): try: data = file_obj.read(length) - except IOError, e: + except IOError as e: raise PayloadError('error reading from file (%s): %s' % (file_obj.name, e)) if len(data) != length: @@ -162,13 +163,12 @@ def FormatExtent(ex, block_size=0): end_block = ex.start_block + ex.num_blocks if block_size: return '%d->%d * %d' % (ex.start_block, end_block, block_size) - else: - return '%d->%d' % (ex.start_block, end_block) + return '%d->%d' % (ex.start_block, end_block) def 
FormatSha256(digest): """Returns a canonical string representation of a SHA256 digest.""" - return digest.encode('base64').strip() + return base64.b64encode(digest).decode('utf-8') # diff --git a/update_payload/format_utils.py b/update_payload/format_utils.py index 6248ba9..e73badf 100644 --- a/update_payload/format_utils.py +++ b/update_payload/format_utils.py @@ -16,6 +16,8 @@ """Various formatting functions.""" +from __future__ import division + def NumToPercent(num, total, min_precision=1, max_precision=5): """Returns the percentage (string) of |num| out of |total|. @@ -50,7 +52,7 @@ def NumToPercent(num, total, min_precision=1, max_precision=5): precision = min(min_precision, max_precision) factor = 10 ** precision while precision <= max_precision: - percent = num * 100 * factor / total + percent = num * 100 * factor // total if percent: break factor *= 10 @@ -102,8 +104,8 @@ def BytesToHumanReadable(size, precision=1, decimal=False): magnitude = next_magnitude if exp != 0: - whole = size / magnitude - frac = (size % magnitude) * (10 ** precision) / magnitude + whole = size // magnitude + frac = (size % magnitude) * (10 ** precision) // magnitude while frac and not frac % 10: frac /= 10 return '%d%s %s' % (whole, '.%d' % frac if frac else '', suffixes[exp - 1]) diff --git a/update_payload/format_utils_unittest.py b/update_payload/format_utils_unittest.py index 42ea621..4dcd652 100755 --- a/update_payload/format_utils_unittest.py +++ b/update_payload/format_utils_unittest.py @@ -1,4 +1,4 @@ -#!/usr/bin/python2 +#!/usr/bin/env python # # Copyright (C) 2013 The Android Open Source Project # @@ -17,6 +17,11 @@ """Unit tests for format_utils.py.""" +# Disable check for function names to avoid errors based on old code +# pylint: disable-msg=invalid-name + +from __future__ import absolute_import + import unittest from update_payload import format_utils diff --git a/update_payload/histogram.py b/update_payload/histogram.py index 1ac2ab5..bad2dc3 100644 --- 
a/update_payload/histogram.py +++ b/update_payload/histogram.py @@ -16,6 +16,9 @@ """Histogram generation tools.""" +from __future__ import absolute_import +from __future__ import division + from collections import defaultdict from update_payload import format_utils @@ -110,7 +113,7 @@ def __str__(self): hist_bar = '|' for key, count in self.data: if self.total: - bar_len = count * self.scale / self.total + bar_len = count * self.scale // self.total hist_bar = '|%s|' % ('#' * bar_len).ljust(self.scale) line = '%s %s %s' % ( diff --git a/update_payload/histogram_unittest.py b/update_payload/histogram_unittest.py index e757dd0..ccde2bb 100755 --- a/update_payload/histogram_unittest.py +++ b/update_payload/histogram_unittest.py @@ -1,4 +1,4 @@ -#!/usr/bin/python2 +#!/usr/bin/env python # # Copyright (C) 2013 The Android Open Source Project # @@ -17,6 +17,11 @@ """Unit tests for histogram.py.""" +# Disable check for function names to avoid errors based on old code +# pylint: disable-msg=invalid-name + +from __future__ import absolute_import + import unittest from update_payload import format_utils diff --git a/update_payload/payload.py b/update_payload/payload.py index 380d6d0..ea5ed30 100644 --- a/update_payload/payload.py +++ b/update_payload/payload.py @@ -16,6 +16,7 @@ """Tools for reading, verifying and applying Chrome OS update payloads.""" +from __future__ import absolute_import from __future__ import print_function import hashlib @@ -64,7 +65,7 @@ class _PayloadHeader(object): """Update payload header struct.""" # Header constants; sizes are in bytes. - _MAGIC = 'CrAU' + _MAGIC = b'CrAU' _VERSION_SIZE = 8 _MANIFEST_LEN_SIZE = 8 _METADATA_SIGNATURE_LEN_SIZE = 4 @@ -111,7 +112,6 @@ def ReadFromPayload(self, payload_file, hasher=None): payload_file, self._METADATA_SIGNATURE_LEN_SIZE, True, hasher=hasher) - def __init__(self, payload_file, payload_file_offset=0): """Initialize the payload object. 
@@ -263,9 +263,7 @@ def ResetFile(self): def IsDelta(self): """Returns True iff the payload appears to be a delta.""" self._AssertInit() - return (self.manifest.HasField('old_kernel_info') or - self.manifest.HasField('old_rootfs_info') or - any(partition.HasField('old_partition_info') + return (any(partition.HasField('old_partition_info') for partition in self.manifest.partitions)) def IsFull(self): @@ -273,19 +271,19 @@ def IsFull(self): return not self.IsDelta() def Check(self, pubkey_file_name=None, metadata_sig_file=None, - report_out_file=None, assert_type=None, block_size=0, - rootfs_part_size=0, kernel_part_size=0, allow_unhashed=False, + metadata_size=0, report_out_file=None, assert_type=None, + block_size=0, part_sizes=None, allow_unhashed=False, disabled_tests=()): """Checks the payload integrity. Args: pubkey_file_name: public key used for signature verification metadata_sig_file: metadata signature, if verification is desired + metadata_size: metadata size, if verification is desired report_out_file: file object to dump the report to assert_type: assert that payload is either 'full' or 'delta' block_size: expected filesystem / payload block size - rootfs_part_size: the size of (physical) rootfs partitions in bytes - kernel_part_size: the size of (physical) kernel partitions in bytes + part_sizes: map of partition label to (physical) size in bytes allow_unhashed: allow unhashed operation blobs disabled_tests: list of tests to disable @@ -300,20 +298,18 @@ def Check(self, pubkey_file_name=None, metadata_sig_file=None, allow_unhashed=allow_unhashed, disabled_tests=disabled_tests) helper.Run(pubkey_file_name=pubkey_file_name, metadata_sig_file=metadata_sig_file, - rootfs_part_size=rootfs_part_size, - kernel_part_size=kernel_part_size, + metadata_size=metadata_size, + part_sizes=part_sizes, report_out_file=report_out_file) - def Apply(self, new_kernel_part, new_rootfs_part, old_kernel_part=None, - old_rootfs_part=None, bsdiff_in_place=True, 
bspatch_path=None, - puffpatch_path=None, truncate_to_expected_size=True): + def Apply(self, new_parts, old_parts=None, bsdiff_in_place=True, + bspatch_path=None, puffpatch_path=None, + truncate_to_expected_size=True): """Applies the update payload. Args: - new_kernel_part: name of dest kernel partition file - new_rootfs_part: name of dest rootfs partition file - old_kernel_part: name of source kernel partition file (optional) - old_rootfs_part: name of source rootfs partition file (optional) + new_parts: map of partition name to dest partition file + old_parts: map of partition name to partition file (optional) bsdiff_in_place: whether to perform BSDIFF operations in-place (optional) bspatch_path: path to the bspatch binary (optional) puffpatch_path: path to the puffpatch binary (optional) @@ -331,6 +327,4 @@ def Apply(self, new_kernel_part, new_rootfs_part, old_kernel_part=None, self, bsdiff_in_place=bsdiff_in_place, bspatch_path=bspatch_path, puffpatch_path=puffpatch_path, truncate_to_expected_size=truncate_to_expected_size) - helper.Run(new_kernel_part, new_rootfs_part, - old_kernel_part=old_kernel_part, - old_rootfs_part=old_rootfs_part) + helper.Run(new_parts, old_parts=old_parts) diff --git a/update_payload/test_utils.py b/update_payload/test_utils.py index 1e2259d..e153669 100644 --- a/update_payload/test_utils.py +++ b/update_payload/test_utils.py @@ -16,9 +16,10 @@ """Utilities for unit testing.""" +from __future__ import absolute_import from __future__ import print_function -import cStringIO +import io import hashlib import os import struct @@ -70,7 +71,7 @@ def _WriteInt(file_obj, size, is_unsigned, val): """ try: file_obj.write(struct.pack(common.IntPackingFmtStr(size, is_unsigned), val)) - except IOError, e: + except IOError as e: raise payload.PayloadError('error writing to file (%s): %s' % (file_obj.name, e)) @@ -173,31 +174,37 @@ def SetBlockSize(self, block_size): self.block_size = block_size _SetMsgField(self.manifest, 'block_size', block_size) - 
def SetPartInfo(self, is_kernel, is_new, part_size, part_hash): + def SetPartInfo(self, part_name, is_new, part_size, part_hash): """Set the partition info entry. Args: - is_kernel: whether this is kernel partition info - is_new: whether to set old (False) or new (True) info - part_size: the partition size (in fact, filesystem size) - part_hash: the partition hash + part_name: The name of the partition. + is_new: Whether to set old (False) or new (True) info. + part_size: The partition size (in fact, filesystem size). + part_hash: The partition hash. """ - if is_kernel: - part_info = (self.manifest.new_kernel_info if is_new - else self.manifest.old_kernel_info) - else: - part_info = (self.manifest.new_rootfs_info if is_new - else self.manifest.old_rootfs_info) + partition = next((x for x in self.manifest.partitions + if x.partition_name == part_name), None) + if partition is None: + partition = self.manifest.partitions.add() + partition.partition_name = part_name + + part_info = (partition.new_partition_info if is_new + else partition.old_partition_info) _SetMsgField(part_info, 'size', part_size) _SetMsgField(part_info, 'hash', part_hash) - def AddOperation(self, is_kernel, op_type, data_offset=None, + def AddOperation(self, part_name, op_type, data_offset=None, data_length=None, src_extents=None, src_length=None, dst_extents=None, dst_length=None, data_sha256_hash=None): """Adds an InstallOperation entry.""" - operations = (self.manifest.kernel_install_operations if is_kernel - else self.manifest.install_operations) + partition = next((x for x in self.manifest.partitions + if x.partition_name == part_name), None) + if partition is None: + partition = self.manifest.partitions.add() + partition.partition_name = part_name + operations = partition.operations op = operations.add() op.type = op_type @@ -277,7 +284,7 @@ def AddData(self, data_blob): self.data_blobs.append(data_blob) return data_length, data_offset - def AddOperationWithData(self, is_kernel, op_type, 
src_extents=None, + def AddOperationWithData(self, part_name, op_type, src_extents=None, src_length=None, dst_extents=None, dst_length=None, data_blob=None, do_hash_data_blob=True): """Adds an install operation and associated data blob. @@ -287,12 +294,12 @@ def AddOperationWithData(self, is_kernel, op_type, src_extents=None, necessary offset/length accounting. Args: - is_kernel: whether this is a kernel (True) or rootfs (False) operation - op_type: one of REPLACE, REPLACE_BZ, REPLACE_XZ, MOVE or BSDIFF + part_name: The name of the partition (e.g. kernel or root). + op_type: one of REPLACE, REPLACE_BZ, REPLACE_XZ. src_extents: list of (start, length) pairs indicating src block ranges - src_length: size of the src data in bytes (needed for BSDIFF) + src_length: size of the src data in bytes (needed for diff operations) dst_extents: list of (start, length) pairs indicating dst block ranges - dst_length: size of the dst data in bytes (needed for BSDIFF) + dst_length: size of the dst data in bytes (needed for diff operations) data_blob: a data blob associated with this operation do_hash_data_blob: whether or not to compute and add a data blob hash """ @@ -302,15 +309,13 @@ def AddOperationWithData(self, is_kernel, op_type, src_extents=None, data_sha256_hash = hashlib.sha256(data_blob).digest() data_length, data_offset = self.AddData(data_blob) - self.AddOperation(is_kernel, op_type, data_offset=data_offset, + self.AddOperation(part_name, op_type, data_offset=data_offset, data_length=data_length, src_extents=src_extents, src_length=src_length, dst_extents=dst_extents, dst_length=dst_length, data_sha256_hash=data_sha256_hash) def WriteToFileWithData(self, file_obj, sigs_data=None, - privkey_file_name=None, - do_add_pseudo_operation=False, - is_pseudo_in_kernel=False, padding=None): + privkey_file_name=None, padding=None): """Writes the payload content to a file, optionally signing the content. 
Args: @@ -319,10 +324,6 @@ def WriteToFileWithData(self, file_obj, sigs_data=None, payload signature fields assumed to be preset by the caller) privkey_file_name: key used for signing the payload (optional; used only if explicit signatures blob not provided) - do_add_pseudo_operation: whether a pseudo-operation should be added to - account for the signature blob - is_pseudo_in_kernel: whether the pseudo-operation should be added to - kernel (True) or rootfs (False) operations padding: stuff to dump past the normal data blobs provided (optional) Raises: @@ -335,7 +336,7 @@ def WriteToFileWithData(self, file_obj, sigs_data=None, if do_generate_sigs_data: # First, sign some arbitrary data to obtain the size of a signature blob. - fake_sig = SignSha256('fake-payload-data', privkey_file_name) + fake_sig = SignSha256(b'fake-payload-data', privkey_file_name) fake_sigs_gen = SignaturesGenerator() fake_sigs_gen.AddSig(1, fake_sig) sigs_len = len(fake_sigs_gen.ToBinary()) @@ -343,20 +344,9 @@ def WriteToFileWithData(self, file_obj, sigs_data=None, # Update the payload with proper signature attributes. self.SetSignatures(self.curr_offset, sigs_len) - # Add a pseudo-operation to account for the signature blob, if requested. - if do_add_pseudo_operation: - if not self.block_size: - raise TestError('cannot add pseudo-operation without knowing the ' - 'payload block size') - self.AddOperation( - is_pseudo_in_kernel, common.OpType.REPLACE, - data_offset=self.curr_offset, data_length=sigs_len, - dst_extents=[(common.PSEUDO_EXTENT_MARKER, - (sigs_len + self.block_size - 1) / self.block_size)]) - if do_generate_sigs_data: # Once all payload fields are updated, dump and sign it. 
- temp_payload_file = cStringIO.StringIO() + temp_payload_file = io.BytesIO() self.WriteToFile(temp_payload_file, data_blobs=self.data_blobs) sig = SignSha256(temp_payload_file.getvalue(), privkey_file_name) sigs_gen = SignaturesGenerator() diff --git a/update_payload/update_metadata_pb2.py b/update_payload/update_metadata_pb2.py index 595f2f6..bcd8187 100644 --- a/update_payload/update_metadata_pb2.py +++ b/update_payload/update_metadata_pb2.py @@ -1,19 +1,25 @@ +# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: update_metadata.proto from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection -from google.protobuf import descriptor_pb2 +from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) +_sym_db = _symbol_database.Default() + DESCRIPTOR = _descriptor.FileDescriptor( name='update_metadata.proto', package='chromeos_update_engine', - serialized_pb='\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xe6\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\r\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\r\x12\x33\n\x0bsrc_extents\x18\x04 
\x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xa5\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x08\n\x04MOVE\x10\x02\x12\n\n\x06\x42SDIFF\x10\x03\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x0c\n\x08PUFFDIFF\x10\t\x12\x11\n\rBROTLI_BSDIFF\x10\n\"\xa6\x03\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\"\xc4\x05\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t 
\x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdateB\x02H\x03') + syntax='proto2', + serialized_options=b'H\003', + serialized_pb=b'\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xe6\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\r\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\r\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t 
\x01(\x0c\"\xa5\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x08\n\x04MOVE\x10\x02\x12\n\n\x06\x42SDIFF\x10\x03\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"Y\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\"\xdb\x05\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 
\x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x42\x02H\x03' +) @@ -25,54 +31,55 @@ values=[ _descriptor.EnumValueDescriptor( name='REPLACE', index=0, number=0, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='REPLACE_BZ', index=1, number=1, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='MOVE', index=2, number=2, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='BSDIFF', index=3, number=3, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='SOURCE_COPY', index=4, number=4, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( name='SOURCE_BSDIFF', index=5, number=5, - options=None, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( - name='ZERO', index=6, number=6, - options=None, + name='REPLACE_XZ', index=6, number=8, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( - name='DISCARD', index=7, number=7, - options=None, + name='ZERO', index=7, number=6, + serialized_options=None, 
type=None), _descriptor.EnumValueDescriptor( - name='REPLACE_XZ', index=8, number=8, - options=None, + name='DISCARD', index=8, number=7, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( - name='PUFFDIFF', index=9, number=9, - options=None, + name='BROTLI_BSDIFF', index=9, number=10, + serialized_options=None, type=None), _descriptor.EnumValueDescriptor( - name='BROTLI_BSDIFF', index=10, number=10, - options=None, + name='PUFFDIFF', index=10, number=9, + serialized_options=None, type=None), ], containing_type=None, - options=None, + serialized_options=None, serialized_start=712, serialized_end=877, ) +_sym_db.RegisterEnumDescriptor(_INSTALLOPERATION_TYPE) _EXTENT = _descriptor.Descriptor( @@ -88,23 +95,26 @@ has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='num_blocks', full_name='chromeos_update_engine.Extent.num_blocks', index=1, number=2, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=49, serialized_end=98, ) @@ -123,23 +133,26 @@ has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='chromeos_update_engine.Signatures.Signature.data', index=1, number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value="", + has_default_value=False, default_value=b"", message_type=None, 
enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=180, serialized_end=222, ) @@ -157,16 +170,19 @@ has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_SIGNATURES_SIGNATURE, ], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=100, serialized_end=222, ) @@ -185,23 +201,26 @@ has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='hash', full_name='chromeos_update_engine.PartitionInfo.hash', index=1, number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value="", + has_default_value=False, default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=224, serialized_end=267, ) @@ -217,54 +236,57 @@ _descriptor.FieldDescriptor( name='board', full_name='chromeos_update_engine.ImageInfo.board', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='key', full_name='chromeos_update_engine.ImageInfo.key', index=1, number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='channel', full_name='chromeos_update_engine.ImageInfo.channel', index=2, number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='version', full_name='chromeos_update_engine.ImageInfo.version', index=3, number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='build_channel', full_name='chromeos_update_engine.ImageInfo.build_channel', index=4, number=5, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='build_version', full_name='chromeos_update_engine.ImageInfo.build_version', index=5, number=6, type=9, 
cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=269, serialized_end=388, ) @@ -283,63 +305,63 @@ has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data_offset', full_name='chromeos_update_engine.InstallOperation.data_offset', index=1, number=2, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data_length', full_name='chromeos_update_engine.InstallOperation.data_length', index=2, number=3, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='src_extents', full_name='chromeos_update_engine.InstallOperation.src_extents', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='src_length', full_name='chromeos_update_engine.InstallOperation.src_length', index=4, number=5, type=4, cpp_type=4, 
label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='dst_extents', full_name='chromeos_update_engine.InstallOperation.dst_extents', index=5, number=6, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='dst_length', full_name='chromeos_update_engine.InstallOperation.dst_length', index=6, number=7, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data_sha256_hash', full_name='chromeos_update_engine.InstallOperation.data_sha256_hash', index=7, number=8, type=12, cpp_type=9, label=1, - has_default_value=False, default_value="", + has_default_value=False, default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='src_sha256_hash', full_name='chromeos_update_engine.InstallOperation.src_sha256_hash', index=8, number=9, type=12, cpp_type=9, label=1, - has_default_value=False, default_value="", + has_default_value=False, default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -347,9 +369,12 @@ enum_types=[ _INSTALLOPERATION_TYPE, ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], 
serialized_start=391, serialized_end=877, ) @@ -365,77 +390,205 @@ _descriptor.FieldDescriptor( name='partition_name', full_name='chromeos_update_engine.PartitionUpdate.partition_name', index=0, number=1, type=9, cpp_type=9, label=2, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='run_postinstall', full_name='chromeos_update_engine.PartitionUpdate.run_postinstall', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='postinstall_path', full_name='chromeos_update_engine.PartitionUpdate.postinstall_path', index=2, number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='filesystem_type', full_name='chromeos_update_engine.PartitionUpdate.filesystem_type', index=3, number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='new_partition_signature', full_name='chromeos_update_engine.PartitionUpdate.new_partition_signature', index=4, number=5, type=11, cpp_type=10, label=3, 
has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='old_partition_info', full_name='chromeos_update_engine.PartitionUpdate.old_partition_info', index=5, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='new_partition_info', full_name='chromeos_update_engine.PartitionUpdate.new_partition_info', index=6, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='operations', full_name='chromeos_update_engine.PartitionUpdate.operations', index=7, number=8, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='postinstall_optional', full_name='chromeos_update_engine.PartitionUpdate.postinstall_optional', index=8, number=9, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='hash_tree_data_extent', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_data_extent', index=9, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='hash_tree_extent', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_extent', index=10, + number=11, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='hash_tree_algorithm', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_algorithm', index=11, + number=12, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='hash_tree_salt', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_salt', index=12, + number=13, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='fec_data_extent', full_name='chromeos_update_engine.PartitionUpdate.fec_data_extent', index=13, + number=14, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='fec_extent', full_name='chromeos_update_engine.PartitionUpdate.fec_extent', index=14, + number=15, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + 
_descriptor.FieldDescriptor( + name='fec_roots', full_name='chromeos_update_engine.PartitionUpdate.fec_roots', index=15, + number=16, type=13, cpp_type=3, label=1, + has_default_value=True, default_value=2, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=880, - serialized_end=1302, + serialized_end=1607, +) + + +_DYNAMICPARTITIONGROUP = _descriptor.Descriptor( + name='DynamicPartitionGroup', + full_name='chromeos_update_engine.DynamicPartitionGroup', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='chromeos_update_engine.DynamicPartitionGroup.name', index=0, + number=1, type=9, cpp_type=9, label=2, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='size', full_name='chromeos_update_engine.DynamicPartitionGroup.size', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='partition_names', full_name='chromeos_update_engine.DynamicPartitionGroup.partition_names', index=2, + number=3, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, 
+ is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1609, + serialized_end=1685, +) + + +_DYNAMICPARTITIONMETADATA = _descriptor.Descriptor( + name='DynamicPartitionMetadata', + full_name='chromeos_update_engine.DynamicPartitionMetadata', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='groups', full_name='chromeos_update_engine.DynamicPartitionMetadata.groups', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1687, + serialized_end=1776, ) @@ -452,114 +605,129 @@ has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='kernel_install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.kernel_install_operations', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='block_size', full_name='chromeos_update_engine.DeltaArchiveManifest.block_size', index=2, number=3, type=13, cpp_type=3, label=1, has_default_value=True, default_value=4096, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='signatures_offset', 
full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_offset', index=3, number=4, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='signatures_size', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_size', index=4, number=5, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='old_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_kernel_info', index=5, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='new_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_kernel_info', index=6, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='old_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_rootfs_info', index=7, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='new_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_rootfs_info', index=8, number=9, 
type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='old_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_image_info', index=9, number=10, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='new_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_image_info', index=10, number=11, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='minor_version', full_name='chromeos_update_engine.DeltaArchiveManifest.minor_version', index=11, number=12, type=13, cpp_type=3, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='partitions', full_name='chromeos_update_engine.DeltaArchiveManifest.partitions', index=12, number=13, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='max_timestamp', full_name='chromeos_update_engine.DeltaArchiveManifest.max_timestamp', index=13, + number=14, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], - options=None, + serialized_options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=1305, - serialized_end=2013, + oneofs=[ + ], + serialized_start=1779, + serialized_end=2510, ) -_SIGNATURES_SIGNATURE.containing_type = _SIGNATURES; +_SIGNATURES_SIGNATURE.containing_type = _SIGNATURES _SIGNATURES.fields_by_name['signatures'].message_type = _SIGNATURES_SIGNATURE _INSTALLOPERATION.fields_by_name['type'].enum_type = _INSTALLOPERATION_TYPE _INSTALLOPERATION.fields_by_name['src_extents'].message_type = _EXTENT _INSTALLOPERATION.fields_by_name['dst_extents'].message_type = _EXTENT -_INSTALLOPERATION_TYPE.containing_type = _INSTALLOPERATION; +_INSTALLOPERATION_TYPE.containing_type = _INSTALLOPERATION _PARTITIONUPDATE.fields_by_name['new_partition_signature'].message_type = _SIGNATURES_SIGNATURE _PARTITIONUPDATE.fields_by_name['old_partition_info'].message_type = _PARTITIONINFO _PARTITIONUPDATE.fields_by_name['new_partition_info'].message_type = _PARTITIONINFO _PARTITIONUPDATE.fields_by_name['operations'].message_type = _INSTALLOPERATION +_PARTITIONUPDATE.fields_by_name['hash_tree_data_extent'].message_type = _EXTENT +_PARTITIONUPDATE.fields_by_name['hash_tree_extent'].message_type = _EXTENT +_PARTITIONUPDATE.fields_by_name['fec_data_extent'].message_type = _EXTENT +_PARTITIONUPDATE.fields_by_name['fec_extent'].message_type = _EXTENT +_DYNAMICPARTITIONMETADATA.fields_by_name['groups'].message_type = _DYNAMICPARTITIONGROUP _DELTAARCHIVEMANIFEST.fields_by_name['install_operations'].message_type = _INSTALLOPERATION _DELTAARCHIVEMANIFEST.fields_by_name['kernel_install_operations'].message_type = _INSTALLOPERATION _DELTAARCHIVEMANIFEST.fields_by_name['old_kernel_info'].message_type = _PARTITIONINFO @@ -575,57 +743,82 @@ 
DESCRIPTOR.message_types_by_name['ImageInfo'] = _IMAGEINFO DESCRIPTOR.message_types_by_name['InstallOperation'] = _INSTALLOPERATION DESCRIPTOR.message_types_by_name['PartitionUpdate'] = _PARTITIONUPDATE +DESCRIPTOR.message_types_by_name['DynamicPartitionGroup'] = _DYNAMICPARTITIONGROUP +DESCRIPTOR.message_types_by_name['DynamicPartitionMetadata'] = _DYNAMICPARTITIONMETADATA DESCRIPTOR.message_types_by_name['DeltaArchiveManifest'] = _DELTAARCHIVEMANIFEST +_sym_db.RegisterFileDescriptor(DESCRIPTOR) -class Extent(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _EXTENT - +Extent = _reflection.GeneratedProtocolMessageType('Extent', (_message.Message,), { + 'DESCRIPTOR' : _EXTENT, + '__module__' : 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.Extent) + }) +_sym_db.RegisterMessage(Extent) -class Signatures(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - - class Signature(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _SIGNATURES_SIGNATURE +Signatures = _reflection.GeneratedProtocolMessageType('Signatures', (_message.Message,), { + 'Signature' : _reflection.GeneratedProtocolMessageType('Signature', (_message.Message,), { + 'DESCRIPTOR' : _SIGNATURES_SIGNATURE, + '__module__' : 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.Signatures.Signature) - DESCRIPTOR = _SIGNATURES - + }) + , + 'DESCRIPTOR' : _SIGNATURES, + '__module__' : 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.Signatures) + }) +_sym_db.RegisterMessage(Signatures) +_sym_db.RegisterMessage(Signatures.Signature) -class PartitionInfo(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _PARTITIONINFO - +PartitionInfo = _reflection.GeneratedProtocolMessageType('PartitionInfo', (_message.Message,), { + 'DESCRIPTOR' : _PARTITIONINFO, + 
'__module__' : 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionInfo) + }) +_sym_db.RegisterMessage(PartitionInfo) -class ImageInfo(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _IMAGEINFO - +ImageInfo = _reflection.GeneratedProtocolMessageType('ImageInfo', (_message.Message,), { + 'DESCRIPTOR' : _IMAGEINFO, + '__module__' : 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.ImageInfo) + }) +_sym_db.RegisterMessage(ImageInfo) -class InstallOperation(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _INSTALLOPERATION - +InstallOperation = _reflection.GeneratedProtocolMessageType('InstallOperation', (_message.Message,), { + 'DESCRIPTOR' : _INSTALLOPERATION, + '__module__' : 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.InstallOperation) + }) +_sym_db.RegisterMessage(InstallOperation) -class PartitionUpdate(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _PARTITIONUPDATE - +PartitionUpdate = _reflection.GeneratedProtocolMessageType('PartitionUpdate', (_message.Message,), { + 'DESCRIPTOR' : _PARTITIONUPDATE, + '__module__' : 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionUpdate) - -class DeltaArchiveManifest(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType - DESCRIPTOR = _DELTAARCHIVEMANIFEST - + }) +_sym_db.RegisterMessage(PartitionUpdate) + +DynamicPartitionGroup = _reflection.GeneratedProtocolMessageType('DynamicPartitionGroup', (_message.Message,), { + 'DESCRIPTOR' : _DYNAMICPARTITIONGROUP, + '__module__' : 'update_metadata_pb2' + # @@protoc_insertion_point(class_scope:chromeos_update_engine.DynamicPartitionGroup) + }) +_sym_db.RegisterMessage(DynamicPartitionGroup) + +DynamicPartitionMetadata = 
_reflection.GeneratedProtocolMessageType('DynamicPartitionMetadata', (_message.Message,), { + 'DESCRIPTOR' : _DYNAMICPARTITIONMETADATA, + '__module__' : 'update_metadata_pb2' + # @@protoc_insertion_point(class_scope:chromeos_update_engine.DynamicPartitionMetadata) + }) +_sym_db.RegisterMessage(DynamicPartitionMetadata) + +DeltaArchiveManifest = _reflection.GeneratedProtocolMessageType('DeltaArchiveManifest', (_message.Message,), { + 'DESCRIPTOR' : _DELTAARCHIVEMANIFEST, + '__module__' : 'update_metadata_pb2' # @@protoc_insertion_point(class_scope:chromeos_update_engine.DeltaArchiveManifest) + }) +_sym_db.RegisterMessage(DeltaArchiveManifest) -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), 'H\003') +DESCRIPTOR._options = None # @@protoc_insertion_point(module_scope)