Skip to content
This repository was archived by the owner on Jun 3, 2025. It is now read-only.

Disable logging for quantization (num_bits etc) by default #1470

Closed
wants to merge 1 commit into from
Closed
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -103,6 +103,7 @@ class QuantizationModifier(ScheduledModifier):
| freeze_bn_stats_epoch: 3.0
| model_fuse_fn_name: 'fuse_module'
| strict: True
| verbose: False

:param start_epoch: The epoch to start the modifier at
:param scheme: Default QuantizationScheme to use when enabling quantization
Expand Down Expand Up @@ -133,6 +134,8 @@ class QuantizationModifier(ScheduledModifier):
scheme_overrides or ignore are not found in a given module. Default True
:param end_epoch: Disabled, setting to anything other than -1 will raise an
exception. For compatibility with YAML serialization only.
:param verbose: if True, will log detailed information such as number of bits, batch
    norm freezing etc. Defaults to False
"""

def __init__(
Expand All @@ -148,6 +151,7 @@ def __init__(
num_calibration_steps: Optional[int] = None,
strict: bool = True,
end_epoch: float = -1.0,
verbose: bool = False,
):
raise_if_torch_quantization_not_available()
if end_epoch != -1:
Expand Down Expand Up @@ -178,7 +182,7 @@ def __init__(
self._model_fuse_fn_name = None

self._strict = strict

self._verbose = verbose
self._qat_enabled = False
self._quantization_observer_disabled = False
self._bn_stats_frozen = False
Expand Down Expand Up @@ -348,6 +352,22 @@ def strict(self, value: bool):
"""
self._strict = value

@ModifierProp()
def verbose(self) -> bool:
    """
    :return: True when detailed quantization information (number of bits,
        batch-norm freezing, etc.) should be logged; False otherwise
    """
    verbose_enabled = self._verbose
    return verbose_enabled

# Fix: this setter was decorated @strict.setter, which rebinds the name
# `verbose` to a property built from `strict`'s getter — reading
# `obj.verbose` would then return self._strict. It must chain off the
# `verbose` property defined above.
@verbose.setter
def verbose(self, value: bool):
    """
    :param value: if True, will log detailed information such as number of bits,
        batch norm freezing etc.
    """
    self._verbose = value

def initialize(
self,
module: Module,
Expand Down Expand Up @@ -455,7 +475,8 @@ def _check_quantization_update(
module.apply(freeze_bn_stats)
self._bn_stats_frozen = True

self._log_quantization(module, epoch, steps_per_epoch)
if self._verbose:
self._log_quantization(module, epoch, steps_per_epoch)

def _disable_quantization_observer_update_ready(self, epoch: float) -> bool:
return (
Expand Down
Loading