From e164451dfe331e56bfa9fbaab9e9c2284d46a18f Mon Sep 17 00:00:00 2001
From: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Date: Wed, 25 Jun 2025 14:03:45 +1000
Subject: [PATCH] chore: ruff

---
 invokeai/app/invocations/baseinvocation.py     | 12 ++++++------
 invokeai/app/invocations/segment_anything.py   |  6 +++---
 invokeai/app/services/config/config_default.py |  6 +++---
 .../workflow_records_sqlite.py                 | 12 ++++++------
 invokeai/backend/model_manager/merge.py        | 18 +++++++++---------
 .../services/download/test_download_queue.py   | 12 ++++++------
 ...est_flux_aitoolkit_lora_conversion_utils.py |  6 +++---
 7 files changed, 36 insertions(+), 36 deletions(-)

diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py
index 4f2341fbcd..622d8ea60f 100644
--- a/invokeai/app/invocations/baseinvocation.py
+++ b/invokeai/app/invocations/baseinvocation.py
@@ -587,9 +587,9 @@ def invocation(
         for field_name, field_info in cls.model_fields.items():
             annotation = field_info.annotation
             assert annotation is not None, f"{field_name} on invocation {invocation_type} has no type annotation."
-            assert isinstance(
-                field_info.json_schema_extra, dict
-            ), f"{field_name} on invocation {invocation_type} has a non-dict json_schema_extra, did you forget to use InputField?"
+            assert isinstance(field_info.json_schema_extra, dict), (
+                f"{field_name} on invocation {invocation_type} has a non-dict json_schema_extra, did you forget to use InputField?"
+            )
 
             original_model_fields[field_name] = OriginalModelField(annotation=annotation, field_info=field_info)
 
@@ -712,9 +712,9 @@ def invocation_output(
         for field_name, field_info in cls.model_fields.items():
             annotation = field_info.annotation
             assert annotation is not None, f"{field_name} on invocation output {output_type} has no type annotation."
-            assert isinstance(
-                field_info.json_schema_extra, dict
-            ), f"{field_name} on invocation output {output_type} has a non-dict json_schema_extra, did you forget to use InputField?"
+            assert isinstance(field_info.json_schema_extra, dict), (
+                f"{field_name} on invocation output {output_type} has a non-dict json_schema_extra, did you forget to use InputField?"
+            )
 
             cls._original_model_fields[field_name] = OriginalModelField(annotation=annotation, field_info=field_info)
 
diff --git a/invokeai/app/invocations/segment_anything.py b/invokeai/app/invocations/segment_anything.py
index 6b2decff18..4d82624edf 100644
--- a/invokeai/app/invocations/segment_anything.py
+++ b/invokeai/app/invocations/segment_anything.py
@@ -184,9 +184,9 @@ class SegmentAnythingInvocation(BaseInvocation):
             # Find the largest mask.
             return [max(masks, key=lambda x: float(x.sum()))]
         elif self.mask_filter == "highest_box_score":
-            assert (
-                bounding_boxes is not None
-            ), "Bounding boxes must be provided to use the 'highest_box_score' mask filter."
+            assert bounding_boxes is not None, (
+                "Bounding boxes must be provided to use the 'highest_box_score' mask filter."
+            )
             assert len(masks) == len(bounding_boxes)
             # Find the index of the bounding box with the highest score.
             # Note that we fallback to -1.0 if the score is None. This is mainly to satisfy the type checker.
diff --git a/invokeai/app/services/config/config_default.py b/invokeai/app/services/config/config_default.py
index 18fd5c70db..4dabac964b 100644
--- a/invokeai/app/services/config/config_default.py
+++ b/invokeai/app/services/config/config_default.py
@@ -482,9 +482,9 @@ def load_and_migrate_config(config_path: Path) -> InvokeAIAppConfig:
     try:
         # Meta is not included in the model fields, so we need to validate it separately
         config = InvokeAIAppConfig.model_validate(loaded_config_dict)
-        assert (
-            config.schema_version == CONFIG_SCHEMA_VERSION
-        ), f"Invalid schema version, expected {CONFIG_SCHEMA_VERSION}: {config.schema_version}"
+        assert config.schema_version == CONFIG_SCHEMA_VERSION, (
+            f"Invalid schema version, expected {CONFIG_SCHEMA_VERSION}: {config.schema_version}"
+        )
         return config
     except Exception as e:
         raise RuntimeError(f"Failed to load config file {config_path}: {e}") from e
diff --git a/invokeai/app/services/workflow_records/workflow_records_sqlite.py b/invokeai/app/services/workflow_records/workflow_records_sqlite.py
index 367c00b503..b84b226d9f 100644
--- a/invokeai/app/services/workflow_records/workflow_records_sqlite.py
+++ b/invokeai/app/services/workflow_records/workflow_records_sqlite.py
@@ -379,13 +379,13 @@ class SqliteWorkflowRecordsStorage(WorkflowRecordsStorageBase):
                 bytes_ = path.read_bytes()
                 workflow_from_file = WorkflowValidator.validate_json(bytes_)
 
-                assert workflow_from_file.id.startswith(
-                    "default_"
-                ), f'Invalid default workflow ID (must start with "default_"): {workflow_from_file.id}'
+                assert workflow_from_file.id.startswith("default_"), (
+                    f'Invalid default workflow ID (must start with "default_"): {workflow_from_file.id}'
+                )
 
-                assert (
-                    workflow_from_file.meta.category is WorkflowCategory.Default
-                ), f"Invalid default workflow category: {workflow_from_file.meta.category}"
+                assert workflow_from_file.meta.category is WorkflowCategory.Default, (
+                    f"Invalid default workflow category: {workflow_from_file.meta.category}"
+                )
 
                 workflows_from_file.append(workflow_from_file)
 
diff --git a/invokeai/backend/model_manager/merge.py b/invokeai/backend/model_manager/merge.py
index b00bc99f3e..03056b10f5 100644
--- a/invokeai/backend/model_manager/merge.py
+++ b/invokeai/backend/model_manager/merge.py
@@ -115,19 +115,19 @@ class ModelMerger(object):
         base_models: Set[BaseModelType] = set()
         variant = None if self._installer.app_config.precision == "float32" else "fp16"
 
-        assert (
-            len(model_keys) <= 2 or interp == MergeInterpolationMethod.AddDifference
-        ), "When merging three models, only the 'add_difference' merge method is supported"
+        assert len(model_keys) <= 2 or interp == MergeInterpolationMethod.AddDifference, (
+            "When merging three models, only the 'add_difference' merge method is supported"
+        )
 
         for key in model_keys:
             info = store.get_model(key)
             model_names.append(info.name)
-            assert isinstance(
-                info, MainDiffusersConfig
-            ), f"{info.name} ({info.key}) is not a diffusers model. It must be optimized before merging"
-            assert info.variant == ModelVariantType(
-                "normal"
-            ), f"{info.name} ({info.key}) is a {info.variant} model, which cannot currently be merged"
+            assert isinstance(info, MainDiffusersConfig), (
+                f"{info.name} ({info.key}) is not a diffusers model. It must be optimized before merging"
+            )
+            assert info.variant == ModelVariantType("normal"), (
+                f"{info.name} ({info.key}) is a {info.variant} model, which cannot currently be merged"
+            )
 
             # tally base models used
             base_models.add(info.base)
diff --git a/tests/app/services/download/test_download_queue.py b/tests/app/services/download/test_download_queue.py
index 8feb49f999..edf3c115ac 100644
--- a/tests/app/services/download/test_download_queue.py
+++ b/tests/app/services/download/test_download_queue.py
@@ -211,12 +211,12 @@ def test_multifile_download(tmp_path: Path, mm2_session: Session) -> None:
     assert job.bytes > 0, "expected download bytes to be positive"
     assert job.bytes == job.total_bytes, "expected download bytes to equal total bytes"
     assert job.download_path == tmp_path / "sdxl-turbo"
-    assert Path(
-        tmp_path, "sdxl-turbo/model_index.json"
-    ).exists(), f"expected {tmp_path}/sdxl-turbo/model_inded.json to exist"
-    assert Path(
-        tmp_path, "sdxl-turbo/text_encoder/config.json"
-    ).exists(), f"expected {tmp_path}/sdxl-turbo/text_encoder/config.json to exist"
+    assert Path(tmp_path, "sdxl-turbo/model_index.json").exists(), (
+        f"expected {tmp_path}/sdxl-turbo/model_inded.json to exist"
+    )
+    assert Path(tmp_path, "sdxl-turbo/text_encoder/config.json").exists(), (
+        f"expected {tmp_path}/sdxl-turbo/text_encoder/config.json to exist"
+    )
     assert events == {DownloadJobStatus.RUNNING, DownloadJobStatus.COMPLETED}
 
     queue.stop()
diff --git a/tests/backend/patches/lora_conversions/test_flux_aitoolkit_lora_conversion_utils.py b/tests/backend/patches/lora_conversions/test_flux_aitoolkit_lora_conversion_utils.py
index 1ad408861e..ed3e05a9b2 100644
--- a/tests/backend/patches/lora_conversions/test_flux_aitoolkit_lora_conversion_utils.py
+++ b/tests/backend/patches/lora_conversions/test_flux_aitoolkit_lora_conversion_utils.py
@@ -48,9 +48,9 @@ def test_flux_aitoolkit_transformer_state_dict_is_in_invoke_format():
     model_keys = set(model.state_dict().keys())
 
     for converted_key_prefix in converted_key_prefixes:
-        assert any(
-            model_key.startswith(converted_key_prefix) for model_key in model_keys
-        ), f"'{converted_key_prefix}' did not match any model keys."
+        assert any(model_key.startswith(converted_key_prefix) for model_key in model_keys), (
+            f"'{converted_key_prefix}' did not match any model keys."
+        )
 
 
 def test_lora_model_from_flux_aitoolkit_state_dict():