From df4ecd5bec77034eeec508bda7f8604c35ccd4aa Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 2 May 2025 07:43:48 +1000 Subject: [PATCH 1/8] feat(nodes): improved pydantic type annotation massaging When we do our field type overrides to allow invocations to be instantiated without all required fields, we were not modifying the annotation of the field but did set the default value of the field to `None`. This results in an error when doing a ser/de round trip. Here's what we end up doing: ```py from pydantic import BaseModel, Field class MyModel(BaseModel): foo: str = Field(default=None) ``` And here is a simple round-trip, which should not error but which does: ```py MyModel(**MyModel().model_dump()) # ValidationError: 1 validation error for MyModel # foo # Input should be a valid string [type=string_type, input_value=None, input_type=NoneType] # For further information visit https://errors.pydantic.dev/2.11/v/string_type ``` To fix this, we now check every incoming field and update its annotation to match its default value. In other words, when we override the default field value to `None`, we make its type annotation `<original type> | None`. This prevents the error during deserialization. This slightly alters the schema for all invocations and outputs - the values of all fields without default values are now typed as `<original type> | None`, reflecting the overrides. This means the autogenerated types for fields have also changed for fields without defaults: ```ts // Old image?: components["schemas"]["ImageField"]; // New image?: components["schemas"]["ImageField"] | null; ``` This does not break anything on the frontend. 
--- invokeai/app/invocations/baseinvocation.py | 76 ++++++++++++++++------ tests/app/invocations/test_is_optional.py | 46 +++++++++++++ 2 files changed, 102 insertions(+), 20 deletions(-) create mode 100644 tests/app/invocations/test_is_optional.py diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index 61e76a288eb..02d4537c890 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -5,6 +5,8 @@ import inspect import re import sys +import types +import typing import warnings from abc import ABC, abstractmethod from enum import Enum @@ -489,6 +491,18 @@ def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None return None +def is_optional(annotation: Any) -> bool: + """ + Checks if the given annotation is optional (i.e. Optional[X], Union[X, None] or X | None). + """ + origin = typing.get_origin(annotation) + # PEP 604 unions (int|None) have origin types.UnionType + is_union = origin is typing.Union or origin is types.UnionType + if not is_union: + return False + return any(arg is type(None) for arg in typing.get_args(annotation)) + + def invocation( invocation_type: str, title: Optional[str] = None, @@ -523,6 +537,18 @@ def wrapper(cls: Type[TBaseInvocation]) -> Type[TBaseInvocation]: validate_fields(cls.model_fields, invocation_type) + fields: dict[str, tuple[Any, FieldInfo]] = {} + + for field_name, field_info in cls.model_fields.items(): + annotation = field_info.annotation + assert annotation is not None, f"{field_name} on invocation {invocation_type} has no type annotation." + assert isinstance(field_info.json_schema_extra, dict), ( + f"{field_name} on invocation {invocation_type} has a non-dict json_schema_extra, did you forget to use InputField?" 
+ ) + if field_info.default is None and not is_optional(annotation): + annotation = annotation | None + fields[field_name] = (annotation, field_info) + # Add OpenAPI schema extras uiconfig: dict[str, Any] = {} uiconfig["title"] = title @@ -557,11 +583,17 @@ def wrapper(cls: Type[TBaseInvocation]) -> Type[TBaseInvocation]: # Unfortunately, because the `GraphInvocation` uses a forward ref in its `graph` field's annotation, this does # not work. Instead, we have to create a new class with the type field and patch the original class with it. - invocation_type_annotation = Literal[invocation_type] # type: ignore + invocation_type_annotation = Literal[invocation_type] invocation_type_field = Field( title="type", default=invocation_type, json_schema_extra={"field_kind": FieldKind.NodeAttribute} ) + # pydantic's Field function returns a FieldInfo, but they annotate it as returning a type so that type-checkers + # don't get confused by something like this: + # foo: str = Field() <-- this is a FieldInfo, not a str + # Unfortunately this means we need to use type: ignore here to avoid type-checker errors + fields["type"] = (invocation_type_annotation, invocation_type_field) # type: ignore + # Validate the `invoke()` method is implemented if "invoke" in cls.__abstractmethods__: raise ValueError(f'Invocation "{invocation_type}" must implement the "invoke" method') @@ -583,17 +615,12 @@ def wrapper(cls: Type[TBaseInvocation]) -> Type[TBaseInvocation]: ) docstring = cls.__doc__ - cls = create_model( - cls.__qualname__, - __base__=cls, - __module__=cls.__module__, - type=(invocation_type_annotation, invocation_type_field), - ) - cls.__doc__ = docstring + new_class = create_model(cls.__qualname__, __base__=cls, __module__=cls.__module__, **fields) + new_class.__doc__ = docstring - InvocationRegistry.register_invocation(cls) + InvocationRegistry.register_invocation(new_class) - return cls + return new_class return wrapper @@ -618,23 +645,32 @@ def wrapper(cls: 
Type[TBaseInvocationOutput]) -> Type[TBaseInvocationOutput]: validate_fields(cls.model_fields, output_type) + fields: dict[str, tuple[Any, FieldInfo]] = {} + + for field_name, field_info in cls.model_fields.items(): + annotation = field_info.annotation + assert annotation is not None, f"{field_name} on invocation output {output_type} has no type annotation." + assert isinstance(field_info.json_schema_extra, dict), ( + f"{field_name} on invocation output {output_type} has a non-dict json_schema_extra, did you forget to use InputField?" + ) + if field_info.default is None and not is_optional(annotation): + annotation = annotation | None + fields[field_name] = (annotation, field_info) + # Add the output type to the model. - output_type_annotation = Literal[output_type] # type: ignore + output_type_annotation = Literal[output_type] output_type_field = Field( title="type", default=output_type, json_schema_extra={"field_kind": FieldKind.NodeAttribute} ) + fields["type"] = (output_type_annotation, output_type_field) # type: ignore + docstring = cls.__doc__ - cls = create_model( - cls.__qualname__, - __base__=cls, - __module__=cls.__module__, - type=(output_type_annotation, output_type_field), - ) - cls.__doc__ = docstring + new_class = create_model(cls.__qualname__, __base__=cls, __module__=cls.__module__, **fields) + new_class.__doc__ = docstring - InvocationRegistry.register_output(cls) + InvocationRegistry.register_output(new_class) - return cls + return new_class return wrapper diff --git a/tests/app/invocations/test_is_optional.py b/tests/app/invocations/test_is_optional.py new file mode 100644 index 00000000000..58e2723ba9e --- /dev/null +++ b/tests/app/invocations/test_is_optional.py @@ -0,0 +1,46 @@ +from typing import Any, Literal, Optional, Union + +import pytest +from pydantic import BaseModel + + +class TestModel(BaseModel): + foo: Literal["bar"] = "bar" + + +@pytest.mark.parametrize( + "input_type, expected", + [ + (str, False), + (list[str], 
False), + (list[dict[str, Any]], False), + (list[None], False), + (list[dict[str, None]], False), + (Any, False), + (True, False), + (False, False), + (Union[str, False], False), + (Union[str, True], False), + (None, False), + (str | None, True), + (Union[str, None], True), + (Optional[str], True), + (str | int | None, True), + (None | str | int, True), + (Union[None, str], True), + (Optional[str], True), + (Optional[int], True), + (Optional[str], True), + (TestModel | None, True), + (Union[TestModel, None], True), + (Optional[TestModel], True), + ], +) +def test_is_optional(input_type: Any, expected: bool) -> None: + """ + Test the is_optional function. + """ + from invokeai.app.invocations.baseinvocation import is_optional + + result = is_optional(input_type) + assert result == expected, f"Expected {expected} but got {result} for input type {input_type}" From 1750babf6c2aaa28ca46ac48cd3dffc1f2d46be8 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 2 May 2025 07:43:53 +1000 Subject: [PATCH 2/8] chore(ui): typegen --- .../frontend/web/src/services/api/schema.ts | 470 +++++++++--------- 1 file changed, 230 insertions(+), 240 deletions(-) diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index 36dc31a0a2b..843c22cb94f 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -1710,7 +1710,7 @@ export type components = { * @description The mask image to convert. * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Invert * @description Whether to invert the mask. @@ -1966,12 +1966,12 @@ export type components = { * @description The mask tensor to apply. 
* @default null */ - mask?: components["schemas"]["TensorField"]; + mask?: components["schemas"]["TensorField"] | null; /** * @description The image to apply the mask to. * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Invert * @description Whether to invert the mask. @@ -2022,12 +2022,12 @@ export type components = { * @description The image from which to extract the masked region * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * @description The mask defining the region (black=keep, white=discard) * @default null */ - mask?: components["schemas"]["ImageField"]; + mask?: components["schemas"]["ImageField"] | null; /** * Invert Mask * @description Whether to invert the mask before applying it @@ -2262,14 +2262,9 @@ export type components = { mode?: "RGB" | "RGBA"; /** * @description The color of the image - * @default { - * "r": 0, - * "g": 0, - * "b": 0, - * "a": 255 - * } + * @default null */ - color?: components["schemas"]["ColorField"]; + color?: components["schemas"]["ColorField"] | null; /** * type * @default blank_image @@ -2304,12 +2299,12 @@ export type components = { * @description Latents tensor * @default null */ - latents_a?: components["schemas"]["LatentsField"]; + latents_a?: components["schemas"]["LatentsField"] | null; /** * @description Latents tensor * @default null */ - latents_b?: components["schemas"]["LatentsField"]; + latents_b?: components["schemas"]["LatentsField"] | null; /** * @description Mask for blending in latents B * @default null @@ -3203,7 +3198,7 @@ export type components = { * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count * @default null */ - clip?: components["schemas"]["CLIPField"]; + clip?: components["schemas"]["CLIPField"] | null; /** * Skipped Layers * @description Number of layers to skip in text encoder @@ -3352,7 +3347,7 @@ export type 
components = { * @description The image to process * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * type * @default infill_cv2 @@ -3657,7 +3652,7 @@ export type components = { * @description The image to process * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Low Threshold * @description The low threshold of the Canny pixel gradient (0-255) @@ -3713,17 +3708,17 @@ export type components = { * @description The source image * @default null */ - source_image?: components["schemas"]["ImageField"]; + source_image?: components["schemas"]["ImageField"] | null; /** * @description The target image * @default null */ - target_image?: components["schemas"]["ImageField"]; + target_image?: components["schemas"]["ImageField"] | null; /** * @description The mask to use when pasting * @default null */ - mask?: components["schemas"]["ImageField"]; + mask?: components["schemas"]["ImageField"] | null; /** * Mask Blur * @description The amount to blur the mask by @@ -3778,12 +3773,12 @@ export type components = { * @description The image to apply the mask to * @default null */ - generated_image?: components["schemas"]["ImageField"]; + generated_image?: components["schemas"]["ImageField"] | null; /** * @description The mask to apply * @default null */ - mask?: components["schemas"]["ImageField"]; + mask?: components["schemas"]["ImageField"] | null; /** * Mask Blur * @description The amount to blur the mask by @@ -3823,7 +3818,7 @@ export type components = { * @description The image to crop * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Left * @description Number of pixels to pad/crop from the left (negative values crop inwards, positive values pad outwards) @@ -3968,17 +3963,17 @@ export type components = { * @description CogView4 model (Transformer) to 
load * @default null */ - transformer?: components["schemas"]["TransformerField"]; + transformer?: components["schemas"]["TransformerField"] | null; /** * @description Positive conditioning tensor * @default null */ - positive_conditioning?: components["schemas"]["CogView4ConditioningField"]; + positive_conditioning?: components["schemas"]["CogView4ConditioningField"] | null; /** * @description Negative conditioning tensor * @default null */ - negative_conditioning?: components["schemas"]["CogView4ConditioningField"]; + negative_conditioning?: components["schemas"]["CogView4ConditioningField"] | null; /** * CFG Scale * @description Classifier-Free Guidance scale @@ -4052,12 +4047,12 @@ export type components = { * @description The image to encode. * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * @description VAE * @default null */ - vae?: components["schemas"]["VAEField"]; + vae?: components["schemas"]["VAEField"] | null; /** * type * @default cogview4_i2l @@ -4101,12 +4096,12 @@ export type components = { * @description Latents tensor * @default null */ - latents?: components["schemas"]["LatentsField"]; + latents?: components["schemas"]["LatentsField"] | null; /** * @description VAE * @default null */ - vae?: components["schemas"]["VAEField"]; + vae?: components["schemas"]["VAEField"] | null; /** * type * @default cogview4_l2i @@ -4199,13 +4194,13 @@ export type components = { * @description Text prompt to encode. 
* @default null */ - prompt?: string; + prompt?: string | null; /** * GLM Encoder * @description GLM (THUDM) tokenizer and text encoder * @default null */ - glm_encoder?: components["schemas"]["GlmEncoderField"]; + glm_encoder?: components["schemas"]["GlmEncoderField"] | null; /** * type * @default cogview4_text_encoder @@ -4322,12 +4317,12 @@ export type components = { * @description The image to color-correct * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * @description Reference image for color-correction * @default null */ - reference?: components["schemas"]["ImageField"]; + reference?: components["schemas"]["ImageField"] | null; /** * @description Mask to use when applying color-correction * @default null @@ -4447,7 +4442,7 @@ export type components = { * @description The image to process * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Tile Size * @description Tile size @@ -4508,7 +4503,7 @@ export type components = { * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count * @default null */ - clip?: components["schemas"]["CLIPField"]; + clip?: components["schemas"]["CLIPField"] | null; /** * @description A mask defining the region that this conditioning prompt applies to. 
* @default null @@ -4615,7 +4610,7 @@ export type components = { * @description Conditioning tensor * @default null */ - conditioning?: components["schemas"]["ConditioningField"]; + conditioning?: components["schemas"]["ConditioningField"] | null; /** * type * @default conditioning @@ -4673,7 +4668,7 @@ export type components = { * @description The image to process * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Scale Factor * @description The scale factor used for the shuffle @@ -5116,12 +5111,12 @@ export type components = { * @description The control image * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * @description ControlNet model to load * @default null */ - control_model?: components["schemas"]["ModelIdentifierField"]; + control_model?: components["schemas"]["ModelIdentifierField"] | null; /** * Control Weight * @description The weight given to the ControlNet @@ -5477,7 +5472,7 @@ export type components = { * @description VAE * @default null */ - vae?: components["schemas"]["VAEField"]; + vae?: components["schemas"]["VAEField"] | null; /** * @description Image which will be masked * @default null @@ -5487,7 +5482,7 @@ export type components = { * @description The mask to use when pasting * @default null */ - mask?: components["schemas"]["ImageField"]; + mask?: components["schemas"]["ImageField"] | null; /** * Tiled * @description Processing using overlapping tiles (reduce memory consumption) @@ -5533,7 +5528,7 @@ export type components = { * @description Image which will be masked * @default null */ - mask?: components["schemas"]["ImageField"]; + mask?: components["schemas"]["ImageField"] | null; /** * Edge Radius * @description How far to blur/expand the edges of the mask @@ -5625,7 +5620,7 @@ export type components = { * @description The image to crop * @default null */ - image?: 
components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * @description The bounding box to crop the image to * @default null @@ -5665,31 +5660,31 @@ export type components = { * @description Latents tensor * @default null */ - latents?: components["schemas"]["LatentsField"]; + latents?: components["schemas"]["LatentsField"] | null; /** * X * @description The left x coordinate (in px) of the crop rectangle in image space. This value will be converted to a dimension in latent space. * @default null */ - x?: number; + x?: number | null; /** * Y * @description The top y coordinate (in px) of the crop rectangle in image space. This value will be converted to a dimension in latent space. * @default null */ - y?: number; + y?: number | null; /** * Width * @description The width (in px) of the crop rectangle in image space. This value will be converted to a dimension in latent space. * @default null */ - width?: number; + width?: number | null; /** * Height * @description The height (in px) of the crop rectangle in image space. This value will be converted to a dimension in latent space. 
* @default null */ - height?: number; + height?: number | null; /** * type * @default crop_latents @@ -5751,12 +5746,12 @@ export type components = { * @description The image to inpaint * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * @description The mask to use when inpainting * @default null */ - mask?: components["schemas"]["ImageField"]; + mask?: components["schemas"]["ImageField"] | null; /** * type * @default cv_inpaint @@ -5800,7 +5795,7 @@ export type components = { * @description The image to process * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Draw Body * @default true @@ -5873,13 +5868,13 @@ export type components = { * @description Positive conditioning tensor * @default null */ - positive_conditioning?: components["schemas"]["ConditioningField"] | components["schemas"]["ConditioningField"][]; + positive_conditioning?: components["schemas"]["ConditioningField"] | components["schemas"]["ConditioningField"][] | null; /** * Negative Conditioning * @description Negative conditioning tensor * @default null */ - negative_conditioning?: components["schemas"]["ConditioningField"] | components["schemas"]["ConditioningField"][]; + negative_conditioning?: components["schemas"]["ConditioningField"] | components["schemas"]["ConditioningField"][] | null; /** * @description Noise tensor * @default null @@ -5921,7 +5916,7 @@ export type components = { * @description UNet (scheduler, LoRAs) * @default null */ - unet?: components["schemas"]["UNetField"]; + unet?: components["schemas"]["UNetField"] | null; /** * Control * @default null @@ -5991,13 +5986,13 @@ export type components = { * @description Positive conditioning tensor * @default null */ - positive_conditioning?: components["schemas"]["ConditioningField"] | components["schemas"]["ConditioningField"][]; + positive_conditioning?: 
components["schemas"]["ConditioningField"] | components["schemas"]["ConditioningField"][] | null; /** * Negative Conditioning * @description Negative conditioning tensor * @default null */ - negative_conditioning?: components["schemas"]["ConditioningField"] | components["schemas"]["ConditioningField"][]; + negative_conditioning?: components["schemas"]["ConditioningField"] | components["schemas"]["ConditioningField"][] | null; /** * @description Noise tensor * @default null @@ -6039,7 +6034,7 @@ export type components = { * @description UNet (scheduler, LoRAs) * @default null */ - unet?: components["schemas"]["UNetField"]; + unet?: components["schemas"]["UNetField"] | null; /** * Control * @default null @@ -6153,7 +6148,7 @@ export type components = { * @description The image to process * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Model Size * @description The size of the depth model to use @@ -6444,7 +6439,7 @@ export type components = { * @description The prompt to parse with dynamicprompts * @default null */ - prompt?: string; + prompt?: string | null; /** * Max Prompts * @description The number of prompts to generate @@ -6507,7 +6502,7 @@ export type components = { * @description The input image * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Model Name * @description The Real-ESRGAN model to use @@ -6612,7 +6607,7 @@ export type components = { * @description The mask to expand * @default null */ - mask?: components["schemas"]["ImageField"]; + mask?: components["schemas"]["ImageField"] | null; /** * Threshold * @description The threshold for the binary mask (0-255) @@ -6728,7 +6723,7 @@ export type components = { * @description Image to face detect * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Minimum Confidence * @description 
Minimum confidence for face detection (lower if detection is failing) @@ -6779,7 +6774,7 @@ export type components = { * @description Image to face detect * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Face Ids * @description Comma-separated list of face ids to mask eg '0,2,7'. Numbered from 0. Leave empty to mask all. Find face IDs with FaceIdentifier node. @@ -6880,7 +6875,7 @@ export type components = { * @description Image for face detection * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Face Id * @description The face ID to process, numbered from 0. Multiple faces not supported. Find a face's ID with FaceIdentifier node. @@ -7396,12 +7391,12 @@ export type components = { * @description Control LoRA model to load * @default null */ - lora?: components["schemas"]["ModelIdentifierField"]; + lora?: components["schemas"]["ModelIdentifierField"] | null; /** * @description The image to encode. * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Weight * @description The weight of the LoRA. 
@@ -7497,12 +7492,12 @@ export type components = { * @description The control image * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * @description ControlNet model to load * @default null */ - control_model?: components["schemas"]["ModelIdentifierField"]; + control_model?: components["schemas"]["ModelIdentifierField"] | null; /** * Control Weight * @description The weight given to the ControlNet @@ -7620,7 +7615,7 @@ export type components = { * @description Flux model (Transformer) to load * @default null */ - transformer?: components["schemas"]["TransformerField"]; + transformer?: components["schemas"]["TransformerField"] | null; /** * Control LoRA * @description Control LoRA model to load @@ -7632,7 +7627,7 @@ export type components = { * @description Positive conditioning tensor * @default null */ - positive_text_conditioning?: components["schemas"]["FluxConditioningField"] | components["schemas"]["FluxConditioningField"][]; + positive_text_conditioning?: components["schemas"]["FluxConditioningField"] | components["schemas"]["FluxConditioningField"][] | null; /** * Negative Text Conditioning * @description Negative conditioning tensor. Can be None if cfg_scale is 1.0. 
@@ -7787,7 +7782,7 @@ export type components = { * @description Flux model (Transformer) to load * @default null */ - transformer?: components["schemas"]["TransformerField"]; + transformer?: components["schemas"]["TransformerField"] | null; /** * Control LoRA * @description Control LoRA model to load @@ -7799,7 +7794,7 @@ export type components = { * @description Positive conditioning tensor * @default null */ - positive_text_conditioning?: components["schemas"]["FluxConditioningField"] | components["schemas"]["FluxConditioningField"][]; + positive_text_conditioning?: components["schemas"]["FluxConditioningField"] | components["schemas"]["FluxConditioningField"][] | null; /** * Negative Text Conditioning * @description Negative conditioning tensor. Can be None if cfg_scale is 1.0. @@ -7925,12 +7920,12 @@ export type components = { * @description The FLUX Fill reference image. * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * @description The bool inpainting mask. Excluded regions should be set to False, included regions should be set to True. * @default null */ - mask?: components["schemas"]["TensorField"]; + mask?: components["schemas"]["TensorField"] | null; /** * type * @default flux_fill @@ -7981,13 +7976,13 @@ export type components = { * @description The IP-Adapter image prompt(s). * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * IP-Adapter Model * @description The IP-Adapter model. * @default null */ - ip_adapter_model?: components["schemas"]["ModelIdentifierField"]; + ip_adapter_model?: components["schemas"]["ModelIdentifierField"] | null; /** * Clip Vision Model * @description CLIP Vision model to use. 
@@ -8047,7 +8042,7 @@ export type components = { * @description LoRA model to load * @default null */ - lora?: components["schemas"]["ModelIdentifierField"]; + lora?: components["schemas"]["ModelIdentifierField"] | null; /** * Weight * @description The weight at which the LoRA is applied to each model @@ -8148,7 +8143,7 @@ export type components = { * @description VAE model to load * @default null */ - vae_model?: components["schemas"]["ModelIdentifierField"]; + vae_model?: components["schemas"]["ModelIdentifierField"] | null; /** * type * @default flux_model_loader @@ -8312,7 +8307,7 @@ export type components = { * @description The FLUX Redux image prompt. * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * @description The bool mask associated with this FLUX Redux image prompt. Excluded regions should be set to False, included regions should be set to True. * @default null @@ -8323,7 +8318,7 @@ export type components = { * @description The FLUX Redux model to use. * @default null */ - redux_model?: components["schemas"]["ModelIdentifierField"]; + redux_model?: components["schemas"]["ModelIdentifierField"] | null; /** * Downsampling Factor * @description Redux Downsampling Factor (1-9) @@ -8394,26 +8389,25 @@ export type components = { * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count * @default null */ - clip?: components["schemas"]["CLIPField"]; + clip?: components["schemas"]["CLIPField"] | null; /** * T5Encoder * @description T5 tokenizer and text encoder * @default null */ - t5_encoder?: components["schemas"]["T5EncoderField"]; + t5_encoder?: components["schemas"]["T5EncoderField"] | null; /** * T5 Max Seq Len * @description Max sequence length for the T5 encoder. Expected to be 256 for FLUX schnell models and 512 for FLUX dev models. 
* @default null - * @enum {integer} */ - t5_max_seq_len?: 256 | 512; + t5_max_seq_len?: (256 | 512) | null; /** * Prompt * @description Text prompt to encode. * @default null */ - prompt?: string; + prompt?: string | null; /** * @description A mask defining the region that this conditioning prompt applies to. * @default null @@ -8462,12 +8456,12 @@ export type components = { * @description Latents tensor * @default null */ - latents?: components["schemas"]["LatentsField"]; + latents?: components["schemas"]["LatentsField"] | null; /** * @description VAE * @default null */ - vae?: components["schemas"]["VAEField"]; + vae?: components["schemas"]["VAEField"] | null; /** * type * @default flux_vae_decode @@ -8501,12 +8495,12 @@ export type components = { * @description The image to encode. * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * @description VAE * @default null */ - vae?: components["schemas"]["VAEField"]; + vae?: components["schemas"]["VAEField"] | null; /** * type * @default flux_vae_encode @@ -8586,7 +8580,7 @@ export type components = { * @description UNet (scheduler, LoRAs) * @default null */ - unet?: components["schemas"]["UNetField"]; + unet?: components["schemas"]["UNetField"] | null; /** * B1 * @description Scaling factor for stage 1 to amplify the contributions of backbone features. @@ -8644,7 +8638,7 @@ export type components = { * @description The mask to crop. * @default null */ - mask?: components["schemas"]["ImageField"]; + mask?: components["schemas"]["ImageField"] | null; /** * Margin * @description Margin to add to the bounding box. @@ -8790,20 +8784,19 @@ export type components = { * Model * @description The Grounding DINO model to use. * @default null - * @enum {string} */ - model?: "grounding-dino-tiny" | "grounding-dino-base"; + model?: ("grounding-dino-tiny" | "grounding-dino-base") | null; /** * Prompt * @description The prompt describing the object to segment. 
* @default null */ - prompt?: string; + prompt?: string | null; /** * @description The image to segment. * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Detection Threshold * @description The detection threshold for the Grounding DINO model. All detected bounding boxes with scores above this threshold will be returned. @@ -8853,7 +8846,7 @@ export type components = { * @description The image to process * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Scribble * @description Whether or not to use scribble mode @@ -8924,7 +8917,7 @@ export type components = { * @description The image to resize * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Width * @description The width to resize to (px) @@ -9146,13 +9139,13 @@ export type components = { * @description The IP-Adapter image prompt(s). * @default null */ - image?: components["schemas"]["ImageField"] | components["schemas"]["ImageField"][]; + image?: components["schemas"]["ImageField"] | components["schemas"]["ImageField"][] | null; /** * IP-Adapter Model * @description The IP-Adapter model. * @default null */ - ip_adapter_model?: components["schemas"]["ModelIdentifierField"]; + ip_adapter_model?: components["schemas"]["ModelIdentifierField"] | null; /** * Clip Vision Model * @description CLIP Vision model to use. Overrides model settings. Mandatory for checkpoint models. 
@@ -9367,7 +9360,7 @@ export type components = { * @description UNet (scheduler, LoRAs) * @default null */ - unet?: components["schemas"]["UNetField"]; + unet?: components["schemas"]["UNetField"] | null; /** * Multiplier * @description Amount to multiply the model's dimensions by when calculating the ideal size (may result in initial generation artifacts if too large) @@ -9481,7 +9474,7 @@ export type components = { * @description The image to blur * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Radius * @description The blur radius @@ -9550,7 +9543,7 @@ export type components = { * @description The image to get the channel from * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Channel * @description The channel to get @@ -9601,14 +9594,13 @@ export type components = { * @description The image to adjust * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Channel * @description Which channel to adjust * @default null - * @enum {string} */ - channel?: "Red (RGBA)" | "Green (RGBA)" | "Blue (RGBA)" | "Alpha (RGBA)" | "Cyan (CMYK)" | "Magenta (CMYK)" | "Yellow (CMYK)" | "Black (CMYK)" | "Hue (HSV)" | "Saturation (HSV)" | "Value (HSV)" | "Luminosity (LAB)" | "A (LAB)" | "B (LAB)" | "Y (YCbCr)" | "Cb (YCbCr)" | "Cr (YCbCr)"; + channel?: ("Red (RGBA)" | "Green (RGBA)" | "Blue (RGBA)" | "Alpha (RGBA)" | "Cyan (CMYK)" | "Magenta (CMYK)" | "Yellow (CMYK)" | "Black (CMYK)" | "Hue (HSV)" | "Saturation (HSV)" | "Value (HSV)" | "Luminosity (LAB)" | "A (LAB)" | "B (LAB)" | "Y (YCbCr)" | "Cb (YCbCr)" | "Cr (YCbCr)") | null; /** * Scale * @description The amount to scale the channel by. 
@@ -9664,14 +9656,13 @@ export type components = { * @description The image to adjust * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Channel * @description Which channel to adjust * @default null - * @enum {string} */ - channel?: "Red (RGBA)" | "Green (RGBA)" | "Blue (RGBA)" | "Alpha (RGBA)" | "Cyan (CMYK)" | "Magenta (CMYK)" | "Yellow (CMYK)" | "Black (CMYK)" | "Hue (HSV)" | "Saturation (HSV)" | "Value (HSV)" | "Luminosity (LAB)" | "A (LAB)" | "B (LAB)" | "Y (YCbCr)" | "Cb (YCbCr)" | "Cr (YCbCr)"; + channel?: ("Red (RGBA)" | "Green (RGBA)" | "Blue (RGBA)" | "Alpha (RGBA)" | "Cyan (CMYK)" | "Magenta (CMYK)" | "Yellow (CMYK)" | "Black (CMYK)" | "Hue (HSV)" | "Saturation (HSV)" | "Value (HSV)" | "Luminosity (LAB)" | "A (LAB)" | "B (LAB)" | "Y (YCbCr)" | "Cb (YCbCr)" | "Cr (YCbCr)") | null; /** * Offset * @description The amount to adjust the channel by @@ -9712,7 +9703,7 @@ export type components = { * @description The collection of image values * @default null */ - collection?: components["schemas"]["ImageField"][]; + collection?: components["schemas"]["ImageField"][] | null; /** * type * @default image_collection @@ -9773,7 +9764,7 @@ export type components = { * @description The image to convert * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Mode * @description The mode to convert to @@ -9824,7 +9815,7 @@ export type components = { * @description The image to crop * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * X * @description The left x coordinate of the crop rectangle @@ -10036,7 +10027,7 @@ export type components = { * @description The image to adjust * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Hue * @description The degrees by which to rotate the 
hue, 0-360 @@ -10086,7 +10077,7 @@ export type components = { * @description The image to lerp * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Min * @description The minimum input value @@ -10132,7 +10123,7 @@ export type components = { * @description The image to load * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * type * @default image @@ -10176,7 +10167,7 @@ export type components = { * @description The image to lerp * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Min * @description The minimum output value @@ -10227,7 +10218,7 @@ export type components = { * @description The mask image to convert. * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Cutoff * @description Cutoff (<) @@ -10283,12 +10274,12 @@ export type components = { * @description The first image to multiply * @default null */ - image1?: components["schemas"]["ImageField"]; + image1?: components["schemas"]["ImageField"] | null; /** * @description The second image to multiply * @default null */ - image2?: components["schemas"]["ImageField"]; + image2?: components["schemas"]["ImageField"] | null; /** * type * @default img_mul @@ -10332,7 +10323,7 @@ export type components = { * @description The image to check * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * type * @default img_nsfw @@ -10376,7 +10367,7 @@ export type components = { * @description The image to add noise to * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Seed * @description Seed for random number generation @@ -10496,13 +10487,13 @@ export type components = { * @description The 
width of the entire grid. * @default null */ - width?: number; + width?: number | null; /** * Height * @description The height of the entire grid. * @default null */ - height?: number; + height?: number | null; /** * Num Cols * @description The number of columns in the grid. @@ -10570,12 +10561,12 @@ export type components = { * @description The base image * @default null */ - base_image?: components["schemas"]["ImageField"]; + base_image?: components["schemas"]["ImageField"] | null; /** * @description The image to paste * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * @description The mask to use when pasting * @default null @@ -10673,7 +10664,7 @@ export type components = { * @description The image to resize * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Width * @description The width to resize to (px) @@ -10736,7 +10727,7 @@ export type components = { * @description The image to scale * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Scale Factor * @description The factor by which to scale the image @@ -10783,12 +10774,12 @@ export type components = { * @description The image to encode * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * @description VAE * @default null */ - vae?: components["schemas"]["VAEField"]; + vae?: components["schemas"]["VAEField"] | null; /** * Tiled * @description Processing using overlapping tiles (reduce memory consumption) @@ -10881,7 +10872,7 @@ export type components = { * @description The image to check * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Text * @description Watermark text @@ -10952,7 +10943,7 @@ export type components = { * @description The 
image to process * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * @description The color to use to infill * @default { @@ -11006,7 +10997,7 @@ export type components = { * @description The image to process * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Downscale * @description Run patchmatch on downscaled image to speedup infill @@ -11063,7 +11054,7 @@ export type components = { * @description The image to process * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Tile Size * @description The tile size (px) @@ -11410,7 +11401,7 @@ export type components = { * @description The tensor mask to convert. * @default null */ - mask?: components["schemas"]["TensorField"]; + mask?: components["schemas"]["TensorField"] | null; /** * type * @default invert_tensor_mask @@ -12378,7 +12369,7 @@ export type components = { * @description The image to adjust * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Space * @description Color space in which to rotate hue by polar coords (*: non-invertible) @@ -12453,7 +12444,7 @@ export type components = { * @description Image from which to get channel * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * type * @default invokeai_ealightness @@ -12497,7 +12488,7 @@ export type components = { * @description The top image to blend * @default null */ - layer_upper?: components["schemas"]["ImageField"]; + layer_upper?: components["schemas"]["ImageField"] | null; /** * Blend Mode * @description Available blend modes @@ -12532,7 +12523,7 @@ export type components = { * @description The bottom image to blend * @default null */ - layer_base?: 
components["schemas"]["ImageField"]; + layer_base?: components["schemas"]["ImageField"] | null; /** * Color Space * @description Available color spaces for blend computations @@ -12595,12 +12586,12 @@ export type components = { * @description Image of the subject on a plain monochrome background * @default null */ - image_subject?: components["schemas"]["ImageField"]; + image_subject?: components["schemas"]["ImageField"] | null; /** * @description Image of a background scene * @default null */ - image_background?: components["schemas"]["ImageField"]; + image_background?: components["schemas"]["ImageField"] | null; /** * Chroma Key * @description Can be empty for corner flood select, or CSS-3 color or tuple @@ -12675,7 +12666,7 @@ export type components = { * @description The image from which to create a mask * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Lightness Only * @description If true, only applies to image lightness (CIELa*b*) @@ -12744,7 +12735,7 @@ export type components = { * @description The image for which to apply processing * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Invert * @description Whether to invert the image colors @@ -12818,7 +12809,7 @@ export type components = { * @description The image from which to create a mask * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Invert Output * @description Make light areas dark and vice versa @@ -12961,7 +12952,7 @@ export type components = { * @description The image to process * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * type * @default infill_lama @@ -12996,7 +12987,7 @@ export type components = { * @description The collection of latents tensors * @default null */ - collection?: 
components["schemas"]["LatentsField"][]; + collection?: components["schemas"]["LatentsField"][] | null; /** * type * @default latents_collection @@ -13064,7 +13055,7 @@ export type components = { * @description The latents tensor * @default null */ - latents?: components["schemas"]["LatentsField"]; + latents?: components["schemas"]["LatentsField"] | null; /** * type * @default latents @@ -13158,12 +13149,12 @@ export type components = { * @description Latents tensor * @default null */ - latents?: components["schemas"]["LatentsField"]; + latents?: components["schemas"]["LatentsField"] | null; /** * @description VAE * @default null */ - vae?: components["schemas"]["VAEField"]; + vae?: components["schemas"]["VAEField"] | null; /** * Tiled * @description Processing using overlapping tiles (reduce memory consumption) @@ -13225,7 +13216,7 @@ export type components = { * @description The image to process * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * type * @default lineart_anime_edge_detection @@ -13269,7 +13260,7 @@ export type components = { * @description The image to process * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Coarse * @description Whether to use coarse mode @@ -13403,7 +13394,7 @@ export type components = { * @description The VLLM model to use * @default null */ - vllm_model?: components["schemas"]["ModelIdentifierField"]; + vllm_model?: components["schemas"]["ModelIdentifierField"] | null; /** * type * @default llava_onevision_vllm @@ -13579,7 +13570,7 @@ export type components = { * @description LoRA model to load * @default null */ - lora?: components["schemas"]["ModelIdentifierField"]; + lora?: components["schemas"]["ModelIdentifierField"] | null; /** * Weight * @description The weight at which the LoRA is applied to each model @@ -13753,7 +13744,7 @@ export type components = { * @description 
LoRA model to load * @default null */ - lora?: components["schemas"]["ModelIdentifierField"]; + lora?: components["schemas"]["ModelIdentifierField"] | null; /** * Weight * @description The weight at which the LoRA is applied to each model @@ -13885,7 +13876,7 @@ export type components = { * @description The image to process * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Score Threshold * @description The threshold used to score points when determining line segments @@ -14397,7 +14388,7 @@ export type components = { * @description Main model (UNet, VAE, CLIP) to load * @default null */ - model?: components["schemas"]["ModelIdentifierField"]; + model?: components["schemas"]["ModelIdentifierField"] | null; /** * type * @default main_model_loader @@ -14441,12 +14432,12 @@ export type components = { * @description The first mask to combine * @default null */ - mask1?: components["schemas"]["ImageField"]; + mask1?: components["schemas"]["ImageField"] | null; /** * @description The second image to combine * @default null */ - mask2?: components["schemas"]["ImageField"]; + mask2?: components["schemas"]["ImageField"] | null; /** * type * @default mask_combine @@ -14490,31 +14481,31 @@ export type components = { * @description The image to apply the mask to * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Edge Size * @description The size of the edge * @default null */ - edge_size?: number; + edge_size?: number | null; /** * Edge Blur * @description The amount of blur on the edge * @default null */ - edge_blur?: number; + edge_blur?: number | null; /** * Low Threshold * @description First threshold for the hysteresis procedure in Canny edge detection * @default null */ - low_threshold?: number; + low_threshold?: number | null; /** * High Threshold * @description Second threshold for the hysteresis procedure in Canny edge 
detection * @default null */ - high_threshold?: number; + high_threshold?: number | null; /** * type * @default mask_edge @@ -14558,7 +14549,7 @@ export type components = { * @description The image to create the mask from * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Invert * @description Whether or not to invert the mask @@ -14608,12 +14599,12 @@ export type components = { * @description The image to create the mask from * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * @description ID color to mask * @default null */ - color?: components["schemas"]["ColorField"]; + color?: components["schemas"]["ColorField"] | null; /** * Threshold * @description Threshold for color detection @@ -14693,7 +14684,7 @@ export type components = { * @description The mask tensor to convert. * @default null */ - mask?: components["schemas"]["TensorField"]; + mask?: components["schemas"]["TensorField"] | null; /** * type * @default tensor_mask_to_image @@ -14737,7 +14728,7 @@ export type components = { * @description The image to process * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Max Faces * @description Maximum number of faces to detect @@ -14784,7 +14775,7 @@ export type components = { * @description Collection of Metadata * @default null */ - collection?: components["schemas"]["MetadataField"][]; + collection?: components["schemas"]["MetadataField"][] | null; /** * type * @default merge_metadata @@ -14829,7 +14820,7 @@ export type components = { * @description A list of tile images with tile properties. 
* @default null */ - tiles_with_images?: components["schemas"]["TileWithImage"][]; + tiles_with_images?: components["schemas"]["TileWithImage"][] | null; /** * Blend Mode * @description blending type Linear or Seam @@ -14883,13 +14874,13 @@ export type components = { * @description The image to extract metadata from * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Key * @description The key in the image's metadata to extract the value from * @default null */ - key?: string; + key?: string | null; /** * type * @default metadata_field_extractor @@ -14923,7 +14914,7 @@ export type components = { * @description The image to process * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * type * @default metadata_from_image @@ -14958,7 +14949,7 @@ export type components = { * @description A single metadata item or collection of metadata items * @default null */ - items?: components["schemas"]["MetadataItemField"][] | components["schemas"]["MetadataItemField"]; + items?: components["schemas"]["MetadataItemField"][] | components["schemas"]["MetadataItemField"] | null; /** * type * @default metadata @@ -15006,13 +14997,13 @@ export type components = { * @description Label for this metadata item * @default null */ - label?: string; + label?: string | null; /** * Value * @description The value for this metadata item (may be any type) * @default null */ - value?: unknown; + value?: unknown | null; /** * type * @default metadata_item @@ -15065,7 +15056,7 @@ export type components = { * @description The value for this metadata item (may be any type) * @default null */ - value?: unknown; + value?: unknown | null; /** * type * @default metadata_item_linked @@ -15143,7 +15134,7 @@ export type components = { * @description The default bool to use if not found in the metadata * @default null */ - default_value?: boolean[]; + 
default_value?: boolean[] | null; /** * type * @default metadata_to_bool_collection @@ -15196,7 +15187,7 @@ export type components = { * @description The default bool to use if not found in the metadata * @default null */ - default_value?: boolean; + default_value?: boolean | null; /** * type * @default metadata_to_bool @@ -15288,7 +15279,7 @@ export type components = { * @description The default float to use if not found in the metadata * @default null */ - default_value?: number[]; + default_value?: number[] | null; /** * type * @default metadata_to_float_collection @@ -15341,7 +15332,7 @@ export type components = { * @description The default float to use if not found in the metadata * @default null */ - default_value?: number; + default_value?: number | null; /** * type * @default metadata_to_float @@ -15434,7 +15425,7 @@ export type components = { * @description The default integer to use if not found in the metadata * @default null */ - default_value?: number[]; + default_value?: number[] | null; /** * type * @default metadata_to_integer_collection @@ -15487,7 +15478,7 @@ export type components = { * @description The default integer to use if not found in the metadata * @default null */ - default_value?: number; + default_value?: number | null; /** * type * @default metadata_to_integer @@ -15648,7 +15639,7 @@ export type components = { * @description The default model to use if not found in the metadata * @default null */ - default_value?: components["schemas"]["ModelIdentifierField"]; + default_value?: components["schemas"]["ModelIdentifierField"] | null; /** * type * @default metadata_to_model @@ -15789,7 +15780,7 @@ export type components = { * @description The default SDXL Model to use if not found in the metadata * @default null */ - default_value?: components["schemas"]["ModelIdentifierField"]; + default_value?: components["schemas"]["ModelIdentifierField"] | null; /** * type * @default metadata_to_sdxl_model @@ -15938,7 +15929,7 @@ export type 
components = { * @description The default string collection to use if not found in the metadata * @default null */ - default_value?: string[]; + default_value?: string[] | null; /** * type * @default metadata_to_string_collection @@ -15991,7 +15982,7 @@ export type components = { * @description The default string to use if not found in the metadata * @default null */ - default_value?: string; + default_value?: string | null; /** * type * @default metadata_to_string @@ -16083,7 +16074,7 @@ export type components = { * @description The default VAE to use if not found in the metadata * @default null */ - default_value?: components["schemas"]["VAEField"]; + default_value?: components["schemas"]["VAEField"] | null; /** * type * @default metadata_to_vae @@ -16153,7 +16144,7 @@ export type components = { * @description The model to select * @default null */ - model?: components["schemas"]["ModelIdentifierField"]; + model?: components["schemas"]["ModelIdentifierField"] | null; /** * type * @default model_identifier @@ -16823,7 +16814,7 @@ export type components = { * @description The image to process * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * type * @default normal_map @@ -16944,12 +16935,12 @@ export type components = { * @description The tile image. * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * @description The tile properties. 
* @default null */ - tile?: components["schemas"]["Tile"]; + tile?: components["schemas"]["Tile"] | null; /** * type * @default pair_tile_image @@ -17006,17 +16997,17 @@ export type components = { * @description The image to paste * @default null */ - source_image?: components["schemas"]["ImageField"]; + source_image?: components["schemas"]["ImageField"] | null; /** * @description The image to paste into * @default null */ - target_image?: components["schemas"]["ImageField"]; + target_image?: components["schemas"]["ImageField"] | null; /** * @description The bounding box to paste the image into * @default null */ - bounding_box?: components["schemas"]["BoundingBoxField"]; + bounding_box?: components["schemas"]["BoundingBoxField"] | null; /** * type * @default paste_image_into_bounding_box @@ -17060,7 +17051,7 @@ export type components = { * @description The image to process * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Quantize Edges * @description Whether or not to use safe mode @@ -17146,7 +17137,7 @@ export type components = { * @description Path to prompt text file * @default null */ - file_path?: string; + file_path?: string | null; /** * Pre Prompt * @description String to prepend to each prompt @@ -17594,37 +17585,37 @@ export type components = { * @description The width of the entire mask. * @default null */ - width?: number; + width?: number | null; /** * Height * @description The height of the entire mask. * @default null */ - height?: number; + height?: number | null; /** * X Left * @description The left x-coordinate of the rectangular masked region (inclusive). * @default null */ - x_left?: number; + x_left?: number | null; /** * Y Top * @description The top y-coordinate of the rectangular masked region (inclusive). * @default null */ - y_top?: number; + y_top?: number | null; /** * Rectangle Width * @description The width of the rectangular masked region. 
* @default null */ - rectangle_width?: number; + rectangle_width?: number | null; /** * Rectangle Height * @description The height of the rectangular masked region. * @default null */ - rectangle_height?: number; + rectangle_height?: number | null; /** * type * @default rectangle_mask @@ -17695,19 +17686,19 @@ export type components = { * @description Latents tensor * @default null */ - latents?: components["schemas"]["LatentsField"]; + latents?: components["schemas"]["LatentsField"] | null; /** * Width * @description Width of output (px) * @default null */ - width?: number; + width?: number | null; /** * Height * @description Width of output (px) * @default null */ - height?: number; + height?: number | null; /** * Mode * @description Interpolation mode @@ -17904,17 +17895,17 @@ export type components = { * @description SD3 model (MMDiTX) to load * @default null */ - transformer?: components["schemas"]["TransformerField"]; + transformer?: components["schemas"]["TransformerField"] | null; /** * @description Positive conditioning tensor * @default null */ - positive_conditioning?: components["schemas"]["SD3ConditioningField"]; + positive_conditioning?: components["schemas"]["SD3ConditioningField"] | null; /** * @description Negative conditioning tensor * @default null */ - negative_conditioning?: components["schemas"]["SD3ConditioningField"]; + negative_conditioning?: components["schemas"]["SD3ConditioningField"] | null; /** * CFG Scale * @description Classifier-Free Guidance scale @@ -17988,12 +17979,12 @@ export type components = { * @description The image to encode * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * @description VAE * @default null */ - vae?: components["schemas"]["VAEField"]; + vae?: components["schemas"]["VAEField"] | null; /** * type * @default sd3_i2l @@ -18037,12 +18028,12 @@ export type components = { * @description Latents tensor * @default null */ - latents?: 
components["schemas"]["LatentsField"]; + latents?: components["schemas"]["LatentsField"] | null; /** * @description VAE * @default null */ - vae?: components["schemas"]["VAEField"]; + vae?: components["schemas"]["VAEField"] | null; /** * type * @default sd3_l2i @@ -18119,13 +18110,13 @@ export type components = { * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count * @default null */ - clip?: components["schemas"]["CLIPField"]; + clip?: components["schemas"]["CLIPField"] | null; /** * CLIP 2 * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count * @default null */ - clip2?: components["schemas"]["CLIPField"]; + clip2?: components["schemas"]["CLIPField"] | null; /** * @description A mask defining the region that this conditioning prompt applies to. * @default null @@ -18218,7 +18209,7 @@ export type components = { * @description LoRA model to load * @default null */ - lora?: components["schemas"]["ModelIdentifierField"]; + lora?: components["schemas"]["ModelIdentifierField"] | null; /** * Weight * @description The weight at which the LoRA is applied to each model @@ -18306,7 +18297,7 @@ export type components = { * @description SDXL Main model (UNet, VAE, CLIP1, CLIP2) to load * @default null */ - model?: components["schemas"]["ModelIdentifierField"]; + model?: components["schemas"]["ModelIdentifierField"] | null; /** * type * @default sdxl_model_loader @@ -18404,7 +18395,7 @@ export type components = { * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count * @default null */ - clip2?: components["schemas"]["CLIPField"]; + clip2?: components["schemas"]["CLIPField"] | null; /** * type * @default sdxl_refiner_compel_prompt @@ -18438,7 +18429,7 @@ export type components = { * @description SDXL Refiner Main Modde (UNet, VAE, CLIP2) to load * @default null */ - model?: components["schemas"]["ModelIdentifierField"]; + model?: components["schemas"]["ModelIdentifierField"] | null; /** * type * @default 
sdxl_refiner_model_loader @@ -18514,7 +18505,7 @@ export type components = { * @description The image to process * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * type * @default save_image @@ -18548,13 +18539,13 @@ export type components = { * @description Latents tensor * @default null */ - latents?: components["schemas"]["LatentsField"]; + latents?: components["schemas"]["LatentsField"] | null; /** * Scale Factor * @description The factor by which to scale * @default null */ - scale_factor?: number; + scale_factor?: number | null; /** * Mode * @description Interpolation mode @@ -18751,13 +18742,13 @@ export type components = { * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count * @default null */ - clip_l?: components["schemas"]["CLIPField"]; + clip_l?: components["schemas"]["CLIPField"] | null; /** * CLIP G * @description CLIP (tokenizer, text encoder, LoRAs) and skipped layer count * @default null */ - clip_g?: components["schemas"]["CLIPField"]; + clip_g?: components["schemas"]["CLIPField"] | null; /** * T5Encoder * @description T5 tokenizer and text encoder @@ -18769,7 +18760,7 @@ export type components = { * @description Text prompt to encode. * @default null */ - prompt?: string; + prompt?: string | null; /** * type * @default sd3_text_encoder @@ -18880,14 +18871,13 @@ export type components = { * Model * @description The Segment Anything model to use. * @default null - * @enum {string} */ - model?: "segment-anything-base" | "segment-anything-large" | "segment-anything-huge"; + model?: ("segment-anything-base" | "segment-anything-large" | "segment-anything-huge") | null; /** * @description The image to segment. * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Bounding Boxes * @description The bounding boxes to prompt the SAM model with. 
@@ -19301,7 +19291,7 @@ export type components = { * @description The image to show * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * type * @default show_image @@ -19426,13 +19416,13 @@ export type components = { * @description The input image * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Image-to-Image Model * @description Image-to-Image model * @default null */ - image_to_image_model?: components["schemas"]["ModelIdentifierField"]; + image_to_image_model?: components["schemas"]["ModelIdentifierField"] | null; /** * Tile Size * @description The tile size for tiled image-to-image. Set to 0 to disable tiling. @@ -19573,13 +19563,13 @@ export type components = { * @description The input image * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Image-to-Image Model * @description Image-to-Image model * @default null */ - image_to_image_model?: components["schemas"]["ModelIdentifierField"]; + image_to_image_model?: components["schemas"]["ModelIdentifierField"] | null; /** * Tile Size * @description The tile size for tiled image-to-image. Set to 0 to disable tiling. @@ -20326,13 +20316,13 @@ export type components = { * @description The IP-Adapter image prompt. * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * T2I-Adapter Model * @description The T2I-Adapter model. * @default null */ - t2i_adapter_model?: components["schemas"]["ModelIdentifierField"]; + t2i_adapter_model?: components["schemas"]["ModelIdentifierField"] | null; /** * Weight * @description The weight given to the T2I-Adapter @@ -20793,7 +20783,7 @@ export type components = { * @description The tile to split into properties. 
* @default null */ - tile?: components["schemas"]["Tile"]; + tile?: components["schemas"]["Tile"] | null; /** * type * @default tile_to_properties @@ -20898,12 +20888,12 @@ export type components = { * @description Positive conditioning tensor * @default null */ - positive_conditioning?: components["schemas"]["ConditioningField"]; + positive_conditioning?: components["schemas"]["ConditioningField"] | null; /** * @description Negative conditioning tensor * @default null */ - negative_conditioning?: components["schemas"]["ConditioningField"]; + negative_conditioning?: components["schemas"]["ConditioningField"] | null; /** * @description Noise tensor * @default null @@ -20968,7 +20958,7 @@ export type components = { * @description UNet (scheduler, LoRAs) * @default null */ - unet?: components["schemas"]["UNetField"]; + unet?: components["schemas"]["UNetField"] | null; /** * CFG Rescale Multiplier * @description Rescale multiplier for CFG guidance, used for models trained with zero-terminal SNR @@ -21178,7 +21168,7 @@ export type components = { * @description The image to use * @default null */ - image?: components["schemas"]["ImageField"]; + image?: components["schemas"]["ImageField"] | null; /** * Radius * @description Unsharp mask radius @@ -21417,7 +21407,7 @@ export type components = { * @description VAE model to load * @default null */ - vae_model?: components["schemas"]["ModelIdentifierField"]; + vae_model?: components["schemas"]["ModelIdentifierField"] | null; /** * type * @default vae_loader From 71e956c1c7fda9aaf4cee63dd18c95c5e1521b40 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 2 May 2025 10:30:18 +1000 Subject: [PATCH 3/8] feat(nodes): validate default values for all fields This prevents issues where the node is defined with an invalid default value, which would guarantee an error during a ser/de roundtrip. 
- Upstream issue requesting this functionality be built-in to pydantic: https://github.com/pydantic/pydantic/issues/8722 - Upstream PR that implements the functionality: https://github.com/pydantic/pydantic-core/pull/1593 --- invokeai/app/invocations/baseinvocation.py | 29 ++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index 02d4537c890..209629e9c4e 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -491,6 +491,31 @@ def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None return None +class NoDefaultSentinel: + pass + + +def validate_field_default(field_name: str, invocation_type: str, annotation: Any, field_info: FieldInfo) -> None: + """Validates the default value of a field against its pydantic field definition.""" + + assert isinstance(field_info.json_schema_extra, dict), "json_schema_extra is not a dict" + + # By the time we are doing this, we've already done some pydantic magic by overriding the original default value. + # We store the original default value in the json_schema_extra dict, so we can validate it here. + orig_default = field_info.json_schema_extra.get("orig_default", NoDefaultSentinel) + + if orig_default is NoDefaultSentinel: + return + + TempDefaultValidator = create_model("TempDefaultValidator", field_to_validate=(annotation, field_info)) + + # Validate the default value against the annotation + try: + TempDefaultValidator(field_to_validate=orig_default) + except Exception as e: + raise ValueError(f"Default value for field {field_name} on invocation {invocation_type} is invalid, {e}") from e + + def is_optional(annotation: Any) -> bool: """ Checks if the given annotation is optional (i.e. Optional[X], Union[X, None] or X | None). 
@@ -545,8 +570,12 @@ def wrapper(cls: Type[TBaseInvocation]) -> Type[TBaseInvocation]: assert isinstance(field_info.json_schema_extra, dict), ( f"{field_name} on invocation {invocation_type} has a non-dict json_schema_extra, did you forget to use InputField?" ) + + validate_field_default(field_name, invocation_type, annotation, field_info) + if field_info.default is None and not is_optional(annotation): annotation = annotation | None + fields[field_name] = (annotation, field_info) # Add OpenAPI schema extras From 2e44413684161bf4e482954ce343aa754ab72b16 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 2 May 2025 10:36:33 +1000 Subject: [PATCH 4/8] fix(nodes): do not provide invalid defaults for batch nodes --- invokeai/app/invocations/batch.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/invokeai/app/invocations/batch.py b/invokeai/app/invocations/batch.py index 37a89104737..34ecd38f26b 100644 --- a/invokeai/app/invocations/batch.py +++ b/invokeai/app/invocations/batch.py @@ -64,7 +64,6 @@ class ImageBatchInvocation(BaseBatchInvocation): """Create a batched generation, where the workflow is executed once for each image in the batch.""" images: list[ImageField] = InputField( - default=[], min_length=1, description="The images to batch over", ) @@ -120,7 +119,6 @@ class StringBatchInvocation(BaseBatchInvocation): """Create a batched generation, where the workflow is executed once for each string in the batch.""" strings: list[str] = InputField( - default=[], min_length=1, description="The strings to batch over", ) @@ -176,7 +174,6 @@ class IntegerBatchInvocation(BaseBatchInvocation): """Create a batched generation, where the workflow is executed once for each integer in the batch.""" integers: list[int] = InputField( - default=[], min_length=1, description="The integers to batch over", ) @@ -230,7 +227,6 @@ class FloatBatchInvocation(BaseBatchInvocation): """Create a batched generation, where the 
workflow is executed once for each float in the batch.""" floats: list[float] = InputField( - default=[], min_length=1, description="The floats to batch over", ) From 14779b2dfe37026a0855e0aa99ab2483faadc330 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 2 May 2025 10:36:48 +1000 Subject: [PATCH 5/8] chore(ui): typegen --- .../frontend/web/src/services/api/schema.ts | 25 +++++++++++-------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts index 843c22cb94f..818af79b4ec 100644 --- a/invokeai/frontend/web/src/services/api/schema.ts +++ b/invokeai/frontend/web/src/services/api/schema.ts @@ -2262,9 +2262,14 @@ export type components = { mode?: "RGB" | "RGBA"; /** * @description The color of the image - * @default null + * @default { + * "r": 0, + * "g": 0, + * "b": 0, + * "a": 255 + * } */ - color?: components["schemas"]["ColorField"] | null; + color?: components["schemas"]["ColorField"]; /** * type * @default blank_image @@ -7024,9 +7029,9 @@ export type components = { /** * Floats * @description The floats to batch over - * @default [] + * @default null */ - floats?: number[]; + floats?: number[] | null; /** * type * @default float_batch @@ -9428,9 +9433,9 @@ export type components = { /** * Images * @description The images to batch over - * @default [] + * @default null */ - images?: components["schemas"]["ImageField"][]; + images?: components["schemas"]["ImageField"][] | null; /** * type * @default image_batch @@ -11163,9 +11168,9 @@ export type components = { /** * Integers * @description The integers to batch over - * @default [] + * @default null */ - integers?: number[]; + integers?: number[] | null; /** * type * @default integer_batch @@ -19692,9 +19697,9 @@ export type components = { /** * Strings * @description The strings to batch over - * @default [] + * @default null */ - strings?: 
string[]; + strings?: string[] | null; /** * type * @default string_batch From 76a0798ce20de281f21762ce72370866912e6090 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 2 May 2025 10:45:04 +1000 Subject: [PATCH 6/8] feat(nodes): improved error messages for invalid defaults --- invokeai/app/invocations/baseinvocation.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index 209629e9c4e..e46b5d03b90 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -495,7 +495,9 @@ class NoDefaultSentinel: pass -def validate_field_default(field_name: str, invocation_type: str, annotation: Any, field_info: FieldInfo) -> None: +def validate_field_default( + cls_name: str, field_name: str, invocation_type: str, annotation: Any, field_info: FieldInfo +) -> None: """Validates the default value of a field against its pydantic field definition.""" assert isinstance(field_info.json_schema_extra, dict), "json_schema_extra is not a dict" @@ -507,13 +509,15 @@ def validate_field_default(field_name: str, invocation_type: str, annotation: An if orig_default is NoDefaultSentinel: return - TempDefaultValidator = create_model("TempDefaultValidator", field_to_validate=(annotation, field_info)) + TempDefaultValidator = create_model(cls_name, **{field_name: (annotation, field_info)}) # Validate the default value against the annotation try: - TempDefaultValidator(field_to_validate=orig_default) + TempDefaultValidator.model_validate({field_name: orig_default}) except Exception as e: - raise ValueError(f"Default value for field {field_name} on invocation {invocation_type} is invalid, {e}") from e + raise InvalidFieldError( + f'Default value for field "{field_name}" on invocation "{invocation_type}" is invalid, {e}' + ) from e def is_optional(annotation: Any) -> bool: @@ 
-571,7 +575,7 @@ def wrapper(cls: Type[TBaseInvocation]) -> Type[TBaseInvocation]: f"{field_name} on invocation {invocation_type} has a non-dict json_schema_extra, did you forget to use InputField?" ) - validate_field_default(field_name, invocation_type, annotation, field_info) + validate_field_default(cls.__name__, field_name, invocation_type, annotation, field_info) if field_info.default is None and not is_optional(annotation): annotation = annotation | None From d6f8c822f9bf94638ba134d6860fdc800c73869f Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Fri, 2 May 2025 17:12:25 +1000 Subject: [PATCH 7/8] feat(nodes): store original field annotation & FieldInfo in invocations --- invokeai/app/invocations/baseinvocation.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index e46b5d03b90..4fa28b6a3a6 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -22,6 +22,7 @@ Literal, Optional, Type, + TypedDict, TypeVar, Union, ) @@ -106,6 +107,11 @@ class UIConfigBase(BaseModel): ) +class OriginalModelField(TypedDict): + annotation: Any + field_info: FieldInfo + + class BaseInvocationOutput(BaseModel): """ Base class for all invocation outputs. 
@@ -134,6 +140,9 @@ def get_type(cls) -> str: """Gets the invocation output's type, as provided by the `@invocation_output` decorator.""" return cls.model_fields["type"].default + _original_model_fields: ClassVar[dict[str, OriginalModelField]] = {} + """The original model fields, before any modifications were made by the @invocation_output decorator.""" + model_config = ConfigDict( protected_namespaces=(), validate_assignment=True, @@ -266,6 +275,9 @@ def invoke_internal(self, context: InvocationContext, services: "InvocationServi coerce_numbers_to_str=True, ) + _original_model_fields: ClassVar[dict[str, OriginalModelField]] = {} + """The original model fields, before any modifications were made by the @invocation decorator.""" + TBaseInvocation = TypeVar("TBaseInvocation", bound=BaseInvocation) @@ -575,6 +587,8 @@ def wrapper(cls: Type[TBaseInvocation]) -> Type[TBaseInvocation]: f"{field_name} on invocation {invocation_type} has a non-dict json_schema_extra, did you forget to use InputField?" ) + cls._original_model_fields[field_name] = OriginalModelField(annotation=annotation, field_info=field_info) + validate_field_default(cls.__name__, field_name, invocation_type, annotation, field_info) if field_info.default is None and not is_optional(annotation): @@ -686,6 +700,9 @@ def wrapper(cls: Type[TBaseInvocationOutput]) -> Type[TBaseInvocationOutput]: assert isinstance(field_info.json_schema_extra, dict), ( f"{field_name} on invocation output {output_type} has a non-dict json_schema_extra, did you forget to use InputField?" 
) + + cls._original_model_fields[field_name] = OriginalModelField(annotation=annotation, field_info=field_info) + if field_info.default is not PydanticUndefined and is_optional(annotation): annotation = annotation | None fields[field_name] = (annotation, field_info) From 1b58798dc7091fde2622208db238a6ea570ca827 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 13 May 2025 15:02:53 +1000 Subject: [PATCH 8/8] feat(nodes): tidy some type annotations in baseinvocation --- invokeai/app/invocations/baseinvocation.py | 32 +++++++++++++--------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py index 4fa28b6a3a6..0033b56d3c1 100644 --- a/invokeai/app/invocations/baseinvocation.py +++ b/invokeai/app/invocations/baseinvocation.py @@ -25,6 +25,7 @@ TypedDict, TypeVar, Union, + cast, ) import semver @@ -521,9 +522,10 @@ def validate_field_default( if orig_default is NoDefaultSentinel: return - TempDefaultValidator = create_model(cls_name, **{field_name: (annotation, field_info)}) + # To validate the default value, we can create a temporary pydantic model with the field we are validating as its + # only field. Then validate the default value against this temporary model. + TempDefaultValidator = cast(BaseModel, create_model(cls_name, **{field_name: (annotation, field_info)})) - # Validate the default value against the annotation try: TempDefaultValidator.model_validate({field_name: orig_default}) except Exception as e: @@ -631,15 +633,15 @@ def wrapper(cls: Type[TBaseInvocation]) -> Type[TBaseInvocation]: # not work. Instead, we have to create a new class with the type field and patch the original class with it. 
invocation_type_annotation = Literal[invocation_type] - invocation_type_field = Field( - title="type", default=invocation_type, json_schema_extra={"field_kind": FieldKind.NodeAttribute} + + # Field() returns an instance of FieldInfo, but thanks to a pydantic implementation detail, it is _typed_ as Any. + # This cast makes the type annotation match the class's true type. + invocation_type_field_info = cast( + FieldInfo, + Field(title="type", default=invocation_type, json_schema_extra={"field_kind": FieldKind.NodeAttribute}), ) - # pydantic's Field function returns a FieldInfo, but they annotate it as returning a type so that type-checkers - # don't get confused by something like this: - # foo: str = Field() <-- this is a FieldInfo, not a str - # Unfortunately this means we need to use type: ignore here to avoid type-checker errors - fields["type"] = (invocation_type_annotation, invocation_type_field) # type: ignore + fields["type"] = (invocation_type_annotation, invocation_type_field_info) # Validate the `invoke()` method is implemented if "invoke" in cls.__abstractmethods__: @@ -662,7 +664,7 @@ def wrapper(cls: Type[TBaseInvocation]) -> Type[TBaseInvocation]: ) docstring = cls.__doc__ - new_class = create_model(cls.__qualname__, __base__=cls, __module__=cls.__module__, **fields) + new_class = create_model(cls.__qualname__, __base__=cls, __module__=cls.__module__, **fields) # type: ignore new_class.__doc__ = docstring InvocationRegistry.register_invocation(new_class) @@ -709,11 +711,15 @@ def wrapper(cls: Type[TBaseInvocationOutput]) -> Type[TBaseInvocationOutput]: # Add the output type to the model. output_type_annotation = Literal[output_type] - output_type_field = Field( - title="type", default=output_type, json_schema_extra={"field_kind": FieldKind.NodeAttribute} + + # Field() returns an instance of FieldInfo, but thanks to a pydantic implementation detail, it is _typed_ as Any. + # This cast makes the type annotation match the class's true type. 
+ output_type_field_info = cast( + FieldInfo, + Field(title="type", default=output_type, json_schema_extra={"field_kind": FieldKind.NodeAttribute}), ) - fields["type"] = (output_type_annotation, output_type_field) # type: ignore + fields["type"] = (output_type_annotation, output_type_field_info) docstring = cls.__doc__ new_class = create_model(cls.__qualname__, __base__=cls, __module__=cls.__module__, **fields)