diff --git a/invokeai/app/invocations/flux_control_lora_loader.py b/invokeai/app/invocations/flux_control_lora_loader.py
index ba3addbdca0..f24ad6bf6a7 100644
--- a/invokeai/app/invocations/flux_control_lora_loader.py
+++ b/invokeai/app/invocations/flux_control_lora_loader.py
@@ -1,7 +1,6 @@
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    Classification,
     invocation,
     invocation_output,
 )
@@ -25,7 +24,6 @@ class FluxControlLoRALoaderOutput(BaseInvocationOutput):
     tags=["lora", "model", "flux"],
     category="model",
     version="1.1.1",
-    classification=Classification.Prototype,
 )
 class FluxControlLoRALoaderInvocation(BaseInvocation):
     """LoRA model and Image to use with FLUX transformer generation."""
diff --git a/invokeai/app/invocations/flux_controlnet.py b/invokeai/app/invocations/flux_controlnet.py
index 41b66975a75..230ef912f46 100644
--- a/invokeai/app/invocations/flux_controlnet.py
+++ b/invokeai/app/invocations/flux_controlnet.py
@@ -3,7 +3,6 @@
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    Classification,
     invocation,
     invocation_output,
 )
@@ -52,7 +51,6 @@ class FluxControlNetOutput(BaseInvocationOutput):
     tags=["controlnet", "flux"],
     category="controlnet",
     version="1.0.0",
-    classification=Classification.Prototype,
 )
 class FluxControlNetInvocation(BaseInvocation):
     """Collect FLUX ControlNet info to pass to other nodes."""
diff --git a/invokeai/app/invocations/flux_denoise.py b/invokeai/app/invocations/flux_denoise.py
index fb063acbc70..4e35d6d3311 100644
--- a/invokeai/app/invocations/flux_denoise.py
+++ b/invokeai/app/invocations/flux_denoise.py
@@ -10,7 +10,7 @@
 from torchvision.transforms.functional import resize as tv_resize
 from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

-from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.fields import (
     DenoiseMaskField,
     FieldDescriptions,
@@ -64,7 +64,6 @@
     tags=["image", "flux"],
     category="image",
     version="3.3.0",
-    classification=Classification.Prototype,
 )
 class FluxDenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Run denoising process with a FLUX transformer model."""
diff --git a/invokeai/app/invocations/flux_fill.py b/invokeai/app/invocations/flux_fill.py
index 2602c7cf079..cff8f2b1e52 100644
--- a/invokeai/app/invocations/flux_fill.py
+++ b/invokeai/app/invocations/flux_fill.py
@@ -31,7 +31,7 @@ class FluxFillOutput(BaseInvocationOutput):
     tags=["inpaint"],
     category="inpaint",
     version="1.0.0",
-    classification=Classification.Prototype,
+    classification=Classification.Beta,
 )
 class FluxFillInvocation(BaseInvocation):
     """Prepare the FLUX Fill conditioning data."""
diff --git a/invokeai/app/invocations/flux_ip_adapter.py b/invokeai/app/invocations/flux_ip_adapter.py
index 9653f859ad0..fd8fc2118ee 100644
--- a/invokeai/app/invocations/flux_ip_adapter.py
+++ b/invokeai/app/invocations/flux_ip_adapter.py
@@ -4,7 +4,7 @@
 from pydantic import field_validator, model_validator
 from typing_extensions import Self

-from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.fields import InputField, UIType
 from invokeai.app.invocations.ip_adapter import (
     CLIP_VISION_MODEL_MAP,
@@ -28,7 +28,6 @@
     tags=["ip_adapter", "control"],
     category="ip_adapter",
     version="1.0.0",
-    classification=Classification.Prototype,
 )
 class FluxIPAdapterInvocation(BaseInvocation):
     """Collects FLUX IP-Adapter info to pass to other nodes."""
diff --git a/invokeai/app/invocations/flux_lora_loader.py b/invokeai/app/invocations/flux_lora_loader.py
index 7c155ddbd4c..ae7c3657071 100644
--- a/invokeai/app/invocations/flux_lora_loader.py
+++ b/invokeai/app/invocations/flux_lora_loader.py
@@ -3,7 +3,6 @@
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    Classification,
     invocation,
     invocation_output,
 )
@@ -32,7 +31,6 @@ class FluxLoRALoaderOutput(BaseInvocationOutput):
     tags=["lora", "model", "flux"],
     category="model",
     version="1.2.1",
-    classification=Classification.Prototype,
 )
 class FluxLoRALoaderInvocation(BaseInvocation):
     """Apply a LoRA model to a FLUX transformer and/or text encoder."""
@@ -111,7 +109,6 @@ def invoke(self, context: InvocationContext) -> FluxLoRALoaderOutput:
     tags=["lora", "model", "flux"],
     category="model",
     version="1.3.1",
-    classification=Classification.Prototype,
 )
 class FLUXLoRACollectionLoader(BaseInvocation):
     """Applies a collection of LoRAs to a FLUX transformer."""
diff --git a/invokeai/app/invocations/flux_model_loader.py b/invokeai/app/invocations/flux_model_loader.py
index 6803f0edc34..a0c26829adc 100644
--- a/invokeai/app/invocations/flux_model_loader.py
+++ b/invokeai/app/invocations/flux_model_loader.py
@@ -3,7 +3,6 @@
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    Classification,
     invocation,
     invocation_output,
 )
@@ -41,7 +40,6 @@ class FluxModelLoaderOutput(BaseInvocationOutput):
     tags=["model", "flux"],
     category="model",
     version="1.0.6",
-    classification=Classification.Prototype,
 )
 class FluxModelLoaderInvocation(BaseInvocation):
     """Loads a flux base model, outputting its submodels."""
diff --git a/invokeai/app/invocations/flux_redux.py b/invokeai/app/invocations/flux_redux.py
index e055d94f8db..4d6a532bea4 100644
--- a/invokeai/app/invocations/flux_redux.py
+++ b/invokeai/app/invocations/flux_redux.py
@@ -45,7 +45,7 @@ class FluxReduxOutput(BaseInvocationOutput):
     tags=["ip_adapter", "control"],
     category="ip_adapter",
     version="2.0.0",
-    classification=Classification.Prototype,
+    classification=Classification.Beta,
 )
 class FluxReduxInvocation(BaseInvocation):
     """Runs a FLUX Redux model to generate a conditioning tensor."""
diff --git a/invokeai/app/invocations/flux_text_encoder.py b/invokeai/app/invocations/flux_text_encoder.py
index aab40ed85b5..9131f06ea35 100644
--- a/invokeai/app/invocations/flux_text_encoder.py
+++ b/invokeai/app/invocations/flux_text_encoder.py
@@ -4,7 +4,7 @@
 import torch
 from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer, T5TokenizerFast

-from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.fields import (
     FieldDescriptions,
     FluxConditioningField,
@@ -30,7 +30,6 @@
     tags=["prompt", "conditioning", "flux"],
     category="conditioning",
     version="1.1.2",
-    classification=Classification.Prototype,
 )
 class FluxTextEncoderInvocation(BaseInvocation):
     """Encodes and preps a prompt for a flux image."""
diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py
index 2324b979853..ea6069bd7a9 100644
--- a/invokeai/app/invocations/image.py
+++ b/invokeai/app/invocations/image.py
@@ -355,7 +355,6 @@ def invoke(self, context: InvocationContext) -> ImageOutput:
     tags=["image", "unsharp_mask"],
     category="image",
     version="1.2.2",
-    classification=Classification.Beta,
 )
 class UnsharpMaskInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Applies an unsharp mask filter to an image"""
@@ -1265,7 +1264,6 @@ def invoke(self, context: InvocationContext) -> ImageOutput:
     category="image",
     version="1.0.0",
     tags=["image", "crop"],
-    classification=Classification.Beta,
 )
 class CropImageToBoundingBoxInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Crop an image to the given bounding box. If the bounding box is omitted, the image is cropped to the non-transparent pixels."""
@@ -1292,7 +1290,6 @@ def invoke(self, context: InvocationContext) -> ImageOutput:
     category="image",
     version="1.0.0",
     tags=["image", "crop"],
-    classification=Classification.Beta,
 )
 class PasteImageIntoBoundingBoxInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Paste the source image into the target image at the given bounding box.
diff --git a/invokeai/app/invocations/llava_onevision_vllm.py b/invokeai/app/invocations/llava_onevision_vllm.py
index af5ee6762ba..dca568d73a0 100644
--- a/invokeai/app/invocations/llava_onevision_vllm.py
+++ b/invokeai/app/invocations/llava_onevision_vllm.py
@@ -4,7 +4,7 @@
 from PIL.Image import Image
 from pydantic import field_validator

-from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
 from invokeai.app.invocations.fields import FieldDescriptions, ImageField, InputField, UIComponent, UIType
 from invokeai.app.invocations.model import ModelIdentifierField
 from invokeai.app.invocations.primitives import StringOutput
@@ -13,7 +13,14 @@
 from invokeai.backend.util.devices import TorchDevice


-@invocation("llava_onevision_vllm", title="LLaVA OneVision VLLM", tags=["vllm"], category="vllm", version="1.0.0")
+@invocation(
+    "llava_onevision_vllm",
+    title="LLaVA OneVision VLLM",
+    tags=["vllm"],
+    category="vllm",
+    version="1.0.0",
+    classification=Classification.Beta,
+)
 class LlavaOnevisionVllmInvocation(BaseInvocation):
     """Run a LLaVA OneVision VLLM model."""

diff --git a/invokeai/app/invocations/mask.py b/invokeai/app/invocations/mask.py
index c3456cdf189..556ab8801d8 100644
--- a/invokeai/app/invocations/mask.py
+++ b/invokeai/app/invocations/mask.py
@@ -4,7 +4,6 @@

 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
-    Classification,
     InvocationContext,
     invocation,
 )
@@ -58,7 +57,6 @@ def invoke(self, context: InvocationContext) -> MaskOutput:
     tags=["conditioning"],
     category="conditioning",
     version="1.0.0",
-    classification=Classification.Beta,
 )
 class AlphaMaskToTensorInvocation(BaseInvocation):
     """Convert a mask image to a tensor. Opaque regions are 1 and transparent regions are 0."""
@@ -87,7 +85,6 @@ def invoke(self, context: InvocationContext) -> MaskOutput:
     tags=["conditioning"],
     category="conditioning",
     version="1.1.0",
-    classification=Classification.Beta,
 )
 class InvertTensorMaskInvocation(BaseInvocation):
     """Inverts a tensor mask."""
@@ -234,7 +231,6 @@ def invoke(self, context: InvocationContext) -> ImageOutput:
     tags=["mask"],
     category="mask",
     version="1.0.0",
-    classification=Classification.Beta,
 )
 class GetMaskBoundingBoxInvocation(BaseInvocation):
     """Gets the bounding box of the given mask image."""
diff --git a/invokeai/app/invocations/model.py b/invokeai/app/invocations/model.py
index 5a58bc18a8d..9a51da769fa 100644
--- a/invokeai/app/invocations/model.py
+++ b/invokeai/app/invocations/model.py
@@ -6,7 +6,6 @@
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    Classification,
     invocation,
     invocation_output,
 )
@@ -124,7 +123,6 @@ class ModelIdentifierOutput(BaseInvocationOutput):
     tags=["model"],
     category="model",
     version="1.0.1",
-    classification=Classification.Prototype,
 )
 class ModelIdentifierInvocation(BaseInvocation):
     """Selects any model, outputting it its identifier. Be careful with this one! The identifier will be accepted as
diff --git a/invokeai/app/invocations/sd3_denoise.py b/invokeai/app/invocations/sd3_denoise.py
index ea7c5f39d1d..51899013f4d 100644
--- a/invokeai/app/invocations/sd3_denoise.py
+++ b/invokeai/app/invocations/sd3_denoise.py
@@ -6,7 +6,7 @@
 from torchvision.transforms.functional import resize as tv_resize
 from tqdm import tqdm

-from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
 from invokeai.app.invocations.fields import (
     DenoiseMaskField,
@@ -36,7 +36,6 @@
     tags=["image", "sd3"],
     category="image",
     version="1.1.1",
-    classification=Classification.Prototype,
 )
 class SD3DenoiseInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Run denoising process with a SD3 model."""
diff --git a/invokeai/app/invocations/sd3_image_to_latents.py b/invokeai/app/invocations/sd3_image_to_latents.py
index db1ace24e23..fc88e85aa56 100644
--- a/invokeai/app/invocations/sd3_image_to_latents.py
+++ b/invokeai/app/invocations/sd3_image_to_latents.py
@@ -2,7 +2,7 @@
 import torch
 from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL

-from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.fields import (
     FieldDescriptions,
     ImageField,
@@ -25,7 +25,6 @@
     tags=["image", "latents", "vae", "i2l", "sd3"],
     category="image",
     version="1.0.1",
-    classification=Classification.Prototype,
 )
 class SD3ImageToLatentsInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Generates latents from an image."""
diff --git a/invokeai/app/invocations/sd3_model_loader.py b/invokeai/app/invocations/sd3_model_loader.py
index 7bc31e7aece..c7e2f397f6f 100644
--- a/invokeai/app/invocations/sd3_model_loader.py
+++ b/invokeai/app/invocations/sd3_model_loader.py
@@ -3,7 +3,6 @@
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    Classification,
     invocation,
     invocation_output,
 )
@@ -34,7 +33,6 @@ class Sd3ModelLoaderOutput(BaseInvocationOutput):
     tags=["model", "sd3"],
     category="model",
     version="1.0.1",
-    classification=Classification.Prototype,
 )
 class Sd3ModelLoaderInvocation(BaseInvocation):
     """Loads a SD3 base model, outputting its submodels."""
diff --git a/invokeai/app/invocations/sd3_text_encoder.py b/invokeai/app/invocations/sd3_text_encoder.py
index 2cdd16b2b45..230fdf0f602 100644
--- a/invokeai/app/invocations/sd3_text_encoder.py
+++ b/invokeai/app/invocations/sd3_text_encoder.py
@@ -11,7 +11,7 @@
     T5TokenizerFast,
 )

-from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField
 from invokeai.app.invocations.model import CLIPField, T5EncoderField
 from invokeai.app.invocations.primitives import SD3ConditioningOutput
@@ -33,7 +33,6 @@
     tags=["prompt", "conditioning", "sd3"],
     category="conditioning",
     version="1.0.1",
-    classification=Classification.Prototype,
 )
 class Sd3TextEncoderInvocation(BaseInvocation):
     """Encodes and preps a prompt for a SD3 image."""
diff --git a/invokeai/app/invocations/tiled_multi_diffusion_denoise_latents.py b/invokeai/app/invocations/tiled_multi_diffusion_denoise_latents.py
index ee2084bb8f8..cf0a1dbf8ed 100644
--- a/invokeai/app/invocations/tiled_multi_diffusion_denoise_latents.py
+++ b/invokeai/app/invocations/tiled_multi_diffusion_denoise_latents.py
@@ -7,7 +7,7 @@
 from diffusers.schedulers.scheduling_utils import SchedulerMixin
 from pydantic import field_validator

-from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
 from invokeai.app.invocations.constants import LATENT_SCALE_FACTOR
 from invokeai.app.invocations.controlnet_image_processors import ControlField
 from invokeai.app.invocations.denoise_latents import DenoiseLatentsInvocation, get_scheduler
@@ -56,7 +56,6 @@ def crop_controlnet_data(control_data: ControlNetData, latent_region: TBLR) -> C
     title="Tiled Multi-Diffusion Denoise - SD1.5, SDXL",
     tags=["upscale", "denoise"],
     category="latents",
-    classification=Classification.Beta,
     version="1.0.1",
 )
 class TiledMultiDiffusionDenoiseLatents(BaseInvocation):
diff --git a/invokeai/app/invocations/tiles.py b/invokeai/app/invocations/tiles.py
index a54001a0a79..a631f3ba4ec 100644
--- a/invokeai/app/invocations/tiles.py
+++ b/invokeai/app/invocations/tiles.py
@@ -7,7 +7,6 @@
 from invokeai.app.invocations.baseinvocation import (
     BaseInvocation,
     BaseInvocationOutput,
-    Classification,
     invocation,
     invocation_output,
 )
@@ -40,7 +39,6 @@ class CalculateImageTilesOutput(BaseInvocationOutput):
     tags=["tiles"],
     category="tiles",
     version="1.0.1",
-    classification=Classification.Beta,
 )
 class CalculateImageTilesInvocation(BaseInvocation):
     """Calculate the coordinates and overlaps of tiles that cover a target image shape."""
@@ -74,7 +72,6 @@ def invoke(self, context: InvocationContext) -> CalculateImageTilesOutput:
     tags=["tiles"],
     category="tiles",
     version="1.1.1",
-    classification=Classification.Beta,
 )
 class CalculateImageTilesEvenSplitInvocation(BaseInvocation):
     """Calculate the coordinates and overlaps of tiles that cover a target image shape."""
@@ -117,7 +114,6 @@ def invoke(self, context: InvocationContext) -> CalculateImageTilesOutput:
     tags=["tiles"],
     category="tiles",
     version="1.0.1",
-    classification=Classification.Beta,
 )
 class CalculateImageTilesMinimumOverlapInvocation(BaseInvocation):
     """Calculate the coordinates and overlaps of tiles that cover a target image shape."""
@@ -168,7 +164,6 @@ class TileToPropertiesOutput(BaseInvocationOutput):
     tags=["tiles"],
     category="tiles",
     version="1.0.1",
-    classification=Classification.Beta,
 )
 class TileToPropertiesInvocation(BaseInvocation):
     """Split a Tile into its individual properties."""
@@ -201,7 +196,6 @@ class PairTileImageOutput(BaseInvocationOutput):
     tags=["tiles"],
     category="tiles",
     version="1.0.1",
-    classification=Classification.Beta,
 )
 class PairTileImageInvocation(BaseInvocation):
     """Pair an image with its tile properties."""
@@ -230,7 +224,6 @@ def invoke(self, context: InvocationContext) -> PairTileImageOutput:
     tags=["tiles"],
     category="tiles",
     version="1.1.1",
-    classification=Classification.Beta,
 )
 class MergeTilesToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
     """Merge multiple tile images into a single image."""
diff --git a/invokeai/frontend/web/public/locales/en.json b/invokeai/frontend/web/public/locales/en.json
index 34199560fe9..9005b80e940 100644
--- a/invokeai/frontend/web/public/locales/en.json
+++ b/invokeai/frontend/web/public/locales/en.json
@@ -2344,8 +2344,9 @@
   "whatsNew": {
     "whatsNewInInvoke": "What's New in Invoke",
     "items": [
-      "Workflows: New and improved Workflow Library.",
-      "FLUX: Support for FLUX Redux & FLUX Fill in Workflows and Canvas."
+      "Workflows: Support for custom string drop-downs in Workflow Builder.",
+      "FLUX: Support for FLUX Fill in Workflows and Canvas.",
+      "LLaVA OneVision VLLM: Beta support in Workflows."
     ],
     "readReleaseNotes": "Read Release Notes",
     "watchRecentReleaseVideos": "Watch Recent Release Videos",
diff --git a/invokeai/version/invokeai_version.py b/invokeai/version/invokeai_version.py
index d208345c306..b6a1a593e4b 100644
--- a/invokeai/version/invokeai_version.py
+++ b/invokeai/version/invokeai_version.py
@@ -1 +1 @@
-__version__ = "5.9.0rc2"
+__version__ = "5.9.0"
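The recurring change across these hunks is the `classification` argument of InvokeAI's `@invocation` decorator: dropping `classification=Classification.Prototype` graduates a node to the decorator's default (stable) classification, while `classification=Classification.Beta` keeps a beta badge on the node in the workflow editor. Below is a minimal sketch of that decorator pattern, reusing only imports that appear in the hunks above; the node id, title, field, and class are hypothetical and not part of this patch.

```python
# Hypothetical node illustrating the @invocation classification pattern.
# Imports mirror those used in the diff above; "example_node" and
# ExampleInvocation are illustrative names, not part of this PR.
from invokeai.app.invocations.baseinvocation import (
    BaseInvocation,
    Classification,
    InvocationContext,
    invocation,
)
from invokeai.app.invocations.fields import InputField
from invokeai.app.invocations.primitives import StringOutput


@invocation(
    "example_node",  # unique node type id (hypothetical)
    title="Example Node",
    tags=["example"],
    category="example",
    version="1.0.0",
    classification=Classification.Beta,  # omit to fall back to the stable default
)
class ExampleInvocation(BaseInvocation):
    """Echoes its prompt back as a string output."""

    prompt: str = InputField(description="Text to echo back.")

    def invoke(self, context: InvocationContext) -> StringOutput:
        return StringOutput(value=self.prompt)
```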