Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Auto-Sync Graph Tensor on Weight Update #1000

Merged
merged 7 commits into from
Jul 8, 2024
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,107 @@
{
"3": {
"inputs": {
"seed": 87631619688518,
"steps": 20,
"cfg": 8,
"sampler_name": "euler_ancestral",
"scheduler": "normal",
"denoise": 1,
"model": [
"31",
0
],
"positive": [
"6",
0
],
"negative": [
"7",
0
],
"latent_image": [
"5",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"5": {
"inputs": {
"width": 1024,
"height": 1024,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"6": {
"inputs": {
"text": "(blue colour lighting:1.3),photorealistic,masterpiece:1.5, spot light, exquisite gentle eyes,().\\n,portrait,masterpiece,breast focus:1.2,(),multicolored hair:1.4,wavy hair,3D face,black hair,short hair:1.2, sidelocks,1girl:1.3,blue eyes,tareme:1.5,(cowboy shot:1.5),(light smile:1.3),(stand:1.3),\\nhead tilt:1.3,(Shoulderless sundress:1.2),\\n(flat chest:1.4),cute face,(A balanced body,Model Body Type),\\n(Dark Background:1.3)、\\nslender Body:1.3,shiny hair, shiny skin,niji",
"clip": [
"31",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"7": {
"inputs": {
"text": " Multiple people,bad body,long body,(fat:1.2),long neck,deformed,mutated,malformed limbs,missing limb,acnes,skin spots,skin blemishes,poorly drawn face",
"clip": [
"31",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"31",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"31": {
"inputs": {
"ckpt_name": "Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,108 @@
{
"3": {
"inputs": {
"seed": 87631619688518,
"steps": 20,
"cfg": 8,
"sampler_name": "euler_ancestral",
"scheduler": "normal",
"denoise": 1,
"model": [
"31",
0
],
"positive": [
"6",
0
],
"negative": [
"7",
0
],
"latent_image": [
"5",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"5": {
"inputs": {
"width": 1024,
"height": 1024,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"6": {
"inputs": {
"text": "(blue colour lighting:1.3),photorealistic,masterpiece:1.5, spot light, exquisite gentle eyes,().\\n,portrait,masterpiece,breast focus:1.2,(),multicolored hair:1.4,wavy hair,3D face,black hair,short hair:1.2, sidelocks,1girl:1.3,blue eyes,tareme:1.5,(cowboy shot:1.5),(light smile:1.3),(stand:1.3),\\nhead tilt:1.3,(Shoulderless sundress:1.2),\\n(flat chest:1.4),cute face,(A balanced body,Model Body Type),\\n(Dark Background:1.3)、\\nslender Body:1.3,shiny hair, shiny skin,niji",
"clip": [
"31",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"7": {
"inputs": {
"text": " Multiple people,bad body,long body,(fat:1.2),long neck,deformed,mutated,malformed limbs,missing limb,acnes,skin spots,skin blemishes,poorly drawn face",
"clip": [
"31",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"31",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"31": {
"inputs": {
"ckpt_name": "Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors",
"vae_speedup": "disable"
},
"class_type": "OneDiffCheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint - OneDiff"
}
}
}
6 changes: 6 additions & 0 deletions onediff_comfy_nodes/benchmarks/scripts/run_oneflow_case_ci.sh
Original file line number Diff line number Diff line change
Expand Up @@ -28,3 +28,9 @@ python3 scripts/text_to_image.py \
-w $WORKFLOW_DIR/ComfyUI_IPAdapter_plus/ipadapter_advanced.json \
--baseline-dir $STANDARD_OUTPUT/test_ipa
# --output-images \

# Benchmark the plain txt2img workflow against the saved baseline images.
# A lowered SSIM threshold (0.7) is used because checkpoint/VAE differences
# between the baseline and OneDiff-compiled runs produce small pixel drift.
# Uncomment --output-images to dump the generated images for inspection.
python3 scripts/text_to_image.py \
    --comfy-port $COMFY_PORT \
    -w $WORKFLOW_DIR/txt2img.json \
    --ssim-threshold 0.7 \
    --baseline-dir $STANDARD_OUTPUT/txt2img/imgs # --output-images
strint marked this conversation as resolved.
Show resolved Hide resolved
30 changes: 30 additions & 0 deletions onediff_comfy_nodes/benchmarks/src/input_registration.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,19 @@
WORKFLOW_DIR = "resources/workflows"
FACE_IMAGE_DIR = "/share_nfs/hf_models/comfyui_resources/input/faces"
POSE_IMAGE_DIR = "/share_nfs/hf_models/comfyui_resources/input/poses"
# Checkpoint filenames (relative to the ComfyUI checkpoints directory) used by
# the txt2img benchmark generator below.  SDXL checkpoints are run at
# 1024x1024; SD 1.5 checkpoints at 768x512.
SDXL_MODELS = [
    "Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors",
    "Pony_Realism.safetensors",
    "sdxl/dreamshaperXL_v21TurboDPMSDE.safetensors",
]
# SD 1.5 family checkpoints; mix of .safetensors and legacy .ckpt formats.
SD1_5_MODELS = [
    "sd15/020.realisticVisionV51_v51VAE.safetensors",
    "sd15/majicmixRealistic_v7.safetensors",
    "sd15/v1-5-pruned-emaonly.ckpt",
    "sd15/helloyoung25d_V10f.safetensors",
    "sd15/RealCartoonSpecialPruned.safetensors",
]


class InputParams(NamedTuple):
graph: ComfyGraph
Expand Down Expand Up @@ -46,6 +59,23 @@ def _(workflow_path, *args, **kwargs):
yield InputParams(graph=graph)


@register_generator(
    [f"{WORKFLOW_DIR}/baseline/txt2img.json", f"{WORKFLOW_DIR}/oneflow/txt2img.json"]
)
def _(workflow_path, *args, **kwargs):
    """Yield txt2img benchmark cases: every SDXL model at 1024x1024, then
    every SD 1.5 model at 768x512.

    NOTE: a single ComfyGraph instance is mutated and re-yielded each
    iteration, so each InputParams must be consumed before the next one is
    requested — presumably the benchmark driver does exactly that; verify
    against the caller before collecting these into a list.
    """
    with open(workflow_path, "r") as fp:
        workflow = json.load(fp)
    graph = ComfyGraph(graph=workflow, sampler_nodes=["3"])
    # (model list, (height, width)) pairs; SDXL is native 1024, SD1.5 is 768x512.
    model_groups = (
        (SDXL_MODELS, (1024, 1024)),
        (SD1_5_MODELS, (768, 512)),
    )
    for models, (height, width) in model_groups:
        for ckpt_name in models:
            # Re-apply the size every iteration in case a consumer mutated
            # the shared graph between yields.
            graph.set_image_size(height=height, width=width)
            # Node "31" is the checkpoint loader in both workflow variants.
            graph.graph["31"]["inputs"]["ckpt_name"] = ckpt_name
            yield InputParams(graph)


SD3_WORKFLOWS = [
f"{WORKFLOW_DIR}/baseline/sd3_baseline.json",
f"{WORKFLOW_DIR}/nexfort/sd3_unet_speedup.json",
Expand Down
8 changes: 0 additions & 8 deletions onediff_comfy_nodes/modules/booster_cache.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,6 @@
from functools import singledispatch
from comfy.sd import VAE
from onediff.torch_utils.module_operations import get_sub_module
from onediff.utils.import_utils import is_oneflow_available
from .._config import is_disable_oneflow_backend


@singledispatch
Expand Down Expand Up @@ -46,12 +44,6 @@ def get_cached_model(model):

@get_cached_model.register
def _(model: ModelPatcher):
if is_oneflow_available() and not is_disable_oneflow_backend():
from .oneflow.utils.booster_utils import is_using_oneflow_backend

if is_using_oneflow_backend(model):
return None

return model.model


Expand Down
1 change: 1 addition & 0 deletions onediff_comfy_nodes/modules/oneflow/booster_basic.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ def _(self, model: ModelPatcher, ckpt_name: Optional[str] = None, **kwargs):
return model

compiled_model = oneflow_compile(torch_model)

model.model.diffusion_model = compiled_model

graph_file = generate_graph_path(f"{type(model).__name__}", model=model.model)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,12 @@
from .dual_module import DualModule, get_mixed_dual_module
from .oneflow_exec_mode import oneflow_exec_mode, oneflow_exec_mode_enabled
from .args_tree_util import input_output_processor
from .param_utils import parse_device, check_device, generate_constant_folding_info
from .param_utils import (
parse_device,
check_device,
generate_constant_folding_info,
update_graph_with_constant_folding_info,
)
from .graph_management_utils import graph_file_management
from .online_quantization_utils import quantize_and_deploy_wrapper
from ..env_var import OneflowCompileOptions
Expand Down Expand Up @@ -195,6 +200,7 @@ def load_graph(self, file_path, device=None, run_warmup=True, *, state_dict=None
file_path, device, run_warmup, state_dict=state_dict
)
generate_constant_folding_info(self)
update_graph_with_constant_folding_info(self)

def save_graph(self, file_path, *, process_state_dict=lambda x: x):
self.get_graph().save_graph(file_path, process_state_dict=process_state_dict)
Expand Down
18 changes: 18 additions & 0 deletions src/onediff/infer_compiler/backends/oneflow/param_utils.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import re
import types
import torch
import oneflow as flow
from typing import List, Dict, Any, Union
Expand Down Expand Up @@ -108,9 +109,26 @@ def convert_var_name(s: str, prefix="variable_transpose_"):
for k, v in zip(*graph._c_nn_graph.get_runtime_var_states())
if k.startswith("variable_transpose_") and v.ndim == 4
}

setattr(deployable_module, CONSTANT_FOLDING_INFO_ATTR, result)

set_constant_folded_conv_attr(deployable_module, result)

def make_custom_copy_(module):
def custom_copy_(self, src, non_blocking=False):
torch.Tensor.copy_(self, src, non_blocking)
# Update graph related tensors
update_graph_related_tensor(module)

return custom_copy_

from onediff.torch_utils.module_operations import get_sub_module

torch_model: torch.nn.Module = deployable_module._torch_module
for k in result.keys():
module = get_sub_module(torch_model, removesuffix(k, ".weight"))
module.weight.copy_ = types.MethodType(make_custom_copy_(module), module.weight)


def update_graph_with_constant_folding_info(
module: torch.nn.Module, info: Dict[str, flow.Tensor] = None
Expand Down
Loading