RUFF C408 rule (unnecessary-collection-call) #6316

Open: wants to merge 2 commits into master
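
For reference, Ruff's C408 rule (unnecessary-collection-call) flags dict(), list() and tuple() calls that can be rewritten as literals, which is exactly the pattern this PR cleans up. A minimal illustrative sketch (the variable names mirror the hunks below, but the snippet itself is not part of the diff):

# Flagged by C408: the constructor call adds nothing over a literal
log = dict()
txts = list()

# Preferred literal forms, as applied throughout this PR
log = {}
txts = []
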
2 changes: 1 addition & 1 deletion comfy/hooks.py
@@ -510,7 +510,7 @@ def get_sorted_list_via_attr(objects: list, attr: str) -> list:
unique_attrs = {}
for o in objects:
val_attr = getattr(o, attr)
- attr_list: list = unique_attrs.get(val_attr, list())
+ attr_list: list = unique_attrs.get(val_attr, [])
attr_list.append(o)
if val_attr not in unique_attrs:
unique_attrs[val_attr] = attr_list
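The rewrite above is behaviour preserving: both list() and [] build a fresh empty list that dict.get returns when the key is missing, and the literal merely skips a global name lookup and a call. A tiny sanity check, using a hypothetical key:

unique_attrs = {}
old_style = unique_attrs.get("missing", list())  # form flagged by C408
new_style = unique_attrs.get("missing", [])      # literal replacement
assert old_style == new_style == []
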
10 changes: 5 additions & 5 deletions comfy/ldm/models/autoencoder.py
@@ -19,7 +19,7 @@ def get_trainable_parameters(self) -> Any:
yield from ()

def forward(self, z: torch.Tensor) -> Tuple[torch.Tensor, dict]:
- log = dict()
+ log = {}
posterior = DiagonalGaussianDistribution(z)
if self.sample:
z = posterior.sample()
@@ -88,7 +88,7 @@ def decode(self, *args, **kwargs) -> torch.Tensor:
def instantiate_optimizer_from_config(self, params, lr, cfg):
logging.info(f"loading >>> {cfg['target']} <<< optimizer from config")
return get_obj_from_str(cfg["target"])(
- params, lr=lr, **cfg.get("params", dict())
+ params, lr=lr, **cfg.get("params", {})
)

def configure_optimizers(self) -> Any:
@@ -129,7 +129,7 @@ def encode(
) -> Union[torch.Tensor, Tuple[torch.Tensor, dict]]:
z = self.encoder(x)
if unregularized:
- return z, dict()
+ return z, {}
z, reg_log = self.regularization(z)
if return_reg_log:
return z, reg_log
@@ -191,7 +191,7 @@ def encode(
N = x.shape[0]
bs = self.max_batch_size
n_batches = int(math.ceil(N / bs))
- z = list()
+ z = []
for i_batch in range(n_batches):
z_batch = self.encoder(x[i_batch * bs : (i_batch + 1) * bs])
z_batch = self.quant_conv(z_batch)
@@ -211,7 +211,7 @@ def decode(self, z: torch.Tensor, **decoder_kwargs) -> torch.Tensor:
N = z.shape[0]
bs = self.max_batch_size
n_batches = int(math.ceil(N / bs))
- dec = list()
+ dec = []
for i_batch in range(n_batches):
dec_batch = self.post_quant_conv(z[i_batch * bs : (i_batch + 1) * bs])
dec_batch = self.decoder(dec_batch, **decoder_kwargs)
10 changes: 5 additions & 5 deletions comfy/ldm/util.py
@@ -13,7 +13,7 @@ def log_txt_as_img(wh, xc, size=10):
# wh a tuple of (width, height)
# xc a list of captions to plot
b = len(xc)
- txts = list()
+ txts = []
for bi in range(b):
txt = Image.new("RGB", wh, color="white")
draw = ImageDraw.Draw(txt)
@@ -77,7 +77,7 @@ def instantiate_from_config(config):
elif config == "__is_unconditional__":
return None
raise KeyError("Expected key `target` to instantiate.")
- return get_obj_from_str(config["target"])(**config.get("params", dict()))
+ return get_obj_from_str(config["target"])(**config.get("params", {}))


def get_obj_from_str(string, reload=False):
@@ -106,9 +106,9 @@ def __init__(self, params, lr=1.e-3, betas=(0.9, 0.999), eps=1.e-8, # TODO: che
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
if not 0.0 <= ema_decay <= 1.0:
raise ValueError("Invalid ema_decay value: {}".format(ema_decay))
- defaults = dict(lr=lr, betas=betas, eps=eps,
- weight_decay=weight_decay, amsgrad=amsgrad, ema_decay=ema_decay,
- ema_power=ema_power, param_names=param_names)
+ defaults = {"lr": lr, "betas": betas, "eps": eps,
+ "weight_decay": weight_decay, "amsgrad": amsgrad, "ema_decay": ema_decay,
+ "ema_power": ema_power, "param_names": param_names}
super().__init__(params, defaults)

def __setstate__(self, state):
4 changes: 2 additions & 2 deletions comfy/samplers.py
@@ -185,7 +185,7 @@ def finalize_default_conds(model: 'BaseModel', hooked_to_run: dict[comfy.hooks.H
p = p._replace(mult=mult)
if p.hooks is not None:
model.current_patcher.prepare_hook_patches_current_keyframe(timestep, p.hooks, model_options)
- hooked_to_run.setdefault(p.hooks, list())
+ hooked_to_run.setdefault(p.hooks, [])
hooked_to_run[p.hooks] += [(p, i)]

def calc_cond_batch(model: 'BaseModel', conds: list[list[dict]], x_in: torch.Tensor, timestep, model_options):
@@ -220,7 +220,7 @@ def _calc_cond_batch(model: 'BaseModel', conds: list[list[dict]], x_in: torch.Te
continue
if p.hooks is not None:
model.current_patcher.prepare_hook_patches_current_keyframe(timestep, p.hooks, model_options)
- hooked_to_run.setdefault(p.hooks, list())
+ hooked_to_run.setdefault(p.hooks, [])
hooked_to_run[p.hooks] += [(p, i)]
default_conds.append(default_c)

2 changes: 1 addition & 1 deletion comfy/sd1_clip.py
@@ -26,7 +26,7 @@ def gen_empty_tokens(special_tokens, length):

class ClipTokenWeightEncoder:
def encode_token_weights(self, token_weight_pairs):
- to_encode = list()
+ to_encode = []
max_token_len = 0
has_weights = False
for x in token_weight_pairs:
2 changes: 1 addition & 1 deletion comfy_extras/nodes_audio.py
@@ -164,7 +164,7 @@ def INPUT_TYPES(s):
def save_audio(self, audio, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
filename_prefix += self.prefix_append
full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
- results = list()
+ results = []

metadata = {}
if not args.disable_metadata:
4 changes: 2 additions & 2 deletions comfy_extras/nodes_images.py
@@ -99,7 +99,7 @@ def save_images(self, images, fps, filename_prefix, lossless, quality, method, n
method = self.methods.get(method)
filename_prefix += self.prefix_append
full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
- results = list()
+ results = []
pil_images = []
for image in images:
i = 255. * image.cpu().numpy()
@@ -160,7 +160,7 @@ def INPUT_TYPES(s):
def save_images(self, images, fps, compress_level, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
filename_prefix += self.prefix_append
full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
- results = list()
+ results = []
pil_images = []
for image in images:
i = 255. * image.cpu().numpy()
2 changes: 1 addition & 1 deletion execution.py
@@ -232,7 +232,7 @@ def get_output_data(obj, input_data_all, execution_block_cb=None, pre_execute_cb
output = merge_result_data(results, obj)
else:
output = []
- ui = dict()
+ ui = {}
if len(uis) > 0:
ui = {k: [y for x in uis for y in x[k]] for k in uis[0].keys()}
return output, ui, has_subgraph
4 changes: 2 additions & 2 deletions nodes.py
@@ -477,7 +477,7 @@ def save(self, samples, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=No

file = f"{filename}_{counter:05}_.latent"

- results = list()
+ results = []
results.append({
"filename": file,
"subfolder": subfolder,
@@ -1582,7 +1582,7 @@ def INPUT_TYPES(s):
def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
filename_prefix += self.prefix_append
full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
- results = list()
+ results = []
for (batch_number, image) in enumerate(images):
i = 255. * image.cpu().numpy()
img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
6 changes: 4 additions & 2 deletions ruff.toml
@@ -1,14 +1,16 @@
target-version = "py39"

# Disable all rules by default
lint.ignore = ["ALL"]

# Enable specific rules
# Enable specific rules, see all rules here: https://docs.astral.sh/ruff/rules/
lint.select = [
"S307", # suspicious-eval-usage
"T201", # print-usage
"W",
# The "F" series in Ruff stands for "Pyflakes" rules, which catch various Python syntax errors and undefined names.
# See all rules here: https://docs.astral.sh/ruff/rules/#pyflakes-f
"F",
"C408", # unnecessary dict(), list() or tuple() calls that can be rewritten as empty literals.
]

exclude = ["*.ipynb"]
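
With "C408" added to lint.select, a local ruff check run should flag any remaining dict(), list() or tuple() calls that are rewritable as literals, including dict() called with only keyword arguments, which is the pattern fixed in comfy/ldm/util.py above. A hedged sketch of that second pattern, using sample values rather than the real optimizer arguments:

lr, betas, eps = 1e-3, (0.9, 0.999), 1e-8  # illustrative values only

# Flagged by C408: dict() built purely from keyword arguments
defaults = dict(lr=lr, betas=betas, eps=eps)

# Equivalent literal, matching the comfy/ldm/util.py hunk
defaults = {"lr": lr, "betas": betas, "eps": eps}

C408 is documented as auto-fixable, so ruff check --fix should be able to apply rewrites like these mechanically.
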
2 changes: 1 addition & 1 deletion server.py
@@ -171,7 +171,7 @@ def __init__(self, loop):

max_upload_size = round(args.max_upload_size * 1024 * 1024)
self.app = web.Application(client_max_size=max_upload_size, middlewares=middlewares)
- self.sockets = dict()
+ self.sockets = {}
self.web_root = (
FrontendManager.init_frontend(args.front_end_version)
if args.front_end_root is None