From 7074f3191d3e55f03533a9a7b6c07ab9931ececb Mon Sep 17 00:00:00 2001
From: doctorpangloss <@hiddenswitch.com>
Date: Tue, 6 Aug 2024 21:57:57 -0700
Subject: [PATCH] Fix some relative path issues

---
 comfy/ldm/flux/model.py        | 4 ++--
 comfy/ldm/hydit/attn_layers.py | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/comfy/ldm/flux/model.py b/comfy/ldm/flux/model.py
index db6cf3d22..9124d697d 100644
--- a/comfy/ldm/flux/model.py
+++ b/comfy/ldm/flux/model.py
@@ -15,7 +15,7 @@ from .layers import (
 )
 
 from einops import rearrange, repeat
-import comfy.ldm.common_dit
+from .. import common_dit
 
 @dataclass
 class FluxParams:
@@ -126,7 +126,7 @@ class Flux(nn.Module):
     def forward(self, x, timestep, context, y, guidance, **kwargs):
         bs, c, h, w = x.shape
         patch_size = 2
-        x = comfy.ldm.common_dit.pad_to_patch_size(x, (patch_size, patch_size))
+        x = common_dit.pad_to_patch_size(x, (patch_size, patch_size))
 
         img = rearrange(x, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=patch_size, pw=patch_size)
 
diff --git a/comfy/ldm/hydit/attn_layers.py b/comfy/ldm/hydit/attn_layers.py
index 920b84286..bfc03bacc 100644
--- a/comfy/ldm/hydit/attn_layers.py
+++ b/comfy/ldm/hydit/attn_layers.py
@@ -1,7 +1,7 @@
 import torch
 import torch.nn as nn
 from typing import Tuple, Union, Optional
-from comfy.ldm.modules.attention import optimized_attention
+from ..modules.attention import optimized_attention
 
 
 def reshape_for_broadcast(freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor]], x: torch.Tensor, head_first=False):