fix(docs): correct typos in comments and strings found during code review

Non-functional changes only:
- Fixed minor spelling mistakes in comments
- Corrected typos in user-facing strings
No variables, logic, or functional code was modified.

Signed-off-by: Marcel Petrick <mail@marcelpetrick.it>
Marcel Petrick 2025-10-01 15:59:09 +02:00
parent 638097829d
commit 1b0771357c
16 changed files with 19 additions and 19 deletions

@@ -24,7 +24,7 @@ class LogInterceptor(io.TextIOWrapper):
         with self._lock:
             self._logs_since_flush.append(entry)

-            # Simple handling for cr to overwrite the last output if it isnt a full line
+            # Simple handling for cr to overwrite the last output if it isn't a full line
             # else logs just get full of progress messages
             if isinstance(data, str) and data.startswith("\r") and not logs[-1]["m"].endswith("\n"):
                 logs.pop()

@@ -136,7 +136,7 @@ parser.add_argument("--force-non-blocking", action="store_true", help="Force Com
 parser.add_argument("--default-hashing-function", type=str, choices=['md5', 'sha1', 'sha256', 'sha512'], default='sha256', help="Allows you to choose the hash function to use for duplicate filename / contents comparison. Default is sha256.")
-parser.add_argument("--disable-smart-memory", action="store_true", help="Force ComfyUI to agressively offload to regular ram instead of keeping models in vram when it can.")
+parser.add_argument("--disable-smart-memory", action="store_true", help="Force ComfyUI to aggressively offload to regular ram instead of keeping models in vram when it can.")
 parser.add_argument("--deterministic", action="store_true", help="Make pytorch use slower deterministic algorithms when it can. Note that this might not make images deterministic in all cases.")

 class PerformanceFeature(enum.Enum):

@@ -133,7 +133,7 @@ class IndexListContextHandler(ContextHandlerABC):
             if isinstance(cond_item, torch.Tensor):
                 # check that tensor is the expected length - x.size(0)
                 if self.dim < cond_item.ndim and cond_item.size(self.dim) == x_in.size(self.dim):
-                    # if so, it's subsetting time - tell controls the expected indeces so they can handle them
+                    # if so, it's subsetting time - tell controls the expected indices so they can handle them
                     actual_cond_item = window.get_tensor(cond_item)
                     resized_actual_cond[key] = actual_cond_item.to(device)
                 else:

@@ -93,7 +93,7 @@ class Hook:
         self.hook_scope = hook_scope
         '''Scope of where this hook should apply in terms of the conds used in sampling run.'''
         self.custom_should_register = default_should_register
-        '''Can be overriden with a compatible function to decide if this hook should be registered without the need to override .should_register'''
+        '''Can be overridden with a compatible function to decide if this hook should be registered without the need to override .should_register'''

     @property
     def strength(self):

@@ -136,11 +136,11 @@ class ConvolutionModule(nn.Module):
 class PositionwiseFeedForward(torch.nn.Module):
     """Positionwise feed forward layer.

-    FeedForward are appied on each position of the sequence.
+    FeedForward are applied on each position of the sequence.
     The output dim is same with the input dim.

     Args:
-        idim (int): Input dimenstion.
+        idim (int): Input dimension.
         hidden_units (int): The number of hidden units.
         dropout_rate (float): Dropout rate.
         activation (torch.nn.Module): Activation function
@@ -758,7 +758,7 @@ class EspnetRelPositionalEncoding(torch.nn.Module):
         if self.pe.dtype != x.dtype or self.pe.device != x.device:
             self.pe = self.pe.to(dtype=x.dtype, device=x.device)
             return
-        # Suppose `i` means to the position of query vecotr and `j` means the
+        # Suppose `i` means to the position of query vector and `j` means the
         # position of key vector. We use position relative positions when keys
         # are to the left (i>j) and negative relative positions otherwise (i<j).
         pe_positive = torch.zeros(x.size(1), self.d_model)

@@ -60,7 +60,7 @@ class VectorQuantize(nn.Module):
     def __init__(self, embedding_size, k, ema_decay=0.99, ema_loss=False):
         """
         Takes an input of variable size (as long as the last dimension matches the embedding size).
-        Returns one tensor containing the nearest neigbour embeddings to each of the inputs,
+        Returns one tensor containing the nearest neighbour embeddings to each of the inputs,
         with the same size as the input, vq and commitment components for the loss as a touple
         in the second output and the indices of the quantized vectors in the third:
         quantized, (vq_loss, commit_loss), indices

@@ -120,7 +120,7 @@ class CausalUpsample3d(nn.Module):
         time_factor = time_factor.item()
         x = x.repeat_interleave(int(time_factor), dim=2)
         # TODO(freda): Check if this causes temporal inconsistency.
-        # Shoule reverse the order of the following two ops,
+        # Should reverse the order of the following two ops,
         # better perf and better temporal smoothness.
         x = self.conv(x)
         return x[..., int(time_factor - 1) :, :, :]

@@ -178,7 +178,7 @@ class LearnablePosEmbAxis(VideoPositionEmb):
     ):
         """
         Args:
-            interpolation (str): we curretly only support "crop", ideally when we need extrapolation capacity, we should adjust frequency or other more advanced methods. they are not implemented yet.
+            interpolation (str): we currently only support "crop", ideally when we need extrapolation capacity, we should adjust frequency or other more advanced methods. they are not implemented yet.
         """
         del kwargs # unused
         super().__init__()

@@ -118,7 +118,7 @@ class PointCrossAttention(nn.Module):
         take the fourier embeddings for both input and query pc
         Mental Note: FPS-sampled points (query_pc) act as latent tokens that attend to and learn from the broader context in input_pc.
-        Goal: get a smaller represenation (query_pc) to represent the entire scence structure by learning from a broader subset (input_pc).
+        Goal: get a smaller representation (query_pc) to represent the entire scence structure by learning from a broader subset (input_pc).
         More computationally efficient.

         Features are additional information for each point in the cloud
@@ -193,7 +193,7 @@ class PointCrossAttention(nn.Module):
             query = torch.cat([query, query_features], dim = -1)
             data = torch.cat([data, input_features], dim = -1)

-        # don't return pc_info to avoid unnecessary memory usuage
+        # don't return pc_info to avoid unnecessary memory usage
         return query.view(B, -1, query.shape[-1]), data.view(B, -1, data.shape[-1])

     def forward(self, point_cloud: torch.Tensor, features: torch.Tensor):

@@ -24,7 +24,7 @@ def log_txt_as_img(wh, xc, size=10):
         try:
             draw.text((0, 0), lines, fill="black", font=font)
         except UnicodeEncodeError:
-            logging.warning("Cant encode string for logging. Skipping.")
+            logging.warning("Can't encode string for logging. Skipping.")

         txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0
         txts.append(txt)

@@ -527,7 +527,7 @@ class VAE:
                 self.latent_dim = 2
                 self.output_channels = 3
             else:
-                logging.warning("WARNING: No VAE weights detected, VAE not initalized.")
+                logging.warning("WARNING: No VAE weights detected, VAE not initialized.")
                 self.first_stage_model = None
                 return
         else:

@@ -80,7 +80,7 @@ class ComfyExtension(ABC):
     async def on_load(self) -> None:
         """
        Called when an extension is loaded.

-        This should be used to initialize any global resources neeeded by the extension.
+        This should be used to initialize any global resources needed by the extension.
         """

     @abstractmethod

@@ -173,7 +173,7 @@ def is_valid_image_response(response: KlingVirtualTryOnResponse) -> bool:
 def validate_prompts(prompt: str, negative_prompt: str, max_length: int) -> bool:
-    """Verifies that the positive prompt is not empty and that neither promt is too long."""
+    """Verifies that the positive prompt is not empty and that neither prompt is too long."""
     if not prompt:
         raise ValueError("Positive prompt is empty")
     if len(prompt) > max_length:

@@ -89,7 +89,7 @@ def recraft_multipart_parser(data, parent_key=None, formatter: callable=None, co
     The OpenAI client that Recraft uses has a bizarre way of serializing lists:
-    It does NOT keep track of indeces of each list, so for background_color, that must be serialized as:
+    It does NOT keep track of indices of each list, so for background_color, that must be serialized as:
         'background_color[rgb][]' = [0, 0, 255]
     where the array is assigned to a key that has '[]' at the end, to signal it's an array.

@@ -114,7 +114,7 @@ def ray_condition(K, c2w, H, W, device):
     rays_d = directions @ c2w[..., :3, :3].transpose(-1, -2) # B, V, 3, HW
     rays_o = c2w[..., :3, 3] # B, V, 3
     rays_o = rays_o[:, :, None].expand_as(rays_d) # B, V, 3, HW
-    # c2w @ dirctions
+    # c2w @ directions
    rays_dxo = torch.cross(rays_o, rays_d)
    plucker = torch.cat([rays_dxo, rays_d], dim=-1)
    plucker = plucker.reshape(B, c2w.shape[1], H, W, 6) # B, V, H, W, 6

@@ -297,7 +297,7 @@ class TestDynamicDependencyCycle:
         mix1 = g.node("TestLazyMixImages", image1=input1, mask=mask.out(0))
         mix2 = g.node("TestLazyMixImages", image1=mix1.out(0), image2=input2, mask=mask.out(0))

-        # Create the cyle
+        # Create the cycle
         mix1.set_input("image2", mix2.out(0))

        return {