Merge branch 'comfyanonymous:master' into fix/secure-combo

Dr.Lt.Data 2023-08-15 09:24:45 +09:00 committed by GitHub
commit da3c09a704
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 21 additions and 13 deletions


@@ -2,6 +2,13 @@ name: "Windows Release cu118 dependencies 2"
 on:
   workflow_dispatch:
+    inputs:
+      xformers:
+        description: 'xformers version'
+        required: true
+        type: string
+        default: "xformers"
 #  push:
 #    branches:
 #      - master
@@ -17,7 +24,7 @@ jobs:
       - shell: bash
         run: |
-          python -m pip wheel --no-cache-dir torch torchvision torchaudio xformers --extra-index-url https://download.pytorch.org/whl/cu118 -r requirements.txt pygit2 -w ./temp_wheel_dir
+          python -m pip wheel --no-cache-dir torch torchvision torchaudio ${{ inputs.xformers }} --extra-index-url https://download.pytorch.org/whl/cu118 -r requirements.txt pygit2 -w ./temp_wheel_dir
           python -m pip install --no-cache-dir ./temp_wheel_dir/*
           echo installed basic
           ls -lah temp_wheel_dir
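
The hunks above replace the hard-coded xformers package in the wheel-building step with the new ${{ inputs.xformers }} workflow_dispatch input, so the dispatcher can pin a specific xformers build (or pass the bare package name, which stays the default). As a rough, hedged illustration of how that input could be supplied when dispatching the workflow through the GitHub REST API: the repository, workflow file name, and token below are placeholders, not values taken from this diff.

# Hypothetical manual dispatch of the workflow above via the GitHub REST API.
# OWNER_REPO, WORKFLOW_FILE and TOKEN are placeholders; adjust to the real repo.
import requests

OWNER_REPO = "OWNER/REPO"                              # placeholder
WORKFLOW_FILE = "windows_release_dependencies.yml"     # placeholder file name
TOKEN = "ghp_..."                                      # token with workflow scope

resp = requests.post(
    f"https://api.github.com/repos/{OWNER_REPO}/actions/workflows/{WORKFLOW_FILE}/dispatches",
    headers={
        "Authorization": f"Bearer {TOKEN}",
        "Accept": "application/vnd.github+json",
    },
    # "xformers" matches the new workflow_dispatch input; a pinned requirement
    # such as "xformers==<version>" can be passed instead of the default.
    json={"ref": "master", "inputs": {"xformers": "xformers"}},
)
resp.raise_for_status()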

CODEOWNERS (new file)

@@ -0,0 +1 @@
+* @comfyanonymous


@@ -24,8 +24,8 @@ class ClipVisionModel():
         return self.model.load_state_dict(sd, strict=False)

     def encode_image(self, image):
-        img = torch.clip((255. * image[0]), 0, 255).round().int()
-        inputs = self.processor(images=[img], return_tensors="pt")
+        img = torch.clip((255. * image), 0, 255).round().int()
+        inputs = self.processor(images=img, return_tensors="pt")
         outputs = self.model(**inputs)
         return outputs
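
The change drops the image[0] indexing and the single-element list, so encode_image now scales and hands the whole image batch to the processor instead of only the first image. A minimal standalone sketch of the tensor handling before and after, using a dummy batch rather than the actual ClipVisionModel class (ComfyUI image tensors are floats shaped (batch, height, width, channels) in [0, 1]):

# Sketch of the preprocessing change in encode_image, with a dummy image batch.
import torch

image = torch.rand(4, 224, 224, 3)  # a batch of 4 images in [0, 1]

# Old behaviour: only the first image of the batch was scaled and encoded.
img_old = torch.clip((255. * image[0]), 0, 255).round().int()   # (224, 224, 3)

# New behaviour: the whole batch is scaled and passed on.
img_new = torch.clip((255. * image), 0, 255).round().int()      # (4, 224, 224, 3)

print(img_old.shape, img_new.shape)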


@@ -120,7 +120,7 @@ class SD21UNCLIP(BaseModel):
         weights = []
         noise_aug = []
         for unclip_cond in unclip_conditioning:
-            adm_cond = unclip_cond["clip_vision_output"].image_embeds
-            weight = unclip_cond["strength"]
-            noise_augment = unclip_cond["noise_augmentation"]
-            noise_level = round((self.noise_augmentor.max_noise_level - 1) * noise_augment)
+            for adm_cond in unclip_cond["clip_vision_output"].image_embeds:
+                weight = unclip_cond["strength"]
+                noise_augment = unclip_cond["noise_augmentation"]
+                noise_level = round((self.noise_augmentor.max_noise_level - 1) * noise_augment)
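
With batched CLIP vision outputs, image_embeds can now hold one embedding per input image, so the unCLIP conditioning loop iterates over each embedding instead of treating the whole batch as a single adm_cond. A simplified sketch of that loop structure follows; it uses plain tensors and a flat dict key in place of the real clip_vision_output object, omits the noise augmentor, and the embedding size is illustrative only:

# Simplified sketch of the nested loop over batched image embeddings.
import torch

unclip_conditioning = [
    {
        # may now contain several embeddings, one per input image (size illustrative)
        "image_embeds": torch.rand(2, 1024),
        "strength": 1.0,
        "noise_augmentation": 0.0,
    }
]

adm_inputs = []
weights = []
noise_aug = []
for unclip_cond in unclip_conditioning:
    for adm_cond in unclip_cond["image_embeds"]:   # one pass per image embedding
        weight = unclip_cond["strength"]
        noise_augment = unclip_cond["noise_augmentation"]
        weights.append(weight)
        noise_aug.append(noise_augment)
        adm_inputs.append(adm_cond * weight)       # real code noise-augments here

print(len(adm_inputs), adm_inputs[0].shape)        # 2 embeddings, each (1024,)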


@@ -771,7 +771,7 @@ class StyleModelApply:
     CATEGORY = "conditioning/style_model"

     def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
-        cond = style_model.get_cond(clip_vision_output)
+        cond = style_model.get_cond(clip_vision_output).flatten(start_dim=0, end_dim=1).unsqueeze(dim=0)
         c = []
         for t in conditioning:
             n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
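
Since get_cond can now return conditioning for a batch of CLIP vision outputs, the added .flatten(start_dim=0, end_dim=1).unsqueeze(dim=0) folds the batch and token dimensions into one token sequence and restores a batch dimension of 1, so the result can still be concatenated onto the text conditioning along the token axis. A small shape-only sketch of that reshaping (all dimensions are illustrative, not taken from the style model):

# Shape-only sketch of the flatten/unsqueeze added to apply_stylemodel.
import torch

# Suppose the style model returns tokens for a batch of 2 images,
# each contributing 8 tokens of width 768.
cond = torch.rand(2, 8, 768)

# Collapse (batch, tokens) into one token sequence, then restore a batch of 1.
cond = cond.flatten(start_dim=0, end_dim=1).unsqueeze(dim=0)   # -> (1, 16, 768)

# It can now be concatenated with text conditioning along the token dimension.
text_cond = torch.rand(1, 77, 768)
combined = torch.cat((text_cond, cond), dim=1)                 # -> (1, 93, 768)
print(combined.shape)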