Merge branch 'comfyanonymous:master' into feat/is_change_object_storage

commit 4516d88491
Dr.Lt.Data, 2023-07-20 01:20:47 +09:00, committed by GitHub
3 changed files with 5 additions and 3 deletions


@@ -42,7 +42,7 @@ parser.add_argument("--auto-launch", action="store_true", help="Automatically la
 parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.")
 cm_group = parser.add_mutually_exclusive_group()
 cm_group.add_argument("--cuda-malloc", action="store_true", help="Enable cudaMallocAsync (enabled by default for torch 2.0 and up).")
-cm_group.add_argument("--disable-cuda-malloc", action="store_true", help="Enable cudaMallocAsync.")
+cm_group.add_argument("--disable-cuda-malloc", action="store_true", help="Disable cudaMallocAsync.")
 parser.add_argument("--dont-upcast-attention", action="store_true", help="Disable upcasting of attention. Can boost speed but increase the chances of black images.")


@@ -91,7 +91,9 @@ class DiscreteSchedule(nn.Module):
         return log_sigma.exp()
 
     def predict_eps_discrete_timestep(self, input, t, **kwargs):
-        sigma = self.t_to_sigma(t.round())
+        if t.dtype != torch.int64 and t.dtype != torch.int32:
+            t = t.round()
+        sigma = self.t_to_sigma(t)
         input = input * ((utils.append_dims(sigma, input.ndim) ** 2 + 1.0) ** 0.5)
         return (input - self(input, sigma, **kwargs)) / utils.append_dims(sigma, input.ndim)
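
The new guard skips rounding for int32/int64 timesteps, which are already exact step indices; depending on the PyTorch version, calling round() on an integer tensor either raises or is a pointless no-op, so only floating-point timesteps get rounded. A minimal sketch of the guard in isolation (round_timestep is a hypothetical helper name, not one from the diff):

import torch

def round_timestep(t: torch.Tensor) -> torch.Tensor:
    # Same check as the hunk above: only floating-point timesteps are
    # rounded; integer timesteps are assumed to already be exact indices.
    if t.dtype != torch.int64 and t.dtype != torch.int32:
        t = t.round()
    return t

print(round_timestep(torch.tensor([3.4, 7.9])))  # tensor([3., 8.])
print(round_timestep(torch.tensor([3, 8])))      # tensor([3, 8]), untouched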


@@ -529,7 +529,7 @@ def should_use_fp16(device=None, model_params=0):
         return False
 
     #FP16 is just broken on these cards
-    nvidia_16_series = ["1660", "1650", "1630", "T500", "T550", "T600"]
+    nvidia_16_series = ["1660", "1650", "1630", "T500", "T550", "T600", "MX550", "MX450"]
     for x in nvidia_16_series:
        if x in props.name:
            return False
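
The two new entries, "MX550" and "MX450", extend a substring blocklist: should_use_fp16() keeps any device whose reported name contains one of these markers on fp32. A minimal sketch of that check (fp16_ok and the device names are illustrative; props.name in the real function comes from torch.cuda.get_device_properties):

# FP16 is reported broken on these cards; list copied from the hunk above.
nvidia_16_series = ["1660", "1650", "1630", "T500", "T550", "T600", "MX550", "MX450"]

def fp16_ok(device_name: str) -> bool:
    # Substring match, so "1660" also catches "GTX 1660 Ti", "GTX 1660 SUPER", etc.
    return not any(x in device_name for x in nvidia_16_series)

print(fp16_ok("NVIDIA GeForce GTX 1660 Ti"))  # False
print(fp16_ok("NVIDIA GeForce MX550"))        # False (newly blocked)
print(fp16_ok("NVIDIA GeForce RTX 3090"))     # True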