Remove omegaconf dependency and some ci changes.
comfyanonymous committed Mar 13, 2023
1 parent 8066508 commit 54dbfaf
Showing 8 changed files with 26 additions and 22 deletions.
5 changes: 3 additions & 2 deletions .ci/setup_windows_zip_nightly_pytorch.ps1
@@ -5,11 +5,12 @@ cd python_embeded
Add-Content -Path .\python310._pth -Value 'import site'
Invoke-WebRequest -Uri https://bootstrap.pypa.io/get-pip.py -OutFile get-pip.py
.\python.exe get-pip.py
.\python.exe -s -m pip install torch torchvision torchaudio --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu118 -r ../ComfyUI/requirements.txt pygit2
python -m pip wheel torch torchvision torchaudio --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu118 -r ../ComfyUI/requirements.txt pygit2 -w ../temp_wheel_dir
ls ../temp_wheel_dir
.\python.exe -s -m pip install --pre (get-item ..\temp_wheel_dir\*)
"../ComfyUI`n" + (Get-Content .\python310._pth -Raw) | Set-Content .\python310._pth
cd ..


mkdir ComfyUI_windows_portable
mv python_embeded ComfyUI_windows_portable_nightly_pytorch
mv ComfyUI_copy ComfyUI_windows_portable_nightly_pytorch/ComfyUI
13 changes: 7 additions & 6 deletions .github/workflows/windows_release.yml
@@ -25,10 +25,11 @@ jobs:
.\setup_windows_zip.ps1
ls
- uses: "marvinpinto/action-automatic-releases@latest"
- name: Upload binaries to release
uses: svenstaro/upload-release-action@v2
with:
repo_token: "${{ secrets.GITHUB_TOKEN }}"
automatic_release_tag: "latest"
prerelease: true
title: "ComfyUI Standalone Portable Windows Build (For NVIDIA or CPU only)"
files: ComfyUI_windows_portable_nvidia_or_cpu.7z
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: ComfyUI_windows_portable_nvidia_or_cpu.7z
tag: "latest"
overwrite: true

4 changes: 3 additions & 1 deletion .github/workflows/windows_release_nightly_pytorch.yml
@@ -17,7 +17,9 @@ jobs:
- uses: actions/checkout@v3
with:
fetch-depth: 0

- uses: actions/setup-python@v4
with:
python-version: '3.10.9'
- run: |
cd ..
cp ComfyUI/.ci/setup_windows_zip_nightly_pytorch.ps1 ./
6 changes: 3 additions & 3 deletions comfy/cldm/cldm.py
@@ -59,9 +59,9 @@ def __init__(

if context_dim is not None:
assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
from omegaconf.listconfig import ListConfig
if type(context_dim) == ListConfig:
context_dim = list(context_dim)
# from omegaconf.listconfig import ListConfig
# if type(context_dim) == ListConfig:
# context_dim = list(context_dim)

if num_heads_upsample == -1:
num_heads_upsample = num_heads
5 changes: 2 additions & 3 deletions comfy/ldm/models/diffusion/ddpm.py
@@ -18,7 +18,6 @@
from tqdm import tqdm
from torchvision.utils import make_grid
# from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig

from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
@@ -1124,8 +1123,8 @@ def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
def get_unconditional_conditioning(self, batch_size, null_label=None):
if null_label is not None:
xc = null_label
if isinstance(xc, ListConfig):
xc = list(xc)
# if isinstance(xc, ListConfig):
# xc = list(xc)
if isinstance(xc, dict) or isinstance(xc, list):
c = self.get_learned_conditioning(xc)
else:
6 changes: 3 additions & 3 deletions comfy/ldm/modules/diffusionmodules/openaimodel.py
@@ -477,9 +477,9 @@ def __init__(

if context_dim is not None:
assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
from omegaconf.listconfig import ListConfig
if type(context_dim) == ListConfig:
context_dim = list(context_dim)
# from omegaconf.listconfig import ListConfig
# if type(context_dim) == ListConfig:
# context_dim = list(context_dim)

if num_heads_upsample == -1:
num_heads_upsample = num_heads
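
Note: the ListConfig check commented out here (and identically in cldm.py and ddpm.py above) is safe to drop once configs are parsed with plain yaml, because YAML sequences already load as ordinary Python lists. A minimal sketch, assuming a hypothetical config fragment rather than one of ComfyUI's shipped config files:

import yaml

# Hypothetical config fragment for illustration; yaml.safe_load parses the
# sequence into a plain Python list, so there is no omegaconf ListConfig
# object left to convert.
sample_config = """
model:
  params:
    context_dim: [768, 1024]
"""

config = yaml.safe_load(sample_config)
context_dim = config["model"]["params"]["context_dim"]
print(type(context_dim))              # <class 'list'>
print(isinstance(context_dim, list))  # True
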
7 changes: 4 additions & 3 deletions comfy/sd.py
@@ -6,7 +6,7 @@
import model_management
from .ldm.util import instantiate_from_config
from .ldm.models.autoencoder import AutoencoderKL
from omegaconf import OmegaConf
import yaml
from .cldm import cldm
from .t2i_adapter import adapter

@@ -726,7 +726,7 @@ def load_clip(ckpt_path, embedding_directory=None):
return clip

def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=None):
config = OmegaConf.load(config_path)
with open(config_path, 'r') as stream:
config = yaml.safe_load(stream)
model_config_params = config['model']['params']
clip_config = model_config_params['cond_stage_config']
scale_factor = model_config_params['scale_factor']
@@ -750,7 +751,7 @@ class WeightsLoader(torch.nn.Module):
w.cond_stage_model = clip.cond_stage_model
load_state_dict_to = [w]

model = instantiate_from_config(config.model)
model = instantiate_from_config(config["model"])
sd = load_torch_file(ckpt_path)
model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to)
return (ModelPatcher(model), clip, vae)
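
Note: a minimal sketch of the pattern this hunk switches to, assuming a Stable Diffusion style YAML config with a model/params section (the file name below is illustrative). Where OmegaConf.load returned an attribute-style object accessed as config.model, yaml.safe_load returns plain dicts, so the same values are read with subscript access:

import yaml

# Illustrative config path; any SD-style yaml config with a model/params
# section works the same way.
with open("v1-inference.yaml", "r") as stream:
    config = yaml.safe_load(stream)

model_config_params = config["model"]["params"]
clip_config = model_config_params["cond_stage_config"]
scale_factor = model_config_params["scale_factor"]

model_section = config["model"]  # was config.model under OmegaConf

The behaviour only differs if code relied on OmegaConf-specific features such as attribute access or variable interpolation; plain key lookups like the ones above are unchanged.
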
2 changes: 1 addition & 1 deletion requirements.txt
@@ -1,11 +1,11 @@
torch
torchdiffeq
torchsde
omegaconf
einops
open-clip-torch
transformers
safetensors
pytorch_lightning
aiohttp
accelerate
pyyaml
