Spaces:
Running
on
Zero
Running
on
Zero
liuhuijie
committed on
Commit
·
ce02858
1
Parent(s):
fb124c7
update
Browse files- install.sh +1 -1
- models/model.py +0 -1
- models/pipe.py +2 -2
- src/lakonlab +1 -0
install.sh
CHANGED
|
@@ -4,4 +4,4 @@
|
|
| 4 |
pip install torch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 --extra-index-url https://download.pytorch.org/whl/cu121
|
| 5 |
|
| 6 |
# 第二步:安装其他依赖
|
| 7 |
-
pip install --no-cache-dir -r requirements.txt
|
|
|
|
| 4 |
pip install torch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 --extra-index-url https://download.pytorch.org/whl/cu121
|
| 5 |
|
| 6 |
# 第二步:安装其他依赖
|
| 7 |
+
pip install --no-cache-dir -r requirements.txt --no-deps
|
models/model.py
CHANGED
|
@@ -115,7 +115,6 @@ class StyleGenerator(Qwen2ForCausalLM):
|
|
| 115 |
logits = self.lm_head(hidden_states[:, slice_indices, :])
|
| 116 |
|
| 117 |
loss = None
|
| 118 |
-
# breakpoint()
|
| 119 |
if labels is not None:
|
| 120 |
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
|
| 121 |
coefficient = get_suppression_coefficient(code_freq, code_freq_threshold, k).to(logits.device)
|
|
|
|
| 115 |
logits = self.lm_head(hidden_states[:, slice_indices, :])
|
| 116 |
|
| 117 |
loss = None
|
|
|
|
| 118 |
if labels is not None:
|
| 119 |
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
|
| 120 |
coefficient = get_suppression_coefficient(code_freq, code_freq_threshold, k).to(logits.device)
|
models/pipe.py
CHANGED
|
@@ -30,7 +30,7 @@ import numpy as np
|
|
| 30 |
from PIL import Image
|
| 31 |
from .utils import retrieve_raw_timesteps
|
| 32 |
from .lakonlab.pipelines.piflow_loader import PiFlowLoaderMixin
|
| 33 |
-
|
| 34 |
from .lakonlab.models.diffusions.piflow_policies.dx import DXPolicy
|
| 35 |
from .lakonlab.models.diffusions.piflow_policies.gmflow import GMFlowPolicy
|
| 36 |
|
|
@@ -389,7 +389,7 @@ class CoTylePipeline(QwenImageEditPipeline):
|
|
| 389 |
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
|
| 390 |
timestep = t.expand(latents.shape[0]).to(latents.dtype)
|
| 391 |
with self.transformer.cache_context("cond"):
|
| 392 |
-
|
| 393 |
noise_pred = self.transformer(
|
| 394 |
hidden_states=latent_model_input.to(dtype=self.transformer.dtype),
|
| 395 |
timestep=timestep / 1000,
|
|
|
|
| 30 |
from PIL import Image
|
| 31 |
from .utils import retrieve_raw_timesteps
|
| 32 |
from .lakonlab.pipelines.piflow_loader import PiFlowLoaderMixin
|
| 33 |
+
|
| 34 |
from .lakonlab.models.diffusions.piflow_policies.dx import DXPolicy
|
| 35 |
from .lakonlab.models.diffusions.piflow_policies.gmflow import GMFlowPolicy
|
| 36 |
|
|
|
|
| 389 |
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
|
| 390 |
timestep = t.expand(latents.shape[0]).to(latents.dtype)
|
| 391 |
with self.transformer.cache_context("cond"):
|
| 392 |
+
|
| 393 |
noise_pred = self.transformer(
|
| 394 |
hidden_states=latent_model_input.to(dtype=self.transformer.dtype),
|
| 395 |
timestep=timestep / 1000,
|
src/lakonlab
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
Subproject commit b1ef16e5e305251bccdfeac2a0e3d0ef339b974a
|