Update README.md
Browse files
README.md
CHANGED
|
@@ -28,7 +28,7 @@ Distilled 4-step and 8-step FLUX.1 models proposed in the paper:
|
|
| 28 |
[Sai Bi](https://sai-bi.github.io/)<sup>2</sup><br>
|
| 29 |
<sup>1</sup>Stanford University, <sup>2</sup>Adobe Research
|
| 30 |
<br>
|
| 31 |
-
[[arXiv](https://arxiv.org/abs/2510.14974)] [[Code](https://github.com/Lakonik/piFlow)] [[pi-Qwen Demo🤗](https://huggingface.co/spaces/Lakonik/pi-Qwen)] [[pi-FLUX Demo🤗](https://huggingface.co/spaces/Lakonik/pi-FLUX.1)]
|
| 32 |
|
| 33 |

|
| 34 |
|
|
@@ -42,8 +42,8 @@ We provide diffusers pipelines for easy inference. The following code demonstrat
|
|
| 42 |
Note: For the 8-NFE version, replace `gmflux_k8_piid_4step` with `gmflux_k8_piid_8step` and set `num_inference_steps=8`.
|
| 43 |
```python
|
| 44 |
import torch
|
| 45 |
-
from
|
| 46 |
-
from lakonlab.pipelines.
|
| 47 |
|
| 48 |
pipe = PiFluxPipeline.from_pretrained(
|
| 49 |
'black-forest-labs/FLUX.1-dev',
|
|
@@ -52,8 +52,8 @@ adapter_name = pipe.load_piflow_adapter( # you may later call `pipe.set_adapter
|
|
| 52 |
'Lakonik/pi-FLUX.1',
|
| 53 |
subfolder='gmflux_k8_piid_4step',
|
| 54 |
target_module_name='transformer')
|
| 55 |
-
pipe.scheduler =
|
| 56 |
-
pipe.scheduler.config, shift=3.2, use_dynamic_shifting=False)
|
| 57 |
pipe = pipe.to('cuda')
|
| 58 |
|
| 59 |
out = pipe(
|
|
@@ -71,8 +71,8 @@ out.save('gmflux_4nfe.png')
|
|
| 71 |
### 4-NFE DX-FLUX (DX Policy)
|
| 72 |
```python
|
| 73 |
import torch
|
| 74 |
-
from
|
| 75 |
-
from lakonlab.pipelines.
|
| 76 |
|
| 77 |
pipe = PiFluxPipeline.from_pretrained(
|
| 78 |
'black-forest-labs/FLUX.1-dev',
|
|
@@ -85,8 +85,8 @@ adapter_name = pipe.load_piflow_adapter( # you may later call `pipe.set_adapter
|
|
| 85 |
'Lakonik/pi-FLUX.1',
|
| 86 |
subfolder='dxflux_n10_piid_4step',
|
| 87 |
target_module_name='transformer')
|
| 88 |
-
pipe.scheduler =
|
| 89 |
-
pipe.scheduler.config, shift=3.2, use_dynamic_shifting=False)
|
| 90 |
pipe = pipe.to('cuda')
|
| 91 |
|
| 92 |
out = pipe(
|
|
|
|
| 28 |
[Sai Bi](https://sai-bi.github.io/)<sup>2</sup><br>
|
| 29 |
<sup>1</sup>Stanford University, <sup>2</sup>Adobe Research
|
| 30 |
<br>
|
| 31 |
+
[[arXiv](https://arxiv.org/abs/2510.14974)] [[Code](https://github.com/Lakonik/piFlow)] [[pi-Qwen Demo🤗](https://huggingface.co/spaces/Lakonik/pi-Qwen)] [[pi-FLUX Demo🤗](https://huggingface.co/spaces/Lakonik/pi-FLUX.1)] [[pi-FLUX.2 Demo🤗](https://huggingface.co/spaces/Lakonik/pi-FLUX.2)]
|
| 32 |
|
| 33 |

|
| 34 |
|
|
|
|
| 42 |
Note: For the 8-NFE version, replace `gmflux_k8_piid_4step` with `gmflux_k8_piid_8step` and set `num_inference_steps=8`.
|
| 43 |
```python
|
| 44 |
import torch
|
| 45 |
+
from lakonlab.models.diffusions.schedulers import FlowMapSDEScheduler
|
| 46 |
+
from lakonlab.pipelines.pipeline_piflux import PiFluxPipeline
|
| 47 |
|
| 48 |
pipe = PiFluxPipeline.from_pretrained(
|
| 49 |
'black-forest-labs/FLUX.1-dev',
|
|
|
|
| 52 |
'Lakonik/pi-FLUX.1',
|
| 53 |
subfolder='gmflux_k8_piid_4step',
|
| 54 |
target_module_name='transformer')
|
| 55 |
+
pipe.scheduler = FlowMapSDEScheduler.from_config( # use fixed shift=3.2
|
| 56 |
+
pipe.scheduler.config, shift=3.2, use_dynamic_shifting=False, final_step_size_scale=0.5)
|
| 57 |
pipe = pipe.to('cuda')
|
| 58 |
|
| 59 |
out = pipe(
|
|
|
|
| 71 |
### 4-NFE DX-FLUX (DX Policy)
|
| 72 |
```python
|
| 73 |
import torch
|
| 74 |
+
from lakonlab.models.diffusions.schedulers import FlowMapSDEScheduler
|
| 75 |
+
from lakonlab.pipelines.pipeline_piflux import PiFluxPipeline
|
| 76 |
|
| 77 |
pipe = PiFluxPipeline.from_pretrained(
|
| 78 |
'black-forest-labs/FLUX.1-dev',
|
|
|
|
| 85 |
'Lakonik/pi-FLUX.1',
|
| 86 |
subfolder='dxflux_n10_piid_4step',
|
| 87 |
target_module_name='transformer')
|
| 88 |
+
pipe.scheduler = FlowMapSDEScheduler.from_config( # use fixed shift=3.2
|
| 89 |
+
pipe.scheduler.config, shift=3.2, use_dynamic_shifting=False, final_step_size_scale=0.5)
|
| 90 |
pipe = pipe.to('cuda')
|
| 91 |
|
| 92 |
out = pipe(
|