End of training
This view is limited to 50 files because it contains too many changes.
- .gitattributes +1 -0
- README.md +43 -0
- checkpoint-1000/model.safetensors +3 -0
- checkpoint-1000/optimizer.bin +3 -0
- checkpoint-1000/pytorch_lora_weights.safetensors +3 -0
- checkpoint-1000/random_states_0.pkl +3 -0
- checkpoint-1000/scheduler.bin +3 -0
- checkpoint-2000/model.safetensors +3 -0
- checkpoint-2000/optimizer.bin +3 -0
- checkpoint-2000/pytorch_lora_weights.safetensors +3 -0
- checkpoint-2000/random_states_0.pkl +3 -0
- checkpoint-2000/scheduler.bin +3 -0
- checkpoint-3000/model.safetensors +3 -0
- checkpoint-3000/optimizer.bin +3 -0
- checkpoint-3000/pytorch_lora_weights.safetensors +3 -0
- checkpoint-3000/random_states_0.pkl +3 -0
- checkpoint-3000/scheduler.bin +3 -0
- diffusers/.github/ISSUE_TEMPLATE/bug-report.yml +110 -0
- diffusers/.github/ISSUE_TEMPLATE/config.yml +4 -0
- diffusers/.github/ISSUE_TEMPLATE/feature_request.md +20 -0
- diffusers/.github/ISSUE_TEMPLATE/feedback.md +12 -0
- diffusers/.github/ISSUE_TEMPLATE/new-model-addition.yml +31 -0
- diffusers/.github/ISSUE_TEMPLATE/translate.md +29 -0
- diffusers/.github/PULL_REQUEST_TEMPLATE.md +61 -0
- diffusers/.github/actions/setup-miniconda/action.yml +146 -0
- diffusers/.github/workflows/benchmark.yml +67 -0
- diffusers/.github/workflows/build_docker_images.yml +104 -0
- diffusers/.github/workflows/build_documentation.yml +27 -0
- diffusers/.github/workflows/build_pr_documentation.yml +23 -0
- diffusers/.github/workflows/mirror_community_pipeline.yml +102 -0
- diffusers/.github/workflows/nightly_tests.yml +584 -0
- diffusers/.github/workflows/notify_slack_about_release.yml +23 -0
- diffusers/.github/workflows/pr_dependency_test.yml +35 -0
- diffusers/.github/workflows/pr_flax_dependency_test.yml +38 -0
- diffusers/.github/workflows/pr_test_fetcher.yml +177 -0
- diffusers/.github/workflows/pr_tests.yml +301 -0
- diffusers/.github/workflows/pr_torch_dependency_test.yml +36 -0
- diffusers/.github/workflows/push_tests.yml +392 -0
- diffusers/.github/workflows/push_tests_fast.yml +126 -0
- diffusers/.github/workflows/push_tests_mps.yml +76 -0
- diffusers/.github/workflows/pypi_publish.yaml +81 -0
- diffusers/.github/workflows/release_tests_fast.yml +446 -0
- diffusers/.github/workflows/run_tests_from_a_pr.yml +74 -0
- diffusers/.github/workflows/ssh-pr-runner.yml +40 -0
- diffusers/.github/workflows/ssh-runner.yml +52 -0
- diffusers/.github/workflows/stale.yml +30 -0
- diffusers/.github/workflows/trufflehog.yml +15 -0
- diffusers/.github/workflows/typos.yml +14 -0
- diffusers/.github/workflows/update_metadata.yml +30 -0
- diffusers/.github/workflows/upload_pr_documentation.yml +16 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+diffusers/examples/research_projects/gligen/generated-images-100000-00.png filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,43 @@
+---
+base_model: stabilityai/stable-diffusion-2
+library_name: diffusers
+license: creativeml-openrail-m
+inference: true
+tags:
+- stable-diffusion
+- stable-diffusion-diffusers
+- text-to-image
+- diffusers
+- diffusers-training
+- lora
+---
+
+<!-- This model card has been generated automatically according to the information the training script had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+
+# LoRA text2image fine-tuning - ButterChicken98/plantVillage-stableDiffusion-2-iter2_with_one_caption
+These are LoRA adaption weights for stabilityai/stable-diffusion-2. The weights were fine-tuned on the ButterChicken98/plantvillage-image-text-pairs dataset. You can find some example images in the following.
+
+![img_0](./image_0.png)
+![img_1](./image_1.png)
+![img_2](./image_2.png)
+![img_3](./image_3.png)
+
+
+
+## Intended uses & limitations
+
+#### How to use
+
+```python
+# TODO: add an example code snippet for running this diffusion pipeline
+```
+
+#### Limitations and bias
+
+[TODO: provide examples of latent issues and potential remediations]
+
+## Training details
+
+[TODO: describe the data used to train the model]
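The model card's "How to use" snippet above is still a TODO. As an illustration only (not part of this commit), a minimal sketch of loading these LoRA weights on top of the base model with the `diffusers` API could look like the following; the prompt and generation settings are placeholder assumptions.

```python
# Minimal sketch (assumption, not part of this diff): apply the LoRA weights from this
# repository on top of the stabilityai/stable-diffusion-2 base model.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2", torch_dtype=torch.float16
).to("cuda")

# Load the LoRA adapter produced by this training run.
pipe.load_lora_weights("ButterChicken98/plantVillage-stableDiffusion-2-iter2_with_one_caption")

# Example prompt (placeholder); the training data is PlantVillage plant-disease imagery.
image = pipe("a leaf with early blight, PlantVillage style", num_inference_steps=30).images[0]
image.save("example.png")
```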
checkpoint-1000/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b872fcaec06ed79b7cb015a4e6fa8dfcab7a2a7b756eccbe823fb620e81fd00
+size 3467087168
checkpoint-1000/optimizer.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e462a3e41f6d8db2c263dcb08e79ed76b5126119eab3fd6ce2af296710823b80
+size 6854266
checkpoint-1000/pytorch_lora_weights.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:36424c4835a86ff41e337e164f8c7f0e01bc676ca15ef5087c096d712b46b70d
+size 3357296
checkpoint-1000/random_states_0.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5ed7dd35643b601e4cf3b1a6f5e1c54bbe1816ad9938ccd6d4104554fb0da6b
+size 14668
checkpoint-1000/scheduler.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:545821dc5493a874470abe7bed7760d7d04473241c9b6fb7974da5de059be251
+size 1000
checkpoint-2000/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5159c00be78feed169406e1fda4743d2d2201532be7eaf72f5ead762b05bd1e8
+size 3467087168
checkpoint-2000/optimizer.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32e1e156f0c7e9a0f05c1b0252d3fc151824ceb853b37be5ee4f10da7475a854
+size 6854266
checkpoint-2000/pytorch_lora_weights.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a55753d7f77e39f946299483e14a2cc7511fa96ac5eb148e459e5770e558883f
+size 3357296
checkpoint-2000/random_states_0.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a06234a8af40b17c754d46ac6c649fc7e07f77607e2fe4a09396dc7954c3ef5b
+size 14668
checkpoint-2000/scheduler.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a139f59b5b69af0cda7ebfd38ce170a908e8c9c4a46f24561b5c7c8e15856f7a
+size 1000
checkpoint-3000/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37cf0ef580d4295f6be06d8c7823b3437c38fe64225ea5bb5a0a44351a07e37e
+size 3467087168
checkpoint-3000/optimizer.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2f574532020933d2be6acd17d7896e94c6c62e283c6614b821cc3bb889eaf6a
+size 6854266
checkpoint-3000/pytorch_lora_weights.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ac675d7f72375e781d0c6c0ee5792ccb2facbe5af7b67f38eda146b1b2fcdef
+size 3357296
checkpoint-3000/random_states_0.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04eff2137eaf5e3d12fafda3820d05351794a8e275102455bfd3625bcdecab1d
+size 14668
checkpoint-3000/scheduler.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa9aaa747f8bfe7bbd402fd01ce71c8b4e9bef64d71e2dae6c447447226609ab
+size 1000
diffusers/.github/ISSUE_TEMPLATE/bug-report.yml
ADDED
@@ -0,0 +1,110 @@
+name: "\U0001F41B Bug Report"
+description: Report a bug on Diffusers
+labels: [ "bug" ]
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Thanks a lot for taking the time to file this issue 🤗.
+        Issues do not only help to improve the library, but also publicly document common problems, questions, workflows for the whole community!
+        Thus, issues are of the same importance as pull requests when contributing to this library ❤️.
+        In order to make your issue as **useful for the community as possible**, let's try to stick to some simple guidelines:
+        - 1. Please try to be as precise and concise as possible.
+             *Give your issue a fitting title. Assume that someone which very limited knowledge of Diffusers can understand your issue. Add links to the source code, documentation other issues, pull requests etc...*
+        - 2. If your issue is about something not working, **always** provide a reproducible code snippet. The reader should be able to reproduce your issue by **only copy-pasting your code snippet into a Python shell**.
+             *The community cannot solve your issue if it cannot reproduce it. If your bug is related to training, add your training script and make everything needed to train public. Otherwise, just add a simple Python code snippet.*
+        - 3. Add the **minimum** amount of code / context that is needed to understand, reproduce your issue.
+             *Make the life of maintainers easy. `diffusers` is getting many issues every day. Make sure your issue is about one bug and one bug only. Make sure you add only the context, code needed to understand your issues - nothing more. Generally, every issue is a way of documenting this library, try to make it a good documentation entry.*
+        - 4. For issues related to community pipelines (i.e., the pipelines located in the `examples/community` folder), please tag the author of the pipeline in your issue thread as those pipelines are not maintained.
+  - type: markdown
+    attributes:
+      value: |
+        For more in-detail information on how to write good issues you can have a look [here](https://huggingface.co/course/chapter8/5?fw=pt).
+  - type: textarea
+    id: bug-description
+    attributes:
+      label: Describe the bug
+      description: A clear and concise description of what the bug is. If you intend to submit a pull request for this issue, tell us in the description. Thanks!
+      placeholder: Bug description
+    validations:
+      required: true
+  - type: textarea
+    id: reproduction
+    attributes:
+      label: Reproduction
+      description: Please provide a minimal reproducible code which we can copy/paste and reproduce the issue.
+      placeholder: Reproduction
+    validations:
+      required: true
+  - type: textarea
+    id: logs
+    attributes:
+      label: Logs
+      description: "Please include the Python logs if you can."
+      render: shell
+  - type: textarea
+    id: system-info
+    attributes:
+      label: System Info
+      description: Please share your system info with us. You can run the command `diffusers-cli env` and copy-paste its output below.
+      placeholder: Diffusers version, platform, Python version, ...
+    validations:
+      required: true
+  - type: textarea
+    id: who-can-help
+    attributes:
+      label: Who can help?
+      description: |
+        Your issue will be replied to more quickly if you can figure out the right person to tag with @.
+        If you know how to use git blame, that is the easiest way, otherwise, here is a rough guide of **who to tag**.
+
+        All issues are read by one of the core maintainers, so if you don't know who to tag, just leave this blank and
+        a core maintainer will ping the right person.
+
+        Please tag a maximum of 2 people.
+
+        Questions on DiffusionPipeline (Saving, Loading, From pretrained, ...): @sayakpaul @DN6
+
+        Questions on pipelines:
+        - Stable Diffusion @yiyixuxu @asomoza
+        - Stable Diffusion XL @yiyixuxu @sayakpaul @DN6
+        - Stable Diffusion 3: @yiyixuxu @sayakpaul @DN6 @asomoza
+        - Kandinsky @yiyixuxu
+        - ControlNet @sayakpaul @yiyixuxu @DN6
+        - T2I Adapter @sayakpaul @yiyixuxu @DN6
+        - IF @DN6
+        - Text-to-Video / Video-to-Video @DN6 @a-r-r-o-w
+        - Wuerstchen @DN6
+        - Other: @yiyixuxu @DN6
+        - Improving generation quality: @asomoza
+
+        Questions on models:
+        - UNet @DN6 @yiyixuxu @sayakpaul
+        - VAE @sayakpaul @DN6 @yiyixuxu
+        - Transformers/Attention @DN6 @yiyixuxu @sayakpaul
+
+        Questions on single file checkpoints: @DN6
+
+        Questions on Schedulers: @yiyixuxu
+
+        Questions on LoRA: @sayakpaul
+
+        Questions on Textual Inversion: @sayakpaul
+
+        Questions on Training:
+        - DreamBooth @sayakpaul
+        - Text-to-Image Fine-tuning @sayakpaul
+        - Textual Inversion @sayakpaul
+        - ControlNet @sayakpaul
+
+        Questions on Tests: @DN6 @sayakpaul @yiyixuxu
+
+        Questions on Documentation: @stevhliu
+
+        Questions on JAX- and MPS-related things: @pcuenca
+
+        Questions on audio pipelines: @sanchit-gandhi
+
+
+      placeholder: "@Username ..."
diffusers/.github/ISSUE_TEMPLATE/config.yml
ADDED
@@ -0,0 +1,4 @@
+contact_links:
+  - name: Questions / Discussions
+    url: https://github.com/huggingface/diffusers/discussions
+    about: General usage questions and community discussions
diffusers/.github/ISSUE_TEMPLATE/feature_request.md
ADDED
@@ -0,0 +1,20 @@
+---
+name: "\U0001F680 Feature Request"
+about: Suggest an idea for this project
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...].
+
+**Describe the solution you'd like.**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered.**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context.**
+Add any other context or screenshots about the feature request here.
diffusers/.github/ISSUE_TEMPLATE/feedback.md
ADDED
@@ -0,0 +1,12 @@
+---
+name: "💬 Feedback about API Design"
+about: Give feedback about the current API design
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**What API design would you like to have changed or added to the library? Why?**
+
+**What use case would this enable or better enable? Can you give us a code example?**
diffusers/.github/ISSUE_TEMPLATE/new-model-addition.yml
ADDED
@@ -0,0 +1,31 @@
+name: "\U0001F31F New Model/Pipeline/Scheduler Addition"
+description: Submit a proposal/request to implement a new diffusion model/pipeline/scheduler
+labels: [ "New model/pipeline/scheduler" ]
+
+body:
+  - type: textarea
+    id: description-request
+    validations:
+      required: true
+    attributes:
+      label: Model/Pipeline/Scheduler description
+      description: |
+        Put any and all important information relative to the model/pipeline/scheduler
+
+  - type: checkboxes
+    id: information-tasks
+    attributes:
+      label: Open source status
+      description: |
+        Please note that if the model implementation isn't available or if the weights aren't open-source, we are less likely to implement it in `diffusers`.
+      options:
+        - label: "The model implementation is available."
+        - label: "The model weights are available (Only relevant if addition is not a scheduler)."
+
+  - type: textarea
+    id: additional-info
+    attributes:
+      label: Provide useful links for the implementation
+      description: |
+        Please provide information regarding the implementation, the weights, and the authors.
+        Please mention the authors by @gh-username if you're aware of their usernames.
diffusers/.github/ISSUE_TEMPLATE/translate.md
ADDED
@@ -0,0 +1,29 @@
+---
+name: 🌐 Translating a New Language?
+about: Start a new translation effort in your language
+title: '[<languageCode>] Translating docs to <languageName>'
+labels: WIP
+assignees: ''
+
+---
+
+<!--
+Note: Please search to see if an issue already exists for the language you are trying to translate.
+-->
+
+Hi!
+
+Let's bring the documentation to all the <languageName>-speaking community 🌐.
+
+Who would want to translate? Please follow the 🤗 [TRANSLATING guide](https://github.com/huggingface/diffusers/blob/main/docs/TRANSLATING.md). Here is a list of the files ready for translation. Let us know in this issue if you'd like to translate any, and we'll add your name to the list.
+
+Some notes:
+
+* Please translate using an informal tone (imagine you are talking with a friend about Diffusers 🤗).
+* Please translate in a gender-neutral way.
+* Add your translations to the folder called `<languageCode>` inside the [source folder](https://github.com/huggingface/diffusers/tree/main/docs/source).
+* Register your translation in `<languageCode>/_toctree.yml`; please follow the order of the [English version](https://github.com/huggingface/diffusers/blob/main/docs/source/en/_toctree.yml).
+* Once you're finished, open a pull request and tag this issue by including #issue-number in the description, where issue-number is the number of this issue. Please ping @stevhliu for review.
+* 🙋 If you'd like others to help you with the translation, you can also post in the 🤗 [forums](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63).
+
+Thank you so much for your help! 🤗
diffusers/.github/PULL_REQUEST_TEMPLATE.md
ADDED
@@ -0,0 +1,61 @@
+# What does this PR do?
+
+<!--
+Congratulations! You've made it this far! You're not quite done yet though.
+
+Once merged, your PR is going to appear in the release notes with the title you set, so make sure it's a great title that fully reflects the extent of your awesome contribution.
+
+Then, please replace this with a description of the change and which issue is fixed (if applicable). Please also include relevant motivation and context. List any dependencies (if any) that are required for this change.
+
+Once you're done, someone will review your PR shortly (see the section "Who can review?" below to tag some potential reviewers). They may suggest changes to make the code even better. If no one reviewed your PR after a week has passed, don't hesitate to post a new comment @-mentioning the same persons---sometimes notifications get lost.
+-->
+
+<!-- Remove if not applicable -->
+
+Fixes # (issue)
+
+
+## Before submitting
+- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
+- [ ] Did you read the [contributor guideline](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md)?
+- [ ] Did you read our [philosophy doc](https://github.com/huggingface/diffusers/blob/main/PHILOSOPHY.md) (important for complex PRs)?
+- [ ] Was this discussed/approved via a GitHub issue or the [forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63)? Please add a link to it if that's the case.
+- [ ] Did you make sure to update the documentation with your changes? Here are the
+      [documentation guidelines](https://github.com/huggingface/diffusers/tree/main/docs), and
+      [here are tips on formatting docstrings](https://github.com/huggingface/diffusers/tree/main/docs#writing-source-documentation).
+- [ ] Did you write any new necessary tests?
+
+
+## Who can review?
+
+Anyone in the community is free to review the PR once the tests have passed. Feel free to tag
+members/contributors who may be interested in your PR.
+
+<!-- Your PR will be replied to more quickly if you can figure out the right person to tag with @.
+
+If you know how to use git blame, that is the easiest way, otherwise, here is a rough guide of **who to tag**.
+Please tag fewer than 3 people.
+
+Core library:
+
+- Schedulers: @yiyixuxu
+- Pipelines and pipeline callbacks: @yiyixuxu and @asomoza
+- Training examples: @sayakpaul
+- Docs: @stevhliu and @sayakpaul
+- JAX and MPS: @pcuenca
+- Audio: @sanchit-gandhi
+- General functionalities: @sayakpaul @yiyixuxu @DN6
+
+Integrations:
+
+- deepspeed: HF Trainer/Accelerate: @SunMarc
+- PEFT: @sayakpaul @BenjaminBossan
+
+HF projects:
+
+- accelerate: [different repo](https://github.com/huggingface/accelerate)
+- datasets: [different repo](https://github.com/huggingface/datasets)
+- transformers: [different repo](https://github.com/huggingface/transformers)
+- safetensors: [different repo](https://github.com/huggingface/safetensors)
+
+-->
diffusers/.github/actions/setup-miniconda/action.yml
ADDED
@@ -0,0 +1,146 @@
+name: Set up conda environment for testing
+
+description: Sets up miniconda in your ${RUNNER_TEMP} environment and gives you the ${CONDA_RUN} environment variable so you don't have to worry about polluting non-empeheral runners anymore
+
+inputs:
+  python-version:
+    description: If set to any value, don't use sudo to clean the workspace
+    required: false
+    type: string
+    default: "3.9"
+  miniconda-version:
+    description: Miniconda version to install
+    required: false
+    type: string
+    default: "4.12.0"
+  environment-file:
+    description: Environment file to install dependencies from
+    required: false
+    type: string
+    default: ""
+
+runs:
+  using: composite
+  steps:
+    # Use the same trick from https://github.com/marketplace/actions/setup-miniconda
+    # to refresh the cache daily. This is kind of optional though
+    - name: Get date
+      id: get-date
+      shell: bash
+      run: echo "today=$(/bin/date -u '+%Y%m%d')d" >> $GITHUB_OUTPUT
+    - name: Setup miniconda cache
+      id: miniconda-cache
+      uses: actions/cache@v2
+      with:
+        path: ${{ runner.temp }}/miniconda
+        key: miniconda-${{ runner.os }}-${{ runner.arch }}-${{ inputs.python-version }}-${{ steps.get-date.outputs.today }}
+    - name: Install miniconda (${{ inputs.miniconda-version }})
+      if: steps.miniconda-cache.outputs.cache-hit != 'true'
+      env:
+        MINICONDA_VERSION: ${{ inputs.miniconda-version }}
+      shell: bash -l {0}
+      run: |
+        MINICONDA_INSTALL_PATH="${RUNNER_TEMP}/miniconda"
+        mkdir -p "${MINICONDA_INSTALL_PATH}"
+        case ${RUNNER_OS}-${RUNNER_ARCH} in
+          Linux-X64)
+            MINICONDA_ARCH="Linux-x86_64"
+            ;;
+          macOS-ARM64)
+            MINICONDA_ARCH="MacOSX-arm64"
+            ;;
+          macOS-X64)
+            MINICONDA_ARCH="MacOSX-x86_64"
+            ;;
+          *)
+            echo "::error::Platform ${RUNNER_OS}-${RUNNER_ARCH} currently unsupported using this action"
+            exit 1
+            ;;
+        esac
+        MINICONDA_URL="https://repo.anaconda.com/miniconda/Miniconda3-py39_${MINICONDA_VERSION}-${MINICONDA_ARCH}.sh"
+        curl -fsSL "${MINICONDA_URL}" -o "${MINICONDA_INSTALL_PATH}/miniconda.sh"
+        bash "${MINICONDA_INSTALL_PATH}/miniconda.sh" -b -u -p "${MINICONDA_INSTALL_PATH}"
+        rm -rf "${MINICONDA_INSTALL_PATH}/miniconda.sh"
+    - name: Update GitHub path to include miniconda install
+      shell: bash
+      run: |
+        MINICONDA_INSTALL_PATH="${RUNNER_TEMP}/miniconda"
+        echo "${MINICONDA_INSTALL_PATH}/bin" >> $GITHUB_PATH
+    - name: Setup miniconda env cache (with env file)
+      id: miniconda-env-cache-env-file
+      if: ${{ runner.os }} == 'macOS' && ${{ inputs.environment-file }} != ''
+      uses: actions/cache@v2
+      with:
+        path: ${{ runner.temp }}/conda-python-${{ inputs.python-version }}
+        key: miniconda-env-${{ runner.os }}-${{ runner.arch }}-${{ inputs.python-version }}-${{ steps.get-date.outputs.today }}-${{ hashFiles(inputs.environment-file) }}
+    - name: Setup miniconda env cache (without env file)
+      id: miniconda-env-cache
+      if: ${{ runner.os }} == 'macOS' && ${{ inputs.environment-file }} == ''
+      uses: actions/cache@v2
+      with:
+        path: ${{ runner.temp }}/conda-python-${{ inputs.python-version }}
+        key: miniconda-env-${{ runner.os }}-${{ runner.arch }}-${{ inputs.python-version }}-${{ steps.get-date.outputs.today }}
+    - name: Setup conda environment with python (v${{ inputs.python-version }})
+      if: steps.miniconda-env-cache-env-file.outputs.cache-hit != 'true' && steps.miniconda-env-cache.outputs.cache-hit != 'true'
+      shell: bash
+      env:
+        PYTHON_VERSION: ${{ inputs.python-version }}
+        ENV_FILE: ${{ inputs.environment-file }}
+      run: |
+        CONDA_BASE_ENV="${RUNNER_TEMP}/conda-python-${PYTHON_VERSION}"
+        ENV_FILE_FLAG=""
+        if [[ -f "${ENV_FILE}" ]]; then
+          ENV_FILE_FLAG="--file ${ENV_FILE}"
+        elif [[ -n "${ENV_FILE}" ]]; then
+          echo "::warning::Specified env file (${ENV_FILE}) not found, not going to include it"
+        fi
+        conda create \
+          --yes \
+          --prefix "${CONDA_BASE_ENV}" \
+          "python=${PYTHON_VERSION}" \
+          ${ENV_FILE_FLAG} \
+          cmake=3.22 \
+          conda-build=3.21 \
+          ninja=1.10 \
+          pkg-config=0.29 \
+          wheel=0.37
+    - name: Clone the base conda environment and update GitHub env
+      shell: bash
+      env:
+        PYTHON_VERSION: ${{ inputs.python-version }}
+        CONDA_BASE_ENV: ${{ runner.temp }}/conda-python-${{ inputs.python-version }}
+      run: |
+        CONDA_ENV="${RUNNER_TEMP}/conda_environment_${GITHUB_RUN_ID}"
+        conda create \
+          --yes \
+          --prefix "${CONDA_ENV}" \
+          --clone "${CONDA_BASE_ENV}"
+        # TODO: conda-build could not be cloned because it hardcodes the path, so it
+        # could not be cached
+        conda install --yes -p ${CONDA_ENV} conda-build=3.21
+        echo "CONDA_ENV=${CONDA_ENV}" >> "${GITHUB_ENV}"
+        echo "CONDA_RUN=conda run -p ${CONDA_ENV} --no-capture-output" >> "${GITHUB_ENV}"
+        echo "CONDA_BUILD=conda run -p ${CONDA_ENV} conda-build" >> "${GITHUB_ENV}"
+        echo "CONDA_INSTALL=conda install -p ${CONDA_ENV}" >> "${GITHUB_ENV}"
+    - name: Get disk space usage and throw an error for low disk space
+      shell: bash
+      run: |
+        echo "Print the available disk space for manual inspection"
+        df -h
+        # Set the minimum requirement space to 4GB
+        MINIMUM_AVAILABLE_SPACE_IN_GB=4
+        MINIMUM_AVAILABLE_SPACE_IN_KB=$(($MINIMUM_AVAILABLE_SPACE_IN_GB * 1024 * 1024))
+        # Use KB to avoid floating point warning like 3.1GB
+        df -k | tr -s ' ' | cut -d' ' -f 4,9 | while read -r LINE;
+        do
+          AVAIL=$(echo $LINE | cut -f1 -d' ')
+          MOUNT=$(echo $LINE | cut -f2 -d' ')
+          if [ "$MOUNT" = "/" ]; then
+            if [ "$AVAIL" -lt "$MINIMUM_AVAILABLE_SPACE_IN_KB" ]; then
+              echo "There is only ${AVAIL}KB free space left in $MOUNT, which is less than the minimum requirement of ${MINIMUM_AVAILABLE_SPACE_IN_KB}KB. Please help create an issue to PyTorch Release Engineering via https://github.com/pytorch/test-infra/issues and provide the link to the workflow run."
+              exit 1;
+            else
+              echo "There is ${AVAIL}KB free space left in $MOUNT, continue"
+            fi
+          fi
+        done
diffusers/.github/workflows/benchmark.yml
ADDED
@@ -0,0 +1,67 @@
+name: Benchmarking tests
+
+on:
+  workflow_dispatch:
+  schedule:
+    - cron: "30 1 1,15 * *" # every 2 weeks on the 1st and the 15th of every month at 1:30 AM
+
+env:
+  DIFFUSERS_IS_CI: yes
+  HF_HUB_ENABLE_HF_TRANSFER: 1
+  HF_HOME: /mnt/cache
+  OMP_NUM_THREADS: 8
+  MKL_NUM_THREADS: 8
+
+jobs:
+  torch_pipelines_cuda_benchmark_tests:
+    env:
+      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_BENCHMARK }}
+    name: Torch Core Pipelines CUDA Benchmarking Tests
+    strategy:
+      fail-fast: false
+      max-parallel: 1
+    runs-on:
+      group: aws-g6-4xlarge-plus
+    container:
+      image: diffusers/diffusers-pytorch-compile-cuda
+      options: --shm-size "16gb" --ipc host --gpus 0
+    steps:
+      - name: Checkout diffusers
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 2
+      - name: NVIDIA-SMI
+        run: |
+          nvidia-smi
+      - name: Install dependencies
+        run: |
+          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
+          python -m uv pip install -e [quality,test]
+          python -m uv pip install pandas peft
+      - name: Environment
+        run: |
+          python utils/print_env.py
+      - name: Diffusers Benchmarking
+        env:
+          HF_TOKEN: ${{ secrets.DIFFUSERS_BOT_TOKEN }}
+          BASE_PATH: benchmark_outputs
+        run: |
+          export TOTAL_GPU_MEMORY=$(python -c "import torch; print(torch.cuda.get_device_properties(0).total_memory / (1024**3))")
+          cd benchmarks && mkdir ${BASE_PATH} && python run_all.py && python push_results.py
+
+      - name: Test suite reports artifacts
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: benchmark_test_reports
+          path: benchmarks/benchmark_outputs
+
+      - name: Report success status
+        if: ${{ success() }}
+        run: |
+          pip install requests && python utils/notify_benchmarking_status.py --status=success
+
+      - name: Report failure status
+        if: ${{ failure() }}
+        run: |
+          pip install requests && python utils/notify_benchmarking_status.py --status=failure
diffusers/.github/workflows/build_docker_images.yml
ADDED
@@ -0,0 +1,104 @@
+name: Test, build, and push Docker images
+
+on:
+  pull_request: # During PRs, we just check if the changes Dockerfiles can be successfully built
+    branches:
+      - main
+    paths:
+      - "docker/**"
+  workflow_dispatch:
+  schedule:
+    - cron: "0 0 * * *" # every day at midnight
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+env:
+  REGISTRY: diffusers
+  CI_SLACK_CHANNEL: ${{ secrets.CI_DOCKER_CHANNEL }}
+
+jobs:
+  test-build-docker-images:
+    runs-on:
+      group: aws-general-8-plus
+    if: github.event_name == 'pull_request'
+    steps:
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+
+      - name: Check out code
+        uses: actions/checkout@v3
+
+      - name: Find Changed Dockerfiles
+        id: file_changes
+        uses: jitterbit/get-changed-files@v1
+        with:
+          format: "space-delimited"
+          token: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Build Changed Docker Images
+        run: |
+          CHANGED_FILES="${{ steps.file_changes.outputs.all }}"
+          for FILE in $CHANGED_FILES; do
+            if [[ "$FILE" == docker/*Dockerfile ]]; then
+              DOCKER_PATH="${FILE%/Dockerfile}"
+              DOCKER_TAG=$(basename "$DOCKER_PATH")
+              echo "Building Docker image for $DOCKER_TAG"
+              docker build -t "$DOCKER_TAG" "$DOCKER_PATH"
+            fi
+          done
+        if: steps.file_changes.outputs.all != ''
+
+  build-and-push-docker-images:
+    runs-on:
+      group: aws-general-8-plus
+    if: github.event_name != 'pull_request'
+
+    permissions:
+      contents: read
+      packages: write
+
+    strategy:
+      fail-fast: false
+      matrix:
+        image-name:
+          - diffusers-pytorch-cpu
+          - diffusers-pytorch-cuda
+          - diffusers-pytorch-compile-cuda
+          - diffusers-pytorch-xformers-cuda
+          - diffusers-pytorch-minimum-cuda
+          - diffusers-flax-cpu
+          - diffusers-flax-tpu
+          - diffusers-onnxruntime-cpu
+          - diffusers-onnxruntime-cuda
+          - diffusers-doc-builder
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v3
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ env.REGISTRY }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Build and push
+        uses: docker/build-push-action@v3
+        with:
+          no-cache: true
+          context: ./docker/${{ matrix.image-name }}
+          push: true
+          tags: ${{ env.REGISTRY }}/${{ matrix.image-name }}:latest
+
+      - name: Post to a Slack channel
+        id: slack
+        uses: huggingface/hf-workflows/.github/actions/post-slack@main
+        with:
+          # Slack channel id, channel name, or user id to post message.
+          # See also: https://api.slack.com/methods/chat.postMessage#channels
+          slack_channel: ${{ env.CI_SLACK_CHANNEL }}
+          title: "🤗 Results of the ${{ matrix.image-name }} Docker Image build"
+          status: ${{ job.status }}
+          slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
diffusers/.github/workflows/build_documentation.yml
ADDED
@@ -0,0 +1,27 @@
+name: Build documentation
+
+on:
+  push:
+    branches:
+      - main
+      - doc-builder*
+      - v*-release
+      - v*-patch
+    paths:
+      - "src/diffusers/**.py"
+      - "examples/**"
+      - "docs/**"
+
+jobs:
+  build:
+    uses: huggingface/doc-builder/.github/workflows/build_main_documentation.yml@main
+    with:
+      commit_sha: ${{ github.sha }}
+      install_libgl1: true
+      package: diffusers
+      notebook_folder: diffusers_doc
+      languages: en ko zh ja pt
+      custom_container: diffusers/diffusers-doc-builder
+    secrets:
+      token: ${{ secrets.HUGGINGFACE_PUSH }}
+      hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
diffusers/.github/workflows/build_pr_documentation.yml
ADDED
@@ -0,0 +1,23 @@
+name: Build PR Documentation
+
+on:
+  pull_request:
+    paths:
+      - "src/diffusers/**.py"
+      - "examples/**"
+      - "docs/**"
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+jobs:
+  build:
+    uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main
+    with:
+      commit_sha: ${{ github.event.pull_request.head.sha }}
+      pr_number: ${{ github.event.number }}
+      install_libgl1: true
+      package: diffusers
+      languages: en ko zh ja pt
+      custom_container: diffusers/diffusers-doc-builder
diffusers/.github/workflows/mirror_community_pipeline.yml
ADDED
@@ -0,0 +1,102 @@
+name: Mirror Community Pipeline
+
+on:
+  # Push changes on the main branch
+  push:
+    branches:
+      - main
+    paths:
+      - 'examples/community/**.py'
+
+    # And on tag creation (e.g. `v0.28.1`)
+    tags:
+      - '*'
+
+  # Manual trigger with ref input
+  workflow_dispatch:
+    inputs:
+      ref:
+        description: "Either 'main' or a tag ref"
+        required: true
+        default: 'main'
+
+jobs:
+  mirror_community_pipeline:
+    env:
+      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_COMMUNITY_MIRROR }}
+
+    runs-on: ubuntu-22.04
+    steps:
+      # Checkout to correct ref
+      #   If workflow dispatch
+      #     If ref is 'main', set:
+      #       CHECKOUT_REF=refs/heads/main
+      #       PATH_IN_REPO=main
+      #     Else it must be a tag. Set:
+      #       CHECKOUT_REF=refs/tags/{tag}
+      #       PATH_IN_REPO={tag}
+      #   If not workflow dispatch
+      #     If ref is 'refs/heads/main' => set 'main'
+      #     Else it must be a tag => set {tag}
+      - name: Set checkout_ref and path_in_repo
+        run: |
+          if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
+            if [ -z "${{ github.event.inputs.ref }}" ]; then
+              echo "Error: Missing ref input"
+              exit 1
+            elif [ "${{ github.event.inputs.ref }}" == "main" ]; then
+              echo "CHECKOUT_REF=refs/heads/main" >> $GITHUB_ENV
+              echo "PATH_IN_REPO=main" >> $GITHUB_ENV
+            else
+              echo "CHECKOUT_REF=refs/tags/${{ github.event.inputs.ref }}" >> $GITHUB_ENV
+              echo "PATH_IN_REPO=${{ github.event.inputs.ref }}" >> $GITHUB_ENV
+            fi
+          elif [ "${{ github.ref }}" == "refs/heads/main" ]; then
+            echo "CHECKOUT_REF=${{ github.ref }}" >> $GITHUB_ENV
+            echo "PATH_IN_REPO=main" >> $GITHUB_ENV
+          else
+            # e.g. refs/tags/v0.28.1 -> v0.28.1
+            echo "CHECKOUT_REF=${{ github.ref }}" >> $GITHUB_ENV
+            echo "PATH_IN_REPO=$(echo ${{ github.ref }} | sed 's/^refs\/tags\///')" >> $GITHUB_ENV
+          fi
+      - name: Print env vars
+        run: |
+          echo "CHECKOUT_REF: ${{ env.CHECKOUT_REF }}"
+          echo "PATH_IN_REPO: ${{ env.PATH_IN_REPO }}"
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ env.CHECKOUT_REF }}
+
+      # Setup + install dependencies
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: "3.10"
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install --upgrade huggingface_hub
+
+      # Check secret is set
+      - name: whoami
+        run: huggingface-cli whoami
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN_MIRROR_COMMUNITY_PIPELINES }}
+
+      # Push to HF! (under subfolder based on checkout ref)
+      # https://huggingface.co/datasets/diffusers/community-pipelines-mirror
+      - name: Mirror community pipeline to HF
+        run: huggingface-cli upload diffusers/community-pipelines-mirror ./examples/community ${PATH_IN_REPO} --repo-type dataset
+        env:
+          PATH_IN_REPO: ${{ env.PATH_IN_REPO }}
+          HF_TOKEN: ${{ secrets.HF_TOKEN_MIRROR_COMMUNITY_PIPELINES }}
+
+      - name: Report success status
+        if: ${{ success() }}
+        run: |
+          pip install requests && python utils/notify_community_pipelines_mirror.py --status=success
+
+      - name: Report failure status
+        if: ${{ failure() }}
+        run: |
+          pip install requests && python utils/notify_community_pipelines_mirror.py --status=failure
diffusers/.github/workflows/nightly_tests.yml
ADDED
@@ -0,0 +1,584 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: Nightly and release tests on main/release branch
|
2 |
+
|
3 |
+
on:
|
4 |
+
workflow_dispatch:
|
5 |
+
schedule:
|
6 |
+
- cron: "0 0 * * *" # every day at midnight
|
7 |
+
|
8 |
+
env:
|
9 |
+
DIFFUSERS_IS_CI: yes
|
10 |
+
HF_HUB_ENABLE_HF_TRANSFER: 1
|
11 |
+
OMP_NUM_THREADS: 8
|
12 |
+
MKL_NUM_THREADS: 8
|
13 |
+
PYTEST_TIMEOUT: 600
|
14 |
+
RUN_SLOW: yes
|
15 |
+
RUN_NIGHTLY: yes
|
16 |
+
PIPELINE_USAGE_CUTOFF: 5000
|
17 |
+
SLACK_API_TOKEN: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
18 |
+
|
19 |
+
jobs:
|
20 |
+
setup_torch_cuda_pipeline_matrix:
|
21 |
+
name: Setup Torch Pipelines CUDA Slow Tests Matrix
|
22 |
+
runs-on:
|
23 |
+
group: aws-general-8-plus
|
24 |
+
container:
|
25 |
+
image: diffusers/diffusers-pytorch-cpu
|
26 |
+
outputs:
|
27 |
+
pipeline_test_matrix: ${{ steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix }}
|
28 |
+
steps:
|
29 |
+
- name: Checkout diffusers
|
30 |
+
uses: actions/checkout@v3
|
31 |
+
with:
|
32 |
+
fetch-depth: 2
|
33 |
+
- name: Install dependencies
|
34 |
+
run: |
|
35 |
+
pip install -e .[test]
|
36 |
+
pip install huggingface_hub
|
37 |
+
- name: Fetch Pipeline Matrix
|
38 |
+
id: fetch_pipeline_matrix
|
39 |
+
run: |
|
40 |
+
matrix=$(python utils/fetch_torch_cuda_pipeline_test_matrix.py)
|
41 |
+
echo $matrix
|
42 |
+
echo "pipeline_test_matrix=$matrix" >> $GITHUB_OUTPUT
|
43 |
+
|
44 |
+
- name: Pipeline Tests Artifacts
|
45 |
+
if: ${{ always() }}
|
46 |
+
uses: actions/upload-artifact@v4
|
47 |
+
with:
|
48 |
+
name: test-pipelines.json
|
49 |
+
path: reports
|
50 |
+
|
51 |
+
run_nightly_tests_for_torch_pipelines:
|
52 |
+
name: Nightly Torch Pipelines CUDA Tests
|
53 |
+
needs: setup_torch_cuda_pipeline_matrix
|
54 |
+
strategy:
|
55 |
+
fail-fast: false
|
56 |
+
max-parallel: 8
|
57 |
+
matrix:
|
58 |
+
module: ${{ fromJson(needs.setup_torch_cuda_pipeline_matrix.outputs.pipeline_test_matrix) }}
|
59 |
+
runs-on:
|
60 |
+
group: aws-g4dn-2xlarge
|
61 |
+
container:
|
62 |
+
image: diffusers/diffusers-pytorch-cuda
|
63 |
+
options: --shm-size "16gb" --ipc host --gpus 0
|
64 |
+
steps:
|
65 |
+
- name: Checkout diffusers
|
66 |
+
uses: actions/checkout@v3
|
67 |
+
with:
|
68 |
+
fetch-depth: 2
|
69 |
+
- name: NVIDIA-SMI
|
70 |
+
run: nvidia-smi
|
71 |
+
- name: Install dependencies
|
72 |
+
run: |
|
73 |
+
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
74 |
+
python -m uv pip install -e [quality,test]
|
75 |
+
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
76 |
+
python -m uv pip install pytest-reportlog
|
77 |
+
- name: Environment
|
78 |
+
run: |
|
79 |
+
python utils/print_env.py
|
80 |
+
- name: Pipeline CUDA Test
|
81 |
+
env:
|
82 |
+
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
83 |
+
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
84 |
+
CUBLAS_WORKSPACE_CONFIG: :16:8
|
85 |
+
run: |
|
86 |
+
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
87 |
+
-s -v -k "not Flax and not Onnx" \
|
88 |
+
--make-reports=tests_pipeline_${{ matrix.module }}_cuda \
|
89 |
+
--report-log=tests_pipeline_${{ matrix.module }}_cuda.log \
|
90 |
+
tests/pipelines/${{ matrix.module }}
|
91 |
+
- name: Failure short reports
|
92 |
+
if: ${{ failure() }}
|
93 |
+
run: |
|
94 |
+
cat reports/tests_pipeline_${{ matrix.module }}_cuda_stats.txt
|
95 |
+
cat reports/tests_pipeline_${{ matrix.module }}_cuda_failures_short.txt
|
96 |
+
- name: Test suite reports artifacts
|
97 |
+
if: ${{ always() }}
|
98 |
+
uses: actions/upload-artifact@v4
|
99 |
+
with:
|
100 |
+
name: pipeline_${{ matrix.module }}_test_reports
|
101 |
+
path: reports
|
102 |
+
- name: Generate Report and Notify Channel
|
103 |
+
if: always()
|
104 |
+
run: |
|
105 |
+
pip install slack_sdk tabulate
|
106 |
+
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
107 |
+
|
108 |
+
run_nightly_tests_for_other_torch_modules:
|
109 |
+
name: Nightly Torch CUDA Tests
|
110 |
+
runs-on:
|
111 |
+
group: aws-g4dn-2xlarge
|
112 |
+
container:
|
113 |
+
image: diffusers/diffusers-pytorch-cuda
|
114 |
+
options: --shm-size "16gb" --ipc host --gpus 0
|
115 |
+
defaults:
|
116 |
+
run:
|
117 |
+
shell: bash
|
118 |
+
strategy:
|
119 |
+
fail-fast: false
|
120 |
+
max-parallel: 2
|
121 |
+
matrix:
|
122 |
+
module: [models, schedulers, lora, others, single_file, examples]
|
123 |
+
steps:
|
124 |
+
- name: Checkout diffusers
|
125 |
+
uses: actions/checkout@v3
|
126 |
+
with:
|
127 |
+
fetch-depth: 2
|
128 |
+
|
129 |
+
- name: Install dependencies
|
130 |
+
run: |
|
131 |
+
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
132 |
+
python -m uv pip install -e [quality,test]
|
133 |
+
python -m uv pip install peft@git+https://github.com/huggingface/peft.git
|
134 |
+
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
135 |
+
python -m uv pip install pytest-reportlog
|
136 |
+
- name: Environment
|
137 |
+
run: python utils/print_env.py
|
138 |
+
|
139 |
+
- name: Run nightly PyTorch CUDA tests for non-pipeline modules
|
140 |
+
if: ${{ matrix.module != 'examples'}}
|
141 |
+
env:
|
142 |
+
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
143 |
+
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
144 |
+
CUBLAS_WORKSPACE_CONFIG: :16:8
|
145 |
+
run: |
|
146 |
+
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
147 |
+
-s -v -k "not Flax and not Onnx" \
|
148 |
+
--make-reports=tests_torch_${{ matrix.module }}_cuda \
|
149 |
+
--report-log=tests_torch_${{ matrix.module }}_cuda.log \
|
150 |
+
tests/${{ matrix.module }}
|
151 |
+
|
152 |
+
- name: Run nightly example tests with Torch
|
153 |
+
if: ${{ matrix.module == 'examples' }}
|
154 |
+
env:
|
155 |
+
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
156 |
+
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
157 |
+
CUBLAS_WORKSPACE_CONFIG: :16:8
|
158 |
+
run: |
|
159 |
+
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
160 |
+
-s -v --make-reports=examples_torch_cuda \
|
161 |
+
--report-log=examples_torch_cuda.log \
|
162 |
+
examples/
|
163 |
+
|
164 |
+
- name: Failure short reports
|
165 |
+
if: ${{ failure() }}
|
166 |
+
run: |
|
167 |
+
cat reports/tests_torch_${{ matrix.module }}_cuda_stats.txt
|
168 |
+
cat reports/tests_torch_${{ matrix.module }}_cuda_failures_short.txt
|
169 |
+
|
170 |
+
- name: Test suite reports artifacts
|
171 |
+
if: ${{ always() }}
|
172 |
+
uses: actions/upload-artifact@v4
|
173 |
+
with:
|
174 |
+
name: torch_${{ matrix.module }}_cuda_test_reports
|
175 |
+
path: reports
|
176 |
+
|
177 |
+
- name: Generate Report and Notify Channel
|
178 |
+
if: always()
|
179 |
+
run: |
|
180 |
+
pip install slack_sdk tabulate
|
181 |
+
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
182 |
+
|
183 |
+
run_big_gpu_torch_tests:
|
184 |
+
name: Torch tests on big GPU
|
185 |
+
strategy:
|
186 |
+
fail-fast: false
|
187 |
+
max-parallel: 2
|
188 |
+
runs-on:
|
189 |
+
group: aws-g6e-xlarge-plus
|
190 |
+
container:
|
191 |
+
image: diffusers/diffusers-pytorch-cuda
|
192 |
+
options: --shm-size "16gb" --ipc host --gpus 0
|
193 |
+
steps:
|
194 |
+
- name: Checkout diffusers
|
195 |
+
uses: actions/checkout@v3
|
196 |
+
with:
|
197 |
+
fetch-depth: 2
|
198 |
+
- name: NVIDIA-SMI
|
199 |
+
run: nvidia-smi
|
200 |
+
- name: Install dependencies
|
201 |
+
run: |
|
202 |
+
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
203 |
+
python -m uv pip install -e [quality,test]
|
204 |
+
python -m uv pip install peft@git+https://github.com/huggingface/peft.git
|
205 |
+
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
206 |
+
python -m uv pip install pytest-reportlog
|
207 |
+
- name: Environment
|
208 |
+
run: |
|
209 |
+
python utils/print_env.py
|
210 |
+
- name: Selected Torch CUDA Test on big GPU
|
211 |
+
env:
|
212 |
+
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
213 |
+
# https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
|
214 |
+
CUBLAS_WORKSPACE_CONFIG: :16:8
|
215 |
+
BIG_GPU_MEMORY: 40
|
216 |
+
run: |
|
217 |
+
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
218 |
+
-m "big_gpu_with_torch_cuda" \
|
219 |
+
--make-reports=tests_big_gpu_torch_cuda \
|
220 |
+
--report-log=tests_big_gpu_torch_cuda.log \
|
221 |
+
tests/
|
222 |
+
- name: Failure short reports
|
223 |
+
if: ${{ failure() }}
|
224 |
+
run: |
|
225 |
+
cat reports/tests_big_gpu_torch_cuda_stats.txt
|
226 |
+
cat reports/tests_big_gpu_torch_cuda_failures_short.txt
|
227 |
+
- name: Test suite reports artifacts
|
228 |
+
if: ${{ always() }}
|
229 |
+
uses: actions/upload-artifact@v4
|
230 |
+
with:
|
231 |
+
name: torch_cuda_big_gpu_test_reports
|
232 |
+
path: reports
|
233 |
+
- name: Generate Report and Notify Channel
|
234 |
+
if: always()
|
235 |
+
run: |
|
236 |
+
pip install slack_sdk tabulate
|
237 |
+
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
238 |
+
|
  torch_minimum_version_cuda_tests:
    name: Torch Minimum Version CUDA Tests
    runs-on:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-minimum-cuda
      options: --shm-size "16gb" --ipc host --gpus 0
    defaults:
      run:
        shell: bash
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
          python -m uv pip install peft@git+https://github.com/huggingface/peft.git
          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git

      - name: Environment
        run: |
          python utils/print_env.py

      - name: Run PyTorch CUDA tests
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
            -s -v -k "not Flax and not Onnx" \
            --make-reports=tests_torch_minimum_version_cuda \
            tests/models/test_modeling_common.py \
            tests/pipelines/test_pipelines_common.py \
            tests/pipelines/test_pipeline_utils.py \
            tests/pipelines/test_pipelines.py \
            tests/pipelines/test_pipelines_auto.py \
            tests/schedulers/test_schedulers.py \
            tests/others

      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/tests_torch_minimum_version_cuda_stats.txt
          cat reports/tests_torch_minimum_version_cuda_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: torch_minimum_version_cuda_test_reports
          path: reports
run_flax_tpu_tests:
|
297 |
+
name: Nightly Flax TPU Tests
|
298 |
+
runs-on:
|
299 |
+
group: gcp-ct5lp-hightpu-8t
|
300 |
+
if: github.event_name == 'schedule'
|
301 |
+
|
302 |
+
container:
|
303 |
+
image: diffusers/diffusers-flax-tpu
|
304 |
+
options: --shm-size "16gb" --ipc host --privileged ${{ vars.V5_LITEPOD_8_ENV}} -v /mnt/hf_cache:/mnt/hf_cache
|
305 |
+
defaults:
|
306 |
+
run:
|
307 |
+
shell: bash
|
308 |
+
steps:
|
309 |
+
- name: Checkout diffusers
|
310 |
+
uses: actions/checkout@v3
|
311 |
+
with:
|
312 |
+
fetch-depth: 2
|
313 |
+
|
314 |
+
- name: Install dependencies
|
315 |
+
run: |
|
316 |
+
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
317 |
+
python -m uv pip install -e [quality,test]
|
318 |
+
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
319 |
+
python -m uv pip install pytest-reportlog
|
320 |
+
|
321 |
+
- name: Environment
|
322 |
+
run: python utils/print_env.py
|
323 |
+
|
324 |
+
- name: Run nightly Flax TPU tests
|
325 |
+
env:
|
326 |
+
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
327 |
+
run: |
|
328 |
+
python -m pytest -n 0 \
|
329 |
+
-s -v -k "Flax" \
|
330 |
+
--make-reports=tests_flax_tpu \
|
331 |
+
--report-log=tests_flax_tpu.log \
|
332 |
+
tests/
|
333 |
+
|
334 |
+
- name: Failure short reports
|
335 |
+
if: ${{ failure() }}
|
336 |
+
run: |
|
337 |
+
cat reports/tests_flax_tpu_stats.txt
|
338 |
+
cat reports/tests_flax_tpu_failures_short.txt
|
339 |
+
|
340 |
+
- name: Test suite reports artifacts
|
341 |
+
if: ${{ always() }}
|
342 |
+
uses: actions/upload-artifact@v4
|
343 |
+
with:
|
344 |
+
name: flax_tpu_test_reports
|
345 |
+
path: reports
|
346 |
+
|
347 |
+
- name: Generate Report and Notify Channel
|
348 |
+
if: always()
|
349 |
+
run: |
|
350 |
+
pip install slack_sdk tabulate
|
351 |
+
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
352 |
+
|
353 |
+
run_nightly_onnx_tests:
|
354 |
+
name: Nightly ONNXRuntime CUDA tests on Ubuntu
|
355 |
+
runs-on:
|
356 |
+
group: aws-g4dn-2xlarge
|
357 |
+
container:
|
358 |
+
image: diffusers/diffusers-onnxruntime-cuda
|
359 |
+
options: --gpus 0 --shm-size "16gb" --ipc host
|
360 |
+
|
361 |
+
steps:
|
362 |
+
- name: Checkout diffusers
|
363 |
+
uses: actions/checkout@v3
|
364 |
+
with:
|
365 |
+
fetch-depth: 2
|
366 |
+
|
367 |
+
- name: NVIDIA-SMI
|
368 |
+
run: nvidia-smi
|
369 |
+
|
370 |
+
- name: Install dependencies
|
371 |
+
run: |
|
372 |
+
python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
|
373 |
+
python -m uv pip install -e [quality,test]
|
374 |
+
pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
|
375 |
+
python -m uv pip install pytest-reportlog
|
376 |
+
- name: Environment
|
377 |
+
run: python utils/print_env.py
|
378 |
+
|
379 |
+
- name: Run Nightly ONNXRuntime CUDA tests
|
380 |
+
env:
|
381 |
+
HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
|
382 |
+
run: |
|
383 |
+
python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
|
384 |
+
-s -v -k "Onnx" \
|
385 |
+
--make-reports=tests_onnx_cuda \
|
386 |
+
--report-log=tests_onnx_cuda.log \
|
387 |
+
tests/
|
388 |
+
|
389 |
+
- name: Failure short reports
|
390 |
+
if: ${{ failure() }}
|
391 |
+
run: |
|
392 |
+
cat reports/tests_onnx_cuda_stats.txt
|
393 |
+
cat reports/tests_onnx_cuda_failures_short.txt
|
394 |
+
|
395 |
+
- name: Test suite reports artifacts
|
396 |
+
if: ${{ always() }}
|
397 |
+
uses: actions/upload-artifact@v4
|
398 |
+
with:
|
399 |
+
name: tests_onnx_cuda_reports
|
400 |
+
path: reports
|
401 |
+
|
402 |
+
- name: Generate Report and Notify Channel
|
403 |
+
if: always()
|
404 |
+
run: |
|
405 |
+
pip install slack_sdk tabulate
|
406 |
+
python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
|
407 |
+
|
  run_nightly_quantization_tests:
    name: Torch quantization nightly tests
    strategy:
      fail-fast: false
      max-parallel: 2
      matrix:
        config:
          - backend: "bitsandbytes"
            test_location: "bnb"
          - backend: "gguf"
            test_location: "gguf"
          - backend: "torchao"
            test_location: "torchao"
    runs-on:
      group: aws-g6e-xlarge-plus
    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --shm-size "20gb" --ipc host --gpus 0
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2
      - name: NVIDIA-SMI
        run: nvidia-smi
      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
          python -m uv pip install -U ${{ matrix.config.backend }}
          python -m uv pip install pytest-reportlog
      - name: Environment
        run: |
          python utils/print_env.py
      - name: ${{ matrix.config.backend }} quantization tests on GPU
        env:
          HF_TOKEN: ${{ secrets.DIFFUSERS_HF_HUB_READ_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
          BIG_GPU_MEMORY: 40
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
            --make-reports=tests_${{ matrix.config.backend }}_torch_cuda \
            --report-log=tests_${{ matrix.config.backend }}_torch_cuda.log \
            tests/quantization/${{ matrix.config.test_location }}
      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/tests_${{ matrix.config.backend }}_torch_cuda_stats.txt
          cat reports/tests_${{ matrix.config.backend }}_torch_cuda_failures_short.txt
      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: torch_cuda_${{ matrix.config.backend }}_reports
          path: reports
      - name: Generate Report and Notify Channel
        if: always()
        run: |
          pip install slack_sdk tabulate
          python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
  # M1 runner currently not well supported
  # TODO: (Dhruv) add these back when we setup better testing for Apple Silicon
  # run_nightly_tests_apple_m1:
  #   name: Nightly PyTorch MPS tests on MacOS
  #   runs-on: [ self-hosted, apple-m1 ]
  #   if: github.event_name == 'schedule'
  #
  #   steps:
  #     - name: Checkout diffusers
  #       uses: actions/checkout@v3
  #       with:
  #         fetch-depth: 2
  #
  #     - name: Clean checkout
  #       shell: arch -arch arm64 bash {0}
  #       run: |
  #         git clean -fxd
  #     - name: Setup miniconda
  #       uses: ./.github/actions/setup-miniconda
  #       with:
  #         python-version: 3.9
  #
  #     - name: Install dependencies
  #       shell: arch -arch arm64 bash {0}
  #       run: |
  #         ${CONDA_RUN} python -m pip install --upgrade pip uv
  #         ${CONDA_RUN} python -m uv pip install -e [quality,test]
  #         ${CONDA_RUN} python -m uv pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu
  #         ${CONDA_RUN} python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate
  #         ${CONDA_RUN} python -m uv pip install pytest-reportlog
  #     - name: Environment
  #       shell: arch -arch arm64 bash {0}
  #       run: |
  #         ${CONDA_RUN} python utils/print_env.py
  #     - name: Run nightly PyTorch tests on M1 (MPS)
  #       shell: arch -arch arm64 bash {0}
  #       env:
  #         HF_HOME: /System/Volumes/Data/mnt/cache
  #         HF_TOKEN: ${{ secrets.HF_TOKEN }}
  #       run: |
  #         ${CONDA_RUN} python -m pytest -n 1 -s -v --make-reports=tests_torch_mps \
  #           --report-log=tests_torch_mps.log \
  #           tests/
  #     - name: Failure short reports
  #       if: ${{ failure() }}
  #       run: cat reports/tests_torch_mps_failures_short.txt
  #
  #     - name: Test suite reports artifacts
  #       if: ${{ always() }}
  #       uses: actions/upload-artifact@v4
  #       with:
  #         name: torch_mps_test_reports
  #         path: reports
  #
  #     - name: Generate Report and Notify Channel
  #       if: always()
  #       run: |
  #         pip install slack_sdk tabulate
  #         python utils/log_reports.py >> $GITHUB_STEP_SUMMARY run_nightly_tests_apple_m1:
  #   name: Nightly PyTorch MPS tests on MacOS
  #   runs-on: [ self-hosted, apple-m1 ]
  #   if: github.event_name == 'schedule'
  #
  #   steps:
  #     - name: Checkout diffusers
  #       uses: actions/checkout@v3
  #       with:
  #         fetch-depth: 2
  #
  #     - name: Clean checkout
  #       shell: arch -arch arm64 bash {0}
  #       run: |
  #         git clean -fxd
  #     - name: Setup miniconda
  #       uses: ./.github/actions/setup-miniconda
  #       with:
  #         python-version: 3.9
  #
  #     - name: Install dependencies
  #       shell: arch -arch arm64 bash {0}
  #       run: |
  #         ${CONDA_RUN} python -m pip install --upgrade pip uv
  #         ${CONDA_RUN} python -m uv pip install -e [quality,test]
  #         ${CONDA_RUN} python -m uv pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu
  #         ${CONDA_RUN} python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate
  #         ${CONDA_RUN} python -m uv pip install pytest-reportlog
  #     - name: Environment
  #       shell: arch -arch arm64 bash {0}
  #       run: |
  #         ${CONDA_RUN} python utils/print_env.py
  #     - name: Run nightly PyTorch tests on M1 (MPS)
  #       shell: arch -arch arm64 bash {0}
  #       env:
  #         HF_HOME: /System/Volumes/Data/mnt/cache
  #         HF_TOKEN: ${{ secrets.HF_TOKEN }}
  #       run: |
  #         ${CONDA_RUN} python -m pytest -n 1 -s -v --make-reports=tests_torch_mps \
  #           --report-log=tests_torch_mps.log \
  #           tests/
  #     - name: Failure short reports
  #       if: ${{ failure() }}
  #       run: cat reports/tests_torch_mps_failures_short.txt
  #
  #     - name: Test suite reports artifacts
  #       if: ${{ always() }}
  #       uses: actions/upload-artifact@v4
  #       with:
  #         name: torch_mps_test_reports
  #         path: reports
  #
  #     - name: Generate Report and Notify Channel
  #       if: always()
  #       run: |
  #         pip install slack_sdk tabulate
  #         python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
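The big-GPU nightly job above selects its tests purely by pytest marker (`-m "big_gpu_with_torch_cuda"`). As a hedged illustration of how a test would opt into that selection (the marker name comes from the workflow; where the marker is registered in the diffusers test configuration is assumed, not shown here):

# Minimal sketch, not taken from the diffusers test suite: a test tagged with the
# marker that `-m "big_gpu_with_torch_cuda"` selects. Registering the marker in
# pytest.ini/pyproject.toml is assumed.
import pytest

@pytest.mark.big_gpu_with_torch_cuda
def test_large_checkpoint_loads():
    # Placeholder body; a real test would exercise a pipeline that needs ~40GB of GPU memory.
    assert True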
diffusers/.github/workflows/notify_slack_about_release.yml
ADDED
@@ -0,0 +1,23 @@
name: Notify Slack about a release

on:
  workflow_dispatch:
  release:
    types: [published]

jobs:
  build:
    runs-on: ubuntu-22.04

    steps:
      - uses: actions/checkout@v3

      - name: Setup Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'

      - name: Notify Slack about the release
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
        run: pip install requests && python utils/notify_slack_about_release.py
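This workflow only installs `requests` and delegates to `utils/notify_slack_about_release.py` with `SLACK_WEBHOOK_URL` exported. A minimal sketch of what such a notifier can look like, offered as an assumption rather than the actual script (the real payload and formatting may differ):

# Hedged sketch of a release notifier in the spirit of utils/notify_slack_about_release.py.
# Assumes the SLACK_WEBHOOK_URL secret exported by the step above.
import os
import requests

def notify(message: str) -> None:
    webhook_url = os.environ["SLACK_WEBHOOK_URL"]
    # Slack incoming webhooks accept a JSON body with a "text" field.
    response = requests.post(webhook_url, json={"text": message}, timeout=10)
    response.raise_for_status()

if __name__ == "__main__":
    notify("A new diffusers release was published.")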
diffusers/.github/workflows/pr_dependency_test.yml
ADDED
@@ -0,0 +1,35 @@
name: Run dependency tests

on:
  pull_request:
    branches:
      - main
    paths:
      - "src/diffusers/**.py"
  push:
    branches:
      - main

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  check_dependencies:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.8"
      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m pip install --upgrade pip uv
          python -m uv pip install -e .
          python -m uv pip install pytest
      - name: Check for soft dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          pytest tests/others/test_dependencies.py
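The job installs only the bare package plus pytest (no torch or flax extras) before running `tests/others/test_dependencies.py`. A hedged sketch of the kind of assertion a soft-dependency test of that shape makes, assuming its goal is that the base import works without optional backends:

# Hedged sketch in the spirit of tests/others/test_dependencies.py (the real test's
# contents are not shown in this diff): the base package should import even when
# optional backends are absent.
import importlib

def test_base_import_has_no_hard_optional_deps():
    diffusers = importlib.import_module("diffusers")
    # The package exposes a version string regardless of which backends are installed.
    assert hasattr(diffusers, "__version__")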
diffusers/.github/workflows/pr_flax_dependency_test.yml
ADDED
@@ -0,0 +1,38 @@
name: Run Flax dependency tests

on:
  pull_request:
    branches:
      - main
    paths:
      - "src/diffusers/**.py"
  push:
    branches:
      - main

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  check_flax_dependencies:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.8"
      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m pip install --upgrade pip uv
          python -m uv pip install -e .
          python -m uv pip install "jax[cpu]>=0.2.16,!=0.3.2"
          python -m uv pip install "flax>=0.4.1"
          python -m uv pip install "jaxlib>=0.1.65"
          python -m uv pip install pytest
      - name: Check for soft dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          pytest tests/others/test_dependencies.py
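The Flax variant additionally pins `jax[cpu]`, `flax`, and `jaxlib` before running the same dependency test. A small sanity-check sketch, not part of the workflow, for confirming that a CPU-only JAX/Flax install of this kind is importable and sees a device:

# Illustrative only: verify the CPU-only JAX/Flax install that the job above sets up.
import jax
import flax

print("jax", jax.__version__, "flax", flax.__version__)
print(jax.devices())  # expected to list CPU devices in this CPU-only job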
diffusers/.github/workflows/pr_test_fetcher.yml
ADDED
@@ -0,0 +1,177 @@
name: Fast tests for PRs - Test Fetcher

on: workflow_dispatch

env:
  DIFFUSERS_IS_CI: yes
  OMP_NUM_THREADS: 4
  MKL_NUM_THREADS: 4
  PYTEST_TIMEOUT: 60

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  setup_pr_tests:
    name: Setup PR Tests
    runs-on:
      group: aws-general-8-plus
    container:
      image: diffusers/diffusers-pytorch-cpu
      options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
    defaults:
      run:
        shell: bash
    outputs:
      matrix: ${{ steps.set_matrix.outputs.matrix }}
      test_map: ${{ steps.set_matrix.outputs.test_map }}
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
      - name: Environment
        run: |
          python utils/print_env.py
          echo $(git --version)
      - name: Fetch Tests
        run: |
          python utils/tests_fetcher.py | tee test_preparation.txt
      - name: Report fetched tests
        uses: actions/upload-artifact@v3
        with:
          name: test_fetched
          path: test_preparation.txt
      - id: set_matrix
        name: Create Test Matrix
        # The `keys` is used as GitHub actions matrix for jobs, i.e. `models`, `pipelines`, etc.
        # The `test_map` is used to get the actual identified test files under each key.
        # If no test to run (so no `test_map.json` file), create a dummy map (empty matrix will fail)
        run: |
          if [ -f test_map.json ]; then
              keys=$(python3 -c 'import json; fp = open("test_map.json"); test_map = json.load(fp); fp.close(); d = list(test_map.keys()); print(json.dumps(d))')
              test_map=$(python3 -c 'import json; fp = open("test_map.json"); test_map = json.load(fp); fp.close(); print(json.dumps(test_map))')
          else
              keys=$(python3 -c 'keys = ["dummy"]; print(keys)')
              test_map=$(python3 -c 'test_map = {"dummy": []}; print(test_map)')
          fi
          echo $keys
          echo $test_map
          echo "matrix=$keys" >> $GITHUB_OUTPUT
          echo "test_map=$test_map" >> $GITHUB_OUTPUT

  run_pr_tests:
    name: Run PR Tests
    needs: setup_pr_tests
    if: contains(fromJson(needs.setup_pr_tests.outputs.matrix), 'dummy') != true
    strategy:
      fail-fast: false
      max-parallel: 2
      matrix:
        modules: ${{ fromJson(needs.setup_pr_tests.outputs.matrix) }}
    runs-on:
      group: aws-general-8-plus
    container:
      image: diffusers/diffusers-pytorch-cpu
      options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
    defaults:
      run:
        shell: bash
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m pip install -e [quality,test]
          python -m pip install accelerate

      - name: Environment
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python utils/print_env.py

      - name: Run all selected tests on CPU
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m pytest -n 2 --dist=loadfile -v --make-reports=${{ matrix.modules }}_tests_cpu ${{ fromJson(needs.setup_pr_tests.outputs.test_map)[matrix.modules] }}

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: |
          cat reports/${{ matrix.modules }}_tests_cpu_stats.txt
          cat reports/${{ matrix.modules }}_tests_cpu_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v3
        with:
          name: ${{ matrix.modules }}_test_reports
          path: reports

  run_staging_tests:
    strategy:
      fail-fast: false
      matrix:
        config:
          - name: Hub tests for models, schedulers, and pipelines
            framework: hub_tests_pytorch
            runner: aws-general-8-plus
            image: diffusers/diffusers-pytorch-cpu
            report: torch_hub

    name: ${{ matrix.config.name }}
    runs-on:
      group: ${{ matrix.config.runner }}
    container:
      image: ${{ matrix.config.image }}
      options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/

    defaults:
      run:
        shell: bash

    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m pip install -e [quality,test]

      - name: Environment
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python utils/print_env.py

      - name: Run Hub tests for models, schedulers, and pipelines on a staging env
        if: ${{ matrix.config.framework == 'hub_tests_pytorch' }}
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          HUGGINGFACE_CO_STAGING=true python -m pytest \
            -m "is_staging_test" \
            --make-reports=tests_${{ matrix.config.report }} \
            tests

      - name: Failure short reports
        if: ${{ failure() }}
        run: cat reports/tests_${{ matrix.config.report }}_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: pr_${{ matrix.config.report }}_test_reports
          path: reports
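The `Create Test Matrix` step above builds the job matrix and the per-category test map from `test_map.json` with inline python3 one-liners. The same logic, restated as a standalone sketch (assuming, as the step's comments say, that `test_map.json` maps a category such as `models` or `pipelines` to its selected test files):

# Restatement of the inline one-liners in the "Create Test Matrix" step; illustrative.
import json
import os

def build_matrix(path: str = "test_map.json"):
    if os.path.exists(path):
        with open(path) as fp:
            test_map = json.load(fp)
    else:
        # No tests were selected: fall back to a dummy entry so the matrix is non-empty.
        test_map = {"dummy": []}
    keys = json.dumps(list(test_map.keys()))   # becomes the `matrix` output
    return keys, json.dumps(test_map)          # becomes the `test_map` output

if __name__ == "__main__":
    matrix, test_map = build_matrix()
    print(matrix)
    print(test_map)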
diffusers/.github/workflows/pr_tests.yml
ADDED
@@ -0,0 +1,301 @@
name: Fast tests for PRs

on:
  pull_request:
    branches:
      - main
    paths:
      - "src/diffusers/**.py"
      - "benchmarks/**.py"
      - "examples/**.py"
      - "scripts/**.py"
      - "tests/**.py"
      - ".github/**.yml"
      - "utils/**.py"
  push:
    branches:
      - ci-*

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

env:
  DIFFUSERS_IS_CI: yes
  HF_HUB_ENABLE_HF_TRANSFER: 1
  OMP_NUM_THREADS: 4
  MKL_NUM_THREADS: 4
  PYTEST_TIMEOUT: 60

jobs:
  check_code_quality:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.8"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install .[quality]
      - name: Check quality
        run: make quality
      - name: Check if failure
        if: ${{ failure() }}
        run: |
          echo "Quality check failed. Please ensure the right dependency versions are installed with 'pip install -e .[quality]' and run 'make style && make quality'" >> $GITHUB_STEP_SUMMARY

  check_repository_consistency:
    needs: check_code_quality
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.8"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install .[quality]
      - name: Check repo consistency
        run: |
          python utils/check_copies.py
          python utils/check_dummies.py
          make deps_table_check_updated
      - name: Check if failure
        if: ${{ failure() }}
        run: |
          echo "Repo consistency check failed. Please ensure the right dependency versions are installed with 'pip install -e .[quality]' and run 'make fix-copies'" >> $GITHUB_STEP_SUMMARY

  run_fast_tests:
    needs: [check_code_quality, check_repository_consistency]
    strategy:
      fail-fast: false
      matrix:
        config:
          - name: Fast PyTorch Pipeline CPU tests
            framework: pytorch_pipelines
            runner: aws-highmemory-32-plus
            image: diffusers/diffusers-pytorch-cpu
            report: torch_cpu_pipelines
          - name: Fast PyTorch Models & Schedulers CPU tests
            framework: pytorch_models
            runner: aws-general-8-plus
            image: diffusers/diffusers-pytorch-cpu
            report: torch_cpu_models_schedulers
          - name: Fast Flax CPU tests
            framework: flax
            runner: aws-general-8-plus
            image: diffusers/diffusers-flax-cpu
            report: flax_cpu
          - name: PyTorch Example CPU tests
            framework: pytorch_examples
            runner: aws-general-8-plus
            image: diffusers/diffusers-pytorch-cpu
            report: torch_example_cpu

    name: ${{ matrix.config.name }}

    runs-on:
      group: ${{ matrix.config.runner }}

    container:
      image: ${{ matrix.config.image }}
      options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/

    defaults:
      run:
        shell: bash

    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
          python -m uv pip install accelerate

      - name: Environment
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python utils/print_env.py

      - name: Run fast PyTorch Pipeline CPU tests
        if: ${{ matrix.config.framework == 'pytorch_pipelines' }}
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile \
            -s -v -k "not Flax and not Onnx" \
            --make-reports=tests_${{ matrix.config.report }} \
            tests/pipelines

      - name: Run fast PyTorch Model Scheduler CPU tests
        if: ${{ matrix.config.framework == 'pytorch_models' }}
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
            -s -v -k "not Flax and not Onnx and not Dependency" \
            --make-reports=tests_${{ matrix.config.report }} \
            tests/models tests/schedulers tests/others

      - name: Run fast Flax TPU tests
        if: ${{ matrix.config.framework == 'flax' }}
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
            -s -v -k "Flax" \
            --make-reports=tests_${{ matrix.config.report }} \
            tests

      - name: Run example PyTorch CPU tests
        if: ${{ matrix.config.framework == 'pytorch_examples' }}
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install peft timm
          python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
            --make-reports=tests_${{ matrix.config.report }} \
            examples

      - name: Failure short reports
        if: ${{ failure() }}
        run: cat reports/tests_${{ matrix.config.report }}_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: pr_${{ matrix.config.framework }}_${{ matrix.config.report }}_test_reports
          path: reports

  run_staging_tests:
    needs: [check_code_quality, check_repository_consistency]
    strategy:
      fail-fast: false
      matrix:
        config:
          - name: Hub tests for models, schedulers, and pipelines
            framework: hub_tests_pytorch
            runner:
              group: aws-general-8-plus
            image: diffusers/diffusers-pytorch-cpu
            report: torch_hub

    name: ${{ matrix.config.name }}

    runs-on: ${{ matrix.config.runner }}

    container:
      image: ${{ matrix.config.image }}
      options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/

    defaults:
      run:
        shell: bash

    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]

      - name: Environment
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python utils/print_env.py

      - name: Run Hub tests for models, schedulers, and pipelines on a staging env
        if: ${{ matrix.config.framework == 'hub_tests_pytorch' }}
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          HUGGINGFACE_CO_STAGING=true python -m pytest \
            -m "is_staging_test" \
            --make-reports=tests_${{ matrix.config.report }} \
            tests

      - name: Failure short reports
        if: ${{ failure() }}
        run: cat reports/tests_${{ matrix.config.report }}_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: pr_${{ matrix.config.report }}_test_reports
          path: reports

  run_lora_tests:
    needs: [check_code_quality, check_repository_consistency]
    strategy:
      fail-fast: false

    name: LoRA tests with PEFT main

    runs-on:
      group: aws-general-8-plus

    container:
      image: diffusers/diffusers-pytorch-cpu
      options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/

    defaults:
      run:
        shell: bash

    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
          # TODO (sayakpaul, DN6): revisit `--no-deps`
          python -m pip install -U peft@git+https://github.com/huggingface/peft.git --no-deps
          python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git --no-deps
          python -m uv pip install -U tokenizers
          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git --no-deps

      - name: Environment
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python utils/print_env.py

      - name: Run fast PyTorch LoRA tests with PEFT
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
            -s -v \
            --make-reports=tests_peft_main \
            tests/lora/
          python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
            -s -v \
            --make-reports=tests_models_lora_peft_main \
            tests/models/ -k "lora"

      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/tests_lora_failures_short.txt
          cat reports/tests_models_lora_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: pr_main_test_reports
          path: reports
diffusers/.github/workflows/pr_torch_dependency_test.yml
ADDED
@@ -0,0 +1,36 @@
name: Run Torch dependency tests

on:
  pull_request:
    branches:
      - main
    paths:
      - "src/diffusers/**.py"
  push:
    branches:
      - main

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  check_torch_dependencies:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.8"
      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m pip install --upgrade pip uv
          python -m uv pip install -e .
          python -m uv pip install torch torchvision torchaudio
          python -m uv pip install pytest
      - name: Check for soft dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          pytest tests/others/test_dependencies.py
diffusers/.github/workflows/push_tests.yml
ADDED
@@ -0,0 +1,392 @@
name: Fast GPU Tests on main

on:
  workflow_dispatch:
  push:
    branches:
      - main
    paths:
      - "src/diffusers/**.py"
      - "examples/**.py"
      - "tests/**.py"

env:
  DIFFUSERS_IS_CI: yes
  OMP_NUM_THREADS: 8
  MKL_NUM_THREADS: 8
  HF_HUB_ENABLE_HF_TRANSFER: 1
  PYTEST_TIMEOUT: 600
  PIPELINE_USAGE_CUTOFF: 50000

jobs:
  setup_torch_cuda_pipeline_matrix:
    name: Setup Torch Pipelines CUDA Slow Tests Matrix
    runs-on:
      group: aws-general-8-plus
    container:
      image: diffusers/diffusers-pytorch-cpu
    outputs:
      pipeline_test_matrix: ${{ steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix }}
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2
      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
      - name: Environment
        run: |
          python utils/print_env.py
      - name: Fetch Pipeline Matrix
        id: fetch_pipeline_matrix
        run: |
          matrix=$(python utils/fetch_torch_cuda_pipeline_test_matrix.py)
          echo $matrix
          echo "pipeline_test_matrix=$matrix" >> $GITHUB_OUTPUT
      - name: Pipeline Tests Artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: test-pipelines.json
          path: reports

  torch_pipelines_cuda_tests:
    name: Torch Pipelines CUDA Tests
    needs: setup_torch_cuda_pipeline_matrix
    strategy:
      fail-fast: false
      max-parallel: 8
      matrix:
        module: ${{ fromJson(needs.setup_torch_cuda_pipeline_matrix.outputs.pipeline_test_matrix) }}
    runs-on:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --shm-size "16gb" --ipc host --gpus 0
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2
      - name: NVIDIA-SMI
        run: |
          nvidia-smi
      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
      - name: Environment
        run: |
          python utils/print_env.py
      - name: PyTorch CUDA checkpoint tests on Ubuntu
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
            -s -v -k "not Flax and not Onnx" \
            --make-reports=tests_pipeline_${{ matrix.module }}_cuda \
            tests/pipelines/${{ matrix.module }}
      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/tests_pipeline_${{ matrix.module }}_cuda_stats.txt
          cat reports/tests_pipeline_${{ matrix.module }}_cuda_failures_short.txt
      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: pipeline_${{ matrix.module }}_test_reports
          path: reports

  torch_cuda_tests:
    name: Torch CUDA Tests
    runs-on:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --shm-size "16gb" --ipc host --gpus 0
    defaults:
      run:
        shell: bash
    strategy:
      fail-fast: false
      max-parallel: 2
      matrix:
        module: [models, schedulers, lora, others, single_file]
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
          python -m uv pip install peft@git+https://github.com/huggingface/peft.git
          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git

      - name: Environment
        run: |
          python utils/print_env.py

      - name: Run PyTorch CUDA tests
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
            -s -v -k "not Flax and not Onnx" \
            --make-reports=tests_torch_cuda_${{ matrix.module }} \
            tests/${{ matrix.module }}

      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/tests_torch_cuda_${{ matrix.module }}_stats.txt
          cat reports/tests_torch_cuda_${{ matrix.module }}_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: torch_cuda_test_reports_${{ matrix.module }}
          path: reports

  flax_tpu_tests:
    name: Flax TPU Tests
    runs-on:
      group: gcp-ct5lp-hightpu-8t
    container:
      image: diffusers/diffusers-flax-tpu
      options: --shm-size "16gb" --ipc host --privileged ${{ vars.V5_LITEPOD_8_ENV}} -v /mnt/hf_cache:/mnt/hf_cache
    defaults:
      run:
        shell: bash
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git

      - name: Environment
        run: |
          python utils/print_env.py

      - name: Run Flax TPU tests
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          python -m pytest -n 0 \
            -s -v -k "Flax" \
            --make-reports=tests_flax_tpu \
            tests/

      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/tests_flax_tpu_stats.txt
          cat reports/tests_flax_tpu_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: flax_tpu_test_reports
          path: reports

  onnx_cuda_tests:
    name: ONNX CUDA Tests
    runs-on:
      group: aws-g4dn-2xlarge
    container:
      image: diffusers/diffusers-onnxruntime-cuda
      options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --gpus 0
    defaults:
      run:
        shell: bash
    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]
          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git

      - name: Environment
        run: |
          python utils/print_env.py

      - name: Run ONNXRuntime CUDA tests
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
            -s -v -k "Onnx" \
            --make-reports=tests_onnx_cuda \
            tests/

      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/tests_onnx_cuda_stats.txt
          cat reports/tests_onnx_cuda_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: onnx_cuda_test_reports
          path: reports

  run_torch_compile_tests:
    name: PyTorch Compile CUDA tests

    runs-on:
      group: aws-g4dn-2xlarge

    container:
      image: diffusers/diffusers-pytorch-compile-cuda
      options: --gpus 0 --shm-size "16gb" --ipc host

    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: NVIDIA-SMI
        run: |
          nvidia-smi
      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test,training]
      - name: Environment
        run: |
          python utils/print_env.py
      - name: Run example tests on GPU
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          RUN_COMPILE: yes
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/
      - name: Failure short reports
        if: ${{ failure() }}
        run: cat reports/tests_torch_compile_cuda_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: torch_compile_test_reports
          path: reports

  run_xformers_tests:
    name: PyTorch xformers CUDA tests

    runs-on:
      group: aws-g4dn-2xlarge

    container:
      image: diffusers/diffusers-pytorch-xformers-cuda
      options: --gpus 0 --shm-size "16gb" --ipc host

    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: NVIDIA-SMI
        run: |
          nvidia-smi
      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test,training]
      - name: Environment
        run: |
          python utils/print_env.py
      - name: Run example tests on GPU
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "xformers" --make-reports=tests_torch_xformers_cuda tests/
      - name: Failure short reports
        if: ${{ failure() }}
        run: cat reports/tests_torch_xformers_cuda_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: torch_xformers_test_reports
          path: reports

  run_examples_tests:
    name: Examples PyTorch CUDA tests on Ubuntu

    runs-on:
      group: aws-g4dn-2xlarge

    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --gpus 0 --shm-size "16gb" --ipc host

    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: NVIDIA-SMI
        run: |
          nvidia-smi

      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test,training]

      - name: Environment
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python utils/print_env.py

      - name: Run example tests on GPU
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install timm
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v --make-reports=examples_torch_cuda examples/

      - name: Failure short reports
        if: ${{ failure() }}
        run: |
          cat reports/examples_torch_cuda_stats.txt
          cat reports/examples_torch_cuda_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: examples_test_reports
          path: reports
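Several GPU jobs in this workflow and in the nightly workflow export `CUBLAS_WORKSPACE_CONFIG: :16:8`, following the PyTorch notes on avoiding nondeterministic algorithms that the inline comments link to. A short sketch of the Python-side counterpart, offered as an illustration; where and whether the test suite enables strict determinism is decided by the tests themselves:

# Illustrative Python-side counterpart to CUBLAS_WORKSPACE_CONFIG=:16:8.
import os
import torch

os.environ.setdefault("CUBLAS_WORKSPACE_CONFIG", ":16:8")  # required for deterministic cuBLAS
torch.use_deterministic_algorithms(True)  # raise if an op has no deterministic implementation
torch.manual_seed(0)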
diffusers/.github/workflows/push_tests_fast.yml
ADDED
@@ -0,0 +1,126 @@
name: Fast tests on main

on:
  push:
    branches:
      - main
    paths:
      - "src/diffusers/**.py"
      - "examples/**.py"
      - "tests/**.py"

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

env:
  DIFFUSERS_IS_CI: yes
  HF_HOME: /mnt/cache
  OMP_NUM_THREADS: 8
  MKL_NUM_THREADS: 8
  HF_HUB_ENABLE_HF_TRANSFER: 1
  PYTEST_TIMEOUT: 600
  RUN_SLOW: no

jobs:
  run_fast_tests:
    strategy:
      fail-fast: false
      matrix:
        config:
          - name: Fast PyTorch CPU tests on Ubuntu
            framework: pytorch
            runner: aws-general-8-plus
            image: diffusers/diffusers-pytorch-cpu
            report: torch_cpu
          - name: Fast Flax CPU tests on Ubuntu
            framework: flax
            runner: aws-general-8-plus
            image: diffusers/diffusers-flax-cpu
            report: flax_cpu
          - name: Fast ONNXRuntime CPU tests on Ubuntu
            framework: onnxruntime
            runner: aws-general-8-plus
            image: diffusers/diffusers-onnxruntime-cpu
            report: onnx_cpu
          - name: PyTorch Example CPU tests on Ubuntu
            framework: pytorch_examples
            runner: aws-general-8-plus
            image: diffusers/diffusers-pytorch-cpu
            report: torch_example_cpu

    name: ${{ matrix.config.name }}

    runs-on:
      group: ${{ matrix.config.runner }}

    container:
      image: ${{ matrix.config.image }}
      options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/

    defaults:
      run:
        shell: bash

    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Install dependencies
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install -e [quality,test]

      - name: Environment
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python utils/print_env.py

      - name: Run fast PyTorch CPU tests
        if: ${{ matrix.config.framework == 'pytorch' }}
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
            -s -v -k "not Flax and not Onnx" \
            --make-reports=tests_${{ matrix.config.report }} \
            tests/

      - name: Run fast Flax TPU tests
        if: ${{ matrix.config.framework == 'flax' }}
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
            -s -v -k "Flax" \
            --make-reports=tests_${{ matrix.config.report }} \
            tests/

      - name: Run fast ONNXRuntime CPU tests
        if: ${{ matrix.config.framework == 'onnxruntime' }}
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
            -s -v -k "Onnx" \
            --make-reports=tests_${{ matrix.config.report }} \
            tests/

      - name: Run example PyTorch CPU tests
        if: ${{ matrix.config.framework == 'pytorch_examples' }}
        run: |
          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
          python -m uv pip install peft timm
          python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
            --make-reports=tests_${{ matrix.config.report }} \
            examples

      - name: Failure short reports
        if: ${{ failure() }}
        run: cat reports/tests_${{ matrix.config.report }}_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: pr_${{ matrix.config.report }}_test_reports
          path: reports
diffusers/.github/workflows/push_tests_mps.yml
ADDED
@@ -0,0 +1,76 @@
name: Fast mps tests on main

on:
  push:
    branches:
      - main
    paths:
      - "src/diffusers/**.py"
      - "tests/**.py"

env:
  DIFFUSERS_IS_CI: yes
  HF_HOME: /mnt/cache
  OMP_NUM_THREADS: 8
  MKL_NUM_THREADS: 8
  HF_HUB_ENABLE_HF_TRANSFER: 1
  PYTEST_TIMEOUT: 600
  RUN_SLOW: no

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  run_fast_tests_apple_m1:
    name: Fast PyTorch MPS tests on MacOS
    runs-on: macos-13-xlarge

    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Clean checkout
        shell: arch -arch arm64 bash {0}
        run: |
          git clean -fxd

      - name: Setup miniconda
        uses: ./.github/actions/setup-miniconda
        with:
          python-version: 3.9

      - name: Install dependencies
        shell: arch -arch arm64 bash {0}
        run: |
          ${CONDA_RUN} python -m pip install --upgrade pip uv
          ${CONDA_RUN} python -m uv pip install -e ".[quality,test]"
          ${CONDA_RUN} python -m uv pip install torch torchvision torchaudio
          ${CONDA_RUN} python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
          ${CONDA_RUN} python -m uv pip install transformers --upgrade

      - name: Environment
        shell: arch -arch arm64 bash {0}
        run: |
          ${CONDA_RUN} python utils/print_env.py

      - name: Run fast PyTorch tests on M1 (MPS)
        shell: arch -arch arm64 bash {0}
        env:
          HF_HOME: /System/Volumes/Data/mnt/cache
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          ${CONDA_RUN} python -m pytest -n 0 -s -v --make-reports=tests_torch_mps tests/

      - name: Failure short reports
        if: ${{ failure() }}
        run: cat reports/tests_torch_mps_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: pr_torch_mps_test_reports
          path: reports
diffusers/.github/workflows/pypi_publish.yaml
ADDED
@@ -0,0 +1,81 @@
+# Adapted from https://blog.deepjyoti30.dev/pypi-release-github-action
+
+name: PyPI release
+
+on:
+  workflow_dispatch:
+  push:
+    tags:
+      - "*"
+
+jobs:
+  find-and-checkout-latest-branch:
+    runs-on: ubuntu-22.04
+    outputs:
+      latest_branch: ${{ steps.set_latest_branch.outputs.latest_branch }}
+    steps:
+      - name: Checkout Repo
+        uses: actions/checkout@v3
+
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.8'
+
+      - name: Fetch latest branch
+        id: fetch_latest_branch
+        run: |
+          pip install -U requests packaging
+          LATEST_BRANCH=$(python utils/fetch_latest_release_branch.py)
+          echo "Latest branch: $LATEST_BRANCH"
+          echo "latest_branch=$LATEST_BRANCH" >> $GITHUB_ENV
+
+      - name: Set latest branch output
+        id: set_latest_branch
+        run: echo "::set-output name=latest_branch::${{ env.latest_branch }}"
+
+  release:
+    needs: find-and-checkout-latest-branch
+    runs-on: ubuntu-22.04
+
+    steps:
+      - name: Checkout Repo
+        uses: actions/checkout@v3
+        with:
+          ref: ${{ needs.find-and-checkout-latest-branch.outputs.latest_branch }}
+
+      - name: Setup Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: "3.8"
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -U setuptools wheel twine
+          pip install -U torch --index-url https://download.pytorch.org/whl/cpu
+          pip install -U transformers
+
+      - name: Build the dist files
+        run: python setup.py bdist_wheel && python setup.py sdist
+
+      - name: Publish to the test PyPI
+        env:
+          TWINE_USERNAME: ${{ secrets.TEST_PYPI_USERNAME }}
+          TWINE_PASSWORD: ${{ secrets.TEST_PYPI_PASSWORD }}
+        run: twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
+
+      - name: Test installing diffusers and importing
+        run: |
+          pip install diffusers && pip uninstall diffusers -y
+          pip install -i https://test.pypi.org/simple/ diffusers
+          python -c "from diffusers import __version__; print(__version__)"
+          python -c "from diffusers import DiffusionPipeline; pipe = DiffusionPipeline.from_pretrained('fusing/unet-ldm-dummy-update'); pipe()"
+          python -c "from diffusers import DiffusionPipeline; pipe = DiffusionPipeline.from_pretrained('hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None); pipe('ah suh du')"
+          python -c "from diffusers import *"
+
+      - name: Publish to PyPI
+        env:
+          TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
+          TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
+        run: twine upload dist/* -r pypi
diffusers/.github/workflows/release_tests_fast.yml
ADDED
@@ -0,0 +1,446 @@
+# Duplicate workflow to push_tests.yml that is meant to run on release/patch branches as a final check
+# Creating a duplicate workflow here is simpler than adding complex path/branch parsing logic to push_tests.yml
+# Needs to be updated if push_tests.yml updated
+name: (Release) Fast GPU Tests on main
+
+on:
+  push:
+    branches:
+      - "v*.*.*-release"
+      - "v*.*.*-patch"
+
+env:
+  DIFFUSERS_IS_CI: yes
+  OMP_NUM_THREADS: 8
+  MKL_NUM_THREADS: 8
+  PYTEST_TIMEOUT: 600
+  PIPELINE_USAGE_CUTOFF: 50000
+
+jobs:
+  setup_torch_cuda_pipeline_matrix:
+    name: Setup Torch Pipelines CUDA Slow Tests Matrix
+    runs-on:
+      group: aws-general-8-plus
+    container:
+      image: diffusers/diffusers-pytorch-cpu
+    outputs:
+      pipeline_test_matrix: ${{ steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix }}
+    steps:
+      - name: Checkout diffusers
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 2
+      - name: Install dependencies
+        run: |
+          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
+          python -m uv pip install -e [quality,test]
+      - name: Environment
+        run: |
+          python utils/print_env.py
+      - name: Fetch Pipeline Matrix
+        id: fetch_pipeline_matrix
+        run: |
+          matrix=$(python utils/fetch_torch_cuda_pipeline_test_matrix.py)
+          echo $matrix
+          echo "pipeline_test_matrix=$matrix" >> $GITHUB_OUTPUT
+      - name: Pipeline Tests Artifacts
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: test-pipelines.json
+          path: reports
+
+  torch_pipelines_cuda_tests:
+    name: Torch Pipelines CUDA Tests
+    needs: setup_torch_cuda_pipeline_matrix
+    strategy:
+      fail-fast: false
+      max-parallel: 8
+      matrix:
+        module: ${{ fromJson(needs.setup_torch_cuda_pipeline_matrix.outputs.pipeline_test_matrix) }}
+    runs-on:
+      group: aws-g4dn-2xlarge
+    container:
+      image: diffusers/diffusers-pytorch-cuda
+      options: --shm-size "16gb" --ipc host --gpus 0
+    steps:
+      - name: Checkout diffusers
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 2
+      - name: NVIDIA-SMI
+        run: |
+          nvidia-smi
+      - name: Install dependencies
+        run: |
+          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
+          python -m uv pip install -e [quality,test]
+          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
+      - name: Environment
+        run: |
+          python utils/print_env.py
+      - name: Slow PyTorch CUDA checkpoint tests on Ubuntu
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
+          CUBLAS_WORKSPACE_CONFIG: :16:8
+        run: |
+          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
+            -s -v -k "not Flax and not Onnx" \
+            --make-reports=tests_pipeline_${{ matrix.module }}_cuda \
+            tests/pipelines/${{ matrix.module }}
+      - name: Failure short reports
+        if: ${{ failure() }}
+        run: |
+          cat reports/tests_pipeline_${{ matrix.module }}_cuda_stats.txt
+          cat reports/tests_pipeline_${{ matrix.module }}_cuda_failures_short.txt
+      - name: Test suite reports artifacts
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: pipeline_${{ matrix.module }}_test_reports
+          path: reports
+
+  torch_cuda_tests:
+    name: Torch CUDA Tests
+    runs-on:
+      group: aws-g4dn-2xlarge
+    container:
+      image: diffusers/diffusers-pytorch-cuda
+      options: --shm-size "16gb" --ipc host --gpus 0
+    defaults:
+      run:
+        shell: bash
+    strategy:
+      fail-fast: false
+      max-parallel: 2
+      matrix:
+        module: [models, schedulers, lora, others, single_file]
+    steps:
+      - name: Checkout diffusers
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 2
+
+      - name: Install dependencies
+        run: |
+          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
+          python -m uv pip install -e [quality,test]
+          python -m uv pip install peft@git+https://github.com/huggingface/peft.git
+          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
+
+      - name: Environment
+        run: |
+          python utils/print_env.py
+
+      - name: Run PyTorch CUDA tests
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
+          CUBLAS_WORKSPACE_CONFIG: :16:8
+        run: |
+          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
+            -s -v -k "not Flax and not Onnx" \
+            --make-reports=tests_torch_${{ matrix.module }}_cuda \
+            tests/${{ matrix.module }}
+
+      - name: Failure short reports
+        if: ${{ failure() }}
+        run: |
+          cat reports/tests_torch_${{ matrix.module }}_cuda_stats.txt
+          cat reports/tests_torch_${{ matrix.module }}_cuda_failures_short.txt
+
+      - name: Test suite reports artifacts
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: torch_cuda_${{ matrix.module }}_test_reports
+          path: reports
+
+  torch_minimum_version_cuda_tests:
+    name: Torch Minimum Version CUDA Tests
+    runs-on:
+      group: aws-g4dn-2xlarge
+    container:
+      image: diffusers/diffusers-pytorch-minimum-cuda
+      options: --shm-size "16gb" --ipc host --gpus 0
+    defaults:
+      run:
+        shell: bash
+    steps:
+      - name: Checkout diffusers
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 2
+
+      - name: Install dependencies
+        run: |
+          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
+          python -m uv pip install -e [quality,test]
+          python -m uv pip install peft@git+https://github.com/huggingface/peft.git
+          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
+
+      - name: Environment
+        run: |
+          python utils/print_env.py
+
+      - name: Run PyTorch CUDA tests
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
+          CUBLAS_WORKSPACE_CONFIG: :16:8
+        run: |
+          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
+            -s -v -k "not Flax and not Onnx" \
+            --make-reports=tests_torch_minimum_cuda \
+            tests/models/test_modeling_common.py \
+            tests/pipelines/test_pipelines_common.py \
+            tests/pipelines/test_pipeline_utils.py \
+            tests/pipelines/test_pipelines.py \
+            tests/pipelines/test_pipelines_auto.py \
+            tests/schedulers/test_schedulers.py \
+            tests/others
+
+      - name: Failure short reports
+        if: ${{ failure() }}
+        run: |
+          cat reports/tests_torch_minimum_version_cuda_stats.txt
+          cat reports/tests_torch_minimum_version_cuda_failures_short.txt
+
+      - name: Test suite reports artifacts
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: torch_minimum_version_cuda_test_reports
+          path: reports
+
+  flax_tpu_tests:
+    name: Flax TPU Tests
+    runs-on: docker-tpu
+    container:
+      image: diffusers/diffusers-flax-tpu
+      options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --privileged
+    defaults:
+      run:
+        shell: bash
+    steps:
+      - name: Checkout diffusers
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 2
+
+      - name: Install dependencies
+        run: |
+          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
+          python -m uv pip install -e [quality,test]
+          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
+
+      - name: Environment
+        run: |
+          python utils/print_env.py
+
+      - name: Run slow Flax TPU tests
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+        run: |
+          python -m pytest -n 0 \
+            -s -v -k "Flax" \
+            --make-reports=tests_flax_tpu \
+            tests/
+
+      - name: Failure short reports
+        if: ${{ failure() }}
+        run: |
+          cat reports/tests_flax_tpu_stats.txt
+          cat reports/tests_flax_tpu_failures_short.txt
+
+      - name: Test suite reports artifacts
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: flax_tpu_test_reports
+          path: reports
+
+  onnx_cuda_tests:
+    name: ONNX CUDA Tests
+    runs-on:
+      group: aws-g4dn-2xlarge
+    container:
+      image: diffusers/diffusers-onnxruntime-cuda
+      options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --gpus 0
+    defaults:
+      run:
+        shell: bash
+    steps:
+      - name: Checkout diffusers
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 2
+
+      - name: Install dependencies
+        run: |
+          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
+          python -m uv pip install -e [quality,test]
+          pip uninstall accelerate -y && python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
+
+      - name: Environment
+        run: |
+          python utils/print_env.py
+
+      - name: Run slow ONNXRuntime CUDA tests
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+        run: |
+          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
+            -s -v -k "Onnx" \
+            --make-reports=tests_onnx_cuda \
+            tests/
+
+      - name: Failure short reports
+        if: ${{ failure() }}
+        run: |
+          cat reports/tests_onnx_cuda_stats.txt
+          cat reports/tests_onnx_cuda_failures_short.txt
+
+      - name: Test suite reports artifacts
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: onnx_cuda_test_reports
+          path: reports
+
+  run_torch_compile_tests:
+    name: PyTorch Compile CUDA tests
+
+    runs-on:
+      group: aws-g4dn-2xlarge
+
+    container:
+      image: diffusers/diffusers-pytorch-compile-cuda
+      options: --gpus 0 --shm-size "16gb" --ipc host
+
+    steps:
+      - name: Checkout diffusers
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 2
+
+      - name: NVIDIA-SMI
+        run: |
+          nvidia-smi
+      - name: Install dependencies
+        run: |
+          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
+          python -m uv pip install -e [quality,test,training]
+      - name: Environment
+        run: |
+          python utils/print_env.py
+      - name: Run example tests on GPU
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          RUN_COMPILE: yes
+        run: |
+          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/
+      - name: Failure short reports
+        if: ${{ failure() }}
+        run: cat reports/tests_torch_compile_cuda_failures_short.txt
+
+      - name: Test suite reports artifacts
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: torch_compile_test_reports
+          path: reports
+
+  run_xformers_tests:
+    name: PyTorch xformers CUDA tests
+
+    runs-on:
+      group: aws-g4dn-2xlarge
+
+    container:
+      image: diffusers/diffusers-pytorch-xformers-cuda
+      options: --gpus 0 --shm-size "16gb" --ipc host
+
+    steps:
+      - name: Checkout diffusers
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 2
+
+      - name: NVIDIA-SMI
+        run: |
+          nvidia-smi
+      - name: Install dependencies
+        run: |
+          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
+          python -m uv pip install -e [quality,test,training]
+      - name: Environment
+        run: |
+          python utils/print_env.py
+      - name: Run example tests on GPU
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+        run: |
+          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "xformers" --make-reports=tests_torch_xformers_cuda tests/
+      - name: Failure short reports
+        if: ${{ failure() }}
+        run: cat reports/tests_torch_xformers_cuda_failures_short.txt
+
+      - name: Test suite reports artifacts
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: torch_xformers_test_reports
+          path: reports
+
+  run_examples_tests:
+    name: Examples PyTorch CUDA tests on Ubuntu
+
+    runs-on:
+      group: aws-g4dn-2xlarge
+
+    container:
+      image: diffusers/diffusers-pytorch-cuda
+      options: --gpus 0 --shm-size "16gb" --ipc host
+
+    steps:
+      - name: Checkout diffusers
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 2
+
+      - name: NVIDIA-SMI
+        run: |
+          nvidia-smi
+
+      - name: Install dependencies
+        run: |
+          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
+          python -m uv pip install -e [quality,test,training]
+
+      - name: Environment
+        run: |
+          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
+          python utils/print_env.py
+
+      - name: Run example tests on GPU
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+        run: |
+          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
+          python -m uv pip install timm
+          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v --make-reports=examples_torch_cuda examples/
+
+      - name: Failure short reports
+        if: ${{ failure() }}
+        run: |
+          cat reports/examples_torch_cuda_stats.txt
+          cat reports/examples_torch_cuda_failures_short.txt
+
+      - name: Test suite reports artifacts
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: examples_test_reports
+          path: reports
diffusers/.github/workflows/run_tests_from_a_pr.yml
ADDED
@@ -0,0 +1,74 @@
+name: Check running SLOW tests from a PR (only GPU)
+
+on:
+  workflow_dispatch:
+    inputs:
+      docker_image:
+        default: 'diffusers/diffusers-pytorch-cuda'
+        description: 'Name of the Docker image'
+        required: true
+      branch:
+        description: 'PR Branch to test on'
+        required: true
+      test:
+        description: 'Tests to run (e.g.: `tests/models`).'
+        required: true
+
+env:
+  DIFFUSERS_IS_CI: yes
+  IS_GITHUB_CI: "1"
+  HF_HOME: /mnt/cache
+  OMP_NUM_THREADS: 8
+  MKL_NUM_THREADS: 8
+  PYTEST_TIMEOUT: 600
+  RUN_SLOW: yes
+
+jobs:
+  run_tests:
+    name: "Run a test on our runner from a PR"
+    runs-on:
+      group: aws-g4dn-2xlarge
+    container:
+      image: ${{ github.event.inputs.docker_image }}
+      options: --gpus 0 --privileged --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
+
+    steps:
+      - name: Validate test files input
+        id: validate_test_files
+        env:
+          PY_TEST: ${{ github.event.inputs.test }}
+        run: |
+          if [[ ! "$PY_TEST" =~ ^tests/ ]]; then
+            echo "Error: The input string must start with 'tests/'."
+            exit 1
+          fi
+
+          if [[ ! "$PY_TEST" =~ ^tests/(models|pipelines) ]]; then
+            echo "Error: The input string must contain either 'models' or 'pipelines' after 'tests/'."
+            exit 1
+          fi
+
+          if [[ "$PY_TEST" == *";"* ]]; then
+            echo "Error: The input string must not contain ';'."
+            exit 1
+          fi
+          echo "$PY_TEST"
+
+      - name: Checkout PR branch
+        uses: actions/checkout@v4
+        with:
+          ref: ${{ github.event.inputs.branch }}
+          repository: ${{ github.event.pull_request.head.repo.full_name }}
+
+
+      - name: Install pytest
+        run: |
+          python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
+          python -m uv pip install -e [quality,test]
+          python -m uv pip install peft
+
+      - name: Run tests
+        env:
+          PY_TEST: ${{ github.event.inputs.test }}
+        run: |
+          pytest "$PY_TEST"
diffusers/.github/workflows/ssh-pr-runner.yml
ADDED
@@ -0,0 +1,40 @@
+name: SSH into PR runners
+
+on:
+  workflow_dispatch:
+    inputs:
+      docker_image:
+        description: 'Name of the Docker image'
+        required: true
+
+env:
+  IS_GITHUB_CI: "1"
+  HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
+  HF_HOME: /mnt/cache
+  DIFFUSERS_IS_CI: yes
+  OMP_NUM_THREADS: 8
+  MKL_NUM_THREADS: 8
+  RUN_SLOW: yes
+
+jobs:
+  ssh_runner:
+    name: "SSH"
+    runs-on:
+      group: aws-highmemory-32-plus
+    container:
+      image: ${{ github.event.inputs.docker_image }}
+      options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface/diffusers:/mnt/cache/ --privileged
+
+    steps:
+      - name: Checkout diffusers
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 2
+
+      - name: Tailscale # In order to be able to SSH when a test fails
+        uses: huggingface/tailscale-action@main
+        with:
+          authkey: ${{ secrets.TAILSCALE_SSH_AUTHKEY }}
+          slackChannel: ${{ secrets.SLACK_CIFEEDBACK_CHANNEL }}
+          slackToken: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
+          waitForSSH: true
diffusers/.github/workflows/ssh-runner.yml
ADDED
@@ -0,0 +1,52 @@
+name: SSH into GPU runners
+
+on:
+  workflow_dispatch:
+    inputs:
+      runner_type:
+        description: 'Type of runner to test (aws-g6-4xlarge-plus: a10, aws-g4dn-2xlarge: t4, aws-g6e-xlarge-plus: L40)'
+        type: choice
+        required: true
+        options:
+          - aws-g6-4xlarge-plus
+          - aws-g4dn-2xlarge
+          - aws-g6e-xlarge-plus
+      docker_image:
+        description: 'Name of the Docker image'
+        required: true
+
+env:
+  IS_GITHUB_CI: "1"
+  HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
+  HF_HOME: /mnt/cache
+  DIFFUSERS_IS_CI: yes
+  OMP_NUM_THREADS: 8
+  MKL_NUM_THREADS: 8
+  RUN_SLOW: yes
+
+jobs:
+  ssh_runner:
+    name: "SSH"
+    runs-on:
+      group: "${{ github.event.inputs.runner_type }}"
+    container:
+      image: ${{ github.event.inputs.docker_image }}
+      options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface/diffusers:/mnt/cache/ --gpus 0 --privileged
+
+    steps:
+      - name: Checkout diffusers
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 2
+
+      - name: NVIDIA-SMI
+        run: |
+          nvidia-smi
+
+      - name: Tailscale # In order to be able to SSH when a test fails
+        uses: huggingface/tailscale-action@main
+        with:
+          authkey: ${{ secrets.TAILSCALE_SSH_AUTHKEY }}
+          slackChannel: ${{ secrets.SLACK_CIFEEDBACK_CHANNEL }}
+          slackToken: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
+          waitForSSH: true
diffusers/.github/workflows/stale.yml
ADDED
@@ -0,0 +1,30 @@
+name: Stale Bot
+
+on:
+  schedule:
+    - cron: "0 15 * * *"
+
+jobs:
+  close_stale_issues:
+    name: Close Stale Issues
+    if: github.repository == 'huggingface/diffusers'
+    runs-on: ubuntu-22.04
+    permissions:
+      issues: write
+      pull-requests: write
+    env:
+      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Setup Python
+        uses: actions/setup-python@v1
+        with:
+          python-version: 3.8
+
+      - name: Install requirements
+        run: |
+          pip install PyGithub
+      - name: Close stale issues
+        run: |
+          python utils/stale.py
diffusers/.github/workflows/trufflehog.yml
ADDED
@@ -0,0 +1,15 @@
+on:
+  push:
+
+name: Secret Leaks
+
+jobs:
+  trufflehog:
+    runs-on: ubuntu-22.04
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: Secret Scanning
+        uses: trufflesecurity/trufflehog@main
diffusers/.github/workflows/typos.yml
ADDED
@@ -0,0 +1,14 @@
+name: Check typos
+
+on:
+  workflow_dispatch:
+
+jobs:
+  build:
+    runs-on: ubuntu-22.04
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: typos-action
+        uses: crate-ci/[email protected]
diffusers/.github/workflows/update_metadata.yml
ADDED
@@ -0,0 +1,30 @@
+name: Update Diffusers metadata
+
+on:
+  workflow_dispatch:
+  push:
+    branches:
+      - main
+      - update_diffusers_metadata*
+
+jobs:
+  update_metadata:
+    runs-on: ubuntu-22.04
+    defaults:
+      run:
+        shell: bash -l {0}
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Setup environment
+        run: |
+          pip install --upgrade pip
+          pip install datasets pandas
+          pip install .[torch]
+
+      - name: Update metadata
+        env:
+          HF_TOKEN: ${{ secrets.SAYAK_HF_TOKEN }}
+        run: |
+          python utils/update_metadata.py --commit_sha ${{ github.sha }}
diffusers/.github/workflows/upload_pr_documentation.yml
ADDED
@@ -0,0 +1,16 @@
+name: Upload PR Documentation
+
+on:
+  workflow_run:
+    workflows: ["Build PR Documentation"]
+    types:
+      - completed
+
+jobs:
+  build:
+    uses: huggingface/doc-builder/.github/workflows/upload_pr_documentation.yml@main
+    with:
+      package_name: diffusers
+    secrets:
+      hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
+      comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }}