
Commit 785c431

add docker-slim

1 parent 9dfad6f commit 785c431

416 files changed, +76899 −8 lines changed

README.md

Lines changed: 1 addition & 8 deletions

@@ -146,11 +146,4 @@ If you don't want to deploy the load balancing server but still want to test the
 Finally, you can call your http API(test it using postman).
 
 # Part 2: Deploy using Runpod Serverless
-Check `handler.py` where I realized a demo handler for txt2img.
-
-deploy steps:
-- get a GPU server, download this repository and model files
-- install docker: https://docs.docker.com/engine/install/ubuntu/#set-up-the-repository
-- cd to the project directory(which contains `Dockerfile`)
-- docker build && docker tag && docker push (these are not bash commands!)
-- create serverless Endpoint on runpod: https://runpod.io?ref=bz40v32s
+see `sd-docker-slim`

sd-docker-slim/Dockerfile

Lines changed: 37 additions & 0 deletions

@@ -0,0 +1,37 @@
FROM python:3.10-slim

# install docker on ubuntu: https://docs.docker.com/engine/install/ubuntu/#set-up-the-repository
# build image: docker build -t sd-multi .
# docker image ls
# dockerhub create repo
# login on ubuntu: docker login
# tag: docker tag sd-multi wolverinn/sd_multi_demo:v1
# push: docker push wolverinn/sd_multi_demo:v1

WORKDIR /

COPY requirements.txt /
# COPY torch-1.13.1+cu117-cp38-cp38-linux_x86_64.whl .

RUN apt-get update && apt-get install -y libgl1-mesa-glx && \
    apt-get install -y libglib2.0-0 && \
    apt-get install -y git && \
    rm -rf /var/lib/apt/lists/*

RUN pip3 install runpod==0.9.12 && \
    pip3 install -r requirements.txt && \
    # pip3 install torch-1.13.1+cu117-cp38-cp38-linux_x86_64.whl && \
    pip3 install torch==1.13.1+cu117 --extra-index-url https://download.pytorch.org/whl/cu117 && \
    pip3 install torchvision==0.14.1+cu117 --extra-index-url https://download.pytorch.org/whl/cu117 && \
    # apt-get remove -y git && \
    # rm -rf torch-1.13.1+cu117-cp38-cp38-linux_x86_64.whl && \
    pip3 cache purge

COPY . /
RUN chmod +x docker_entrypoint.sh

# COPY . /

ENTRYPOINT ["/docker_entrypoint.sh"]

CMD [ "python", "-u", "/handler.py" ]

sd-docker-slim/README.md

Lines changed: 39 additions & 0 deletions

@@ -0,0 +1,39 @@
# stable-diffusion docker slim

# Features

- deploy on runpod serverless
- text2img, img2img, list models
- upload models at any time, takes effect immediately

# Deploy Steps

## Build and upload docker
- get an Ubuntu machine with a GPU and download the project files
- install docker on ubuntu: https://docs.docker.com/engine/install/ubuntu/#set-up-the-repository
- cd to the directory containing the `Dockerfile`
- to customize the docker image, modify `handler.py` (e.g. to upload outputs or support more APIs) ...
- `docker build -t sd-multi .`
- `docker login`
- docker tag and then docker push

## Create a storage on runpod
- upload model files to the storage volume: under `/workspace` you should upload the whole `models` directory. The directory structure should look like this:

```
/workspace
- /models
  - /VAE
  - /Lora
  - /Stable-diffusion
  ...
```

## Deploy on runpod serverless
check the official guide: https://docs.runpod.io/docs/template-creation

# Test your API
check out `test_runpod.py`

## API definition
check out `idl.yaml`
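
For a sense of what the test looks like over plain HTTP, here is a hedged sketch against RunPod's standard `runsync` route; `ENDPOINT_ID`, `API_KEY`, and the `input` keys are placeholders — the repo's `test_runpod.py` and `idl.yaml` define the actual request format:

```python
# Hedged sketch of calling a RunPod serverless endpoint over HTTP.
# ENDPOINT_ID and API_KEY are placeholders; see test_runpod.py and
# idl.yaml in the repo for the project's actual request schema.
import requests

ENDPOINT_ID = "YOUR_ENDPOINT_ID"
API_KEY = "YOUR_API_KEY"

resp = requests.post(
    f"https://api.runpod.ai/v2/{ENDPOINT_ID}/runsync",
    headers={"Authorization": f"Bearer {API_KEY}"},
    json={"input": {"prompt": "a photo of an astronaut riding a horse"}},
    timeout=600,
)
resp.raise_for_status()
print(resp.json())
```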

sd-docker-slim/config.json

Lines changed: 146 additions & 0 deletions

@@ -0,0 +1,146 @@
{
    "samples_save": true,
    "samples_format": "png",
    "samples_filename_pattern": "",
    "save_images_add_number": true,
    "grid_save": true,
    "grid_format": "png",
    "grid_extended_filename": false,
    "grid_only_if_multiple": true,
    "grid_prevent_empty_spots": false,
    "n_rows": -1,
    "enable_pnginfo": true,
    "save_txt": false,
    "save_images_before_face_restoration": false,
    "save_images_before_highres_fix": false,
    "save_images_before_color_correction": false,
    "jpeg_quality": 80,
    "export_for_4chan": true,
    "img_downscale_threshold": 4.0,
    "target_side_length": 4000,
    "use_original_name_batch": true,
    "use_upscaler_name_as_suffix": false,
    "save_selected_only": true,
    "do_not_add_watermark": false,
    "temp_dir": "",
    "clean_temp_dir_at_start": false,
    "outdir_samples": "",
    "outdir_txt2img_samples": "outputs/txt2img-images",
    "outdir_img2img_samples": "outputs/img2img-images",
    "outdir_extras_samples": "outputs/extras-images",
    "outdir_grids": "",
    "outdir_txt2img_grids": "outputs/txt2img-grids",
    "outdir_img2img_grids": "outputs/img2img-grids",
    "outdir_save": "log/images",
    "save_to_dirs": true,
    "grid_save_to_dirs": true,
    "use_save_to_dirs_for_ui": false,
    "directories_filename_pattern": "[date]",
    "directories_max_prompt_words": 8,
    "ESRGAN_tile": 192,
    "ESRGAN_tile_overlap": 8,
    "realesrgan_enabled_models": [
        "R-ESRGAN 4x+",
        "R-ESRGAN 4x+ Anime6B"
    ],
    "upscaler_for_img2img": null,
    "face_restoration_model": "CodeFormer",
    "code_former_weight": 0.5,
    "face_restoration_unload": false,
    "show_warnings": false,
    "memmon_poll_rate": 8,
    "samples_log_stdout": false,
    "multiple_tqdm": true,
    "print_hypernet_extra": false,
    "unload_models_when_training": false,
    "pin_memory": false,
    "save_optimizer_state": false,
    "save_training_settings_to_txt": true,
    "dataset_filename_word_regex": "",
    "dataset_filename_join_string": " ",
    "training_image_repeats_per_epoch": 1,
    "training_write_csv_every": 500,
    "training_xattention_optimizations": false,
    "training_enable_tensorboard": false,
    "training_tensorboard_save_images": false,
    "training_tensorboard_flush_every": 120,
    "sd_checkpoint_cache": 0,
    "sd_vae_checkpoint_cache": 0,
    "sd_vae": "Automatic",
    "sd_vae_as_default": true,
    "inpainting_mask_weight": 1.0,
    "initial_noise_multiplier": 1.0,
    "img2img_color_correction": false,
    "img2img_fix_steps": false,
    "img2img_background_color": "#ffffff",
    "enable_quantization": false,
    "enable_emphasis": true,
    "enable_batch_seeds": true,
    "comma_padding_backtrack": 20,
    "CLIP_stop_at_last_layers": 1,
    "upcast_attn": false,
    "use_old_emphasis_implementation": false,
    "use_old_karras_scheduler_sigmas": false,
    "no_dpmpp_sde_batch_determinism": false,
    "use_old_hires_fix_width_height": false,
    "interrogate_keep_models_in_memory": false,
    "interrogate_return_ranks": false,
    "interrogate_clip_num_beams": 1,
    "interrogate_clip_min_length": 24,
    "interrogate_clip_max_length": 48,
    "interrogate_clip_dict_limit": 1500,
    "interrogate_clip_skip_categories": [],
    "interrogate_deepbooru_score_threshold": 0.5,
    "deepbooru_sort_alpha": true,
    "deepbooru_use_spaces": false,
    "deepbooru_escape": true,
    "deepbooru_filter_tags": "",
    "extra_networks_default_view": "cards",
    "extra_networks_default_multiplier": 1.0,
    "sd_hypernetwork": "None",
    "return_grid": true,
    "do_not_show_images": false,
    "add_model_hash_to_info": true,
    "add_model_name_to_info": true,
    "disable_weights_auto_swap": true,
    "send_seed": true,
    "send_size": true,
    "font": "",
    "js_modal_lightbox": true,
    "js_modal_lightbox_initially_zoomed": true,
    "show_progress_in_title": true,
    "samplers_in_dropdown": true,
    "dimensions_and_batch_together": true,
    "keyedit_precision_attention": 0.1,
    "keyedit_precision_extra": 0.05,
    "quicksettings": "sd_model_checkpoint",
    "ui_reorder": "inpaint, sampler, checkboxes, hires_fix, dimensions, cfg, seed, batch, override_settings, scripts",
    "ui_extra_networks_tab_reorder": "",
    "localization": "zh_CN",
    "show_progressbar": true,
    "live_previews_enable": true,
    "show_progress_grid": true,
    "show_progress_every_n_steps": 10,
    "show_progress_type": "Approx NN",
    "live_preview_content": "Prompt",
    "live_preview_refresh_period": 1000,
    "hide_samplers": [],
    "eta_ddim": 0.0,
    "eta_ancestral": 1.0,
    "ddim_discretize": "uniform",
    "s_churn": 0.0,
    "s_tmin": 0.0,
    "s_noise": 1.0,
    "eta_noise_seed_delta": 0,
    "always_discard_next_to_last_sigma": false,
    "postprocessing_enable_in_main_ui": [],
    "postprocessing_operation_order": [],
    "upscaling_max_images_in_cache": 5,
    "disabled_extensions": [],
    "ldsr_steps": 100,
    "ldsr_cached": false,
    "SWIN_tile": 192,
    "SWIN_tile_overlap": 8,
    "sd_lora": "None",
    "lora_apply_to_outputs": false
}
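
These keys mirror the stable-diffusion-webui options file. If you want to change a setting (say, CLIP skip or the UI language) before baking the image, a plain JSON edit is enough; a minimal sketch, assuming the file sits at `sd-docker-slim/config.json` in your checkout:

```python
# Minimal sketch: tweak webui-style settings in config.json before
# `docker build`. Keys are taken from the file above; the path is an
# assumption about where your checkout lives.
import json

path = "sd-docker-slim/config.json"

with open(path, encoding="utf-8") as f:
    opts = json.load(f)

opts["CLIP_stop_at_last_layers"] = 2  # e.g. "clip skip 2"
opts["localization"] = ""             # switch the UI language back to English

with open(path, "w", encoding="utf-8") as f:
    json.dump(opts, f, ensure_ascii=False, indent=4)
```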
Lines changed: 72 additions & 0 deletions

@@ -0,0 +1,72 @@
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: False

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: modules.xlmr.BertSeriesModelWithTransformation
      params:
        name: "XLMR-Large"
