From 7c9621231059ed1d065721abdb97583a2e313cca Mon Sep 17 00:00:00 2001
From: Aaron Gokaslan
Date: Tue, 5 Apr 2022 19:05:03 -0400
Subject: [PATCH] Optimize memory usage during text2image inference

* Allows one to run the models on Google Colab
---
 scripts/txt2img.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/scripts/txt2img.py b/scripts/txt2img.py
index dc377525e..e66958870 100644
--- a/scripts/txt2img.py
+++ b/scripts/txt2img.py
@@ -3,7 +3,7 @@
 import numpy as np
 from omegaconf import OmegaConf
 from PIL import Image
-from tqdm import tqdm, trange
+from tqdm.auto import tqdm, trange
 from einops import rearrange
 from torchvision.utils import make_grid
@@ -13,7 +13,7 @@ def load_model_from_config(config, ckpt, verbose=False):
     print(f"Loading model from {ckpt}")
-    pl_sd = torch.load(ckpt, map_location="cpu")
+    pl_sd = torch.load(ckpt, map_location="cuda")
     sd = pl_sd["state_dict"]
     model = instantiate_from_config(config.model)
     m, u = model.load_state_dict(sd, strict=False)
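
Note on the `map_location` change (context, not part of the patch): `torch.load` materializes each tensor at the location given by `map_location`. With `"cpu"`, the entire state dict must fit in system RAM before the model is later moved to the GPU; with `"cuda"`, each storage is deserialized and placed in GPU memory as it is read, keeping peak system-RAM usage low. On Google Colab, system RAM (roughly 12 GB on the free tier) is usually a tighter limit than the GPU's VRAM, which is what this patch works around. A minimal sketch of the difference, using a hypothetical checkpoint path:

    import torch

    # Hypothetical path; in the patched script this is the `ckpt`
    # argument passed to load_model_from_config().
    ckpt = "model.ckpt"

    # Old behavior: the whole checkpoint is staged in system RAM first,
    # and a second copy exists once the weights are moved to the GPU.
    # pl_sd = torch.load(ckpt, map_location="cpu")

    # Patched behavior: tensors are deserialized onto the GPU one
    # storage at a time, avoiding the system-RAM spike that exhausts
    # Colab's memory.
    pl_sd = torch.load(ckpt, map_location="cuda")
    sd = pl_sd["state_dict"]

The `tqdm.auto` change is complementary: `tqdm.auto` selects the notebook widget progress bar when running under Jupyter/Colab and falls back to the plain console bar elsewhere, so the sampling loops render cleanly in a Colab cell.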