From 3c1b20e15ab80c76a3910525d4da9b5c0502ad23 Mon Sep 17 00:00:00 2001
From: Harry Yang
Date: Fri, 27 Sep 2024 16:51:01 +0000
Subject: [PATCH 1/2] Fix bug to use all CPU cores

1. On my VM, running inference on CPU used only one core instead of all 16.
   Deleting `torch.set_default_tensor_type(torch.BFloat16Tensor)` makes it
   use all 16 cores.
2. Reduce the default max_seq_len to 128.
---
 inference.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/inference.py b/inference.py
index 4492aed..4bf9afa 100644
--- a/inference.py
+++ b/inference.py
@@ -42,8 +42,6 @@ def build(checkpoints_dir: str, tokenizer_path: str, load_model: bool, max_seq_l
 
         if device == "cuda":
             torch.set_default_tensor_type(torch.cuda.HalfTensor)
-        else:
-            torch.set_default_tensor_type(torch.BFloat16Tensor)
 
         model = Transformer(model_args).to(device)
 
@@ -156,7 +154,7 @@ def _sample_top_p(self, probs, p):
         checkpoints_dir='llama-2-7b/',
         tokenizer_path='tokenizer.model',
         load_model=True,
-        max_seq_len=1024,
+        max_seq_len=128,
         max_batch_size=len(prompts),
         device=device
     )

From b3c12a105eff4ee980e7b8d1da3728d6583a5cea Mon Sep 17 00:00:00 2001
From: Harry Yang
Date: Fri, 27 Sep 2024 20:38:04 +0000
Subject: [PATCH 2/2] Define device in text_completion

* `device` is used in `text_completion` but was undefined.
---
 inference.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/inference.py b/inference.py
index 4bf9afa..bde373a 100644
--- a/inference.py
+++ b/inference.py
@@ -54,6 +54,7 @@ def build(checkpoints_dir: str, tokenizer_path: str, load_model: bool, max_seq_l
         return LLaMA(model, tokenizer, model_args)
 
     def text_completion(self, prompts: list[str], temperature: float = 0.6, top_p: float = 0.9, max_gen_len: Optional[int] = None):
+        device = self.args.device
         if max_gen_len is None:
             max_gen_len = self.args.max_seq_len - 1
         # Convert each prompt into tokens
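
Note for reviewers: a quick way to check the single-core behavior the first
patch describes is to time the same CPU matmul in float32 and in bfloat16
while watching core utilization (e.g. in htop). This is a standalone sketch,
not part of the patch; the 2048x2048 size and the 10-iteration loop are
arbitrary values chosen for illustration.

    import time
    import torch

    def timed_matmul(dtype: torch.dtype) -> float:
        # Same workload in both runs; only the dtype differs.
        x = torch.randn(2048, 2048, dtype=dtype)
        start = time.perf_counter()
        for _ in range(10):
            x @ x
        return time.perf_counter() - start

    # Threads PyTorch can use for intra-op parallelism on this machine.
    print(f"threads available: {torch.get_num_threads()}")
    print(f"float32:  {timed_matmul(torch.float32):.3f}s")
    print(f"bfloat16: {timed_matmul(torch.bfloat16):.3f}s")

If your PyTorch build shows the behavior reported in the commit message, the
bfloat16 run should keep utilization near a single core, which is why the
first patch removes the bfloat16 default on CPU rather than keeping it.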