Commit c1c5d25

Fixing broken tests (#130)
* Update build.yml
* Debug tests
* More prints
* test
* no more firing counts
* No tests
* Constructor again
* No asserts
* add prints
* check space better
* now correct
* local
* local
* Add back all tests
* cloud
* why
* python
* import pytest?
* fix imports
* smaller pythia
* remove print
* remove -s

---------

Co-authored-by: SrGonao <goncalo@eleuther.ai>
1 parent 494df86 commit c1c5d25

4 files changed: +6 -8 lines changed

.github/workflows/build.yml

Lines changed: 2 additions & 2 deletions
@@ -17,7 +17,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions/setup-python@v5
         with:
-          python-version: "3.10"
+          python-version: "3.12"
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
@@ -40,7 +40,7 @@ jobs:
           fetch-depth: 0
       - uses: actions/setup-python@v5
         with:
-          python-version: "3.10"
+          python-version: "3.12"
       - name: Install dependencies
         run: pip install build twine
       - name: Semantic Release

tests/conftest.py

Lines changed: 3 additions & 3 deletions
@@ -35,14 +35,14 @@
 
 @pytest.fixture(scope="module")
 def tokenizer() -> PreTrainedTokenizer | PreTrainedTokenizerFast:
-    tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-160m")
+    tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-70m")
     tokenizer.pad_token = tokenizer.eos_token
     return tokenizer
 
 
 @pytest.fixture(scope="module")
 def model() -> PreTrainedModel:
-    model = AutoModel.from_pretrained("EleutherAI/pythia-160m")
+    model = AutoModel.from_pretrained("EleutherAI/pythia-70m")
     return model
 
 
@@ -73,7 +73,7 @@ def cache_setup(tmp_path_factory, mock_dataset: torch.Tensor, model: PreTrainedM
         sampler_cfg=SamplerConfig(),
         cache_cfg=cache_cfg,
         model="EleutherAI/pythia-160m",
-        sparse_model="EleutherAI/sae-pythia-160m-32k",
+        sparse_model="EleutherAI/sae-pythia-70m-32k",
         hookpoints=["layers.1"],
     )
     hookpoint_to_sparse_encode, _ = load_hooks_sparse_coders(model, run_cfg_gemma)
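
For context on the fixture change above: pytest fixtures declared with scope="module" are built once per test module and injected into tests by parameter name, so swapping in the smaller pythia-70m checkpoint cuts the one-time download and load cost for every module that uses them. A minimal sketch of a test consuming these fixtures (the test itself is hypothetical, not from this repository):

# Hypothetical example test; `tokenizer` and `model` resolve to the
# module-scoped fixtures defined in tests/conftest.py above.
def test_forward_pass_shapes(tokenizer, model):
    batch = tokenizer("hello world", return_tensors="pt")
    outputs = model(**batch)
    # AutoModel returns hidden states; one input sequence means batch size 1,
    # and the sequence dimension matches the tokenized input length.
    assert outputs.last_hidden_state.shape[0] == 1
    assert outputs.last_hidden_state.shape[1] == batch["input_ids"].shape[1]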

tests/test_latents/test_cache.py

Lines changed: 1 addition & 2 deletions
@@ -71,7 +71,6 @@ def test_config_file(cache_setup: dict[str, Any]):
     with open(config_path, "r") as f:
         config = json.load(f)
     cache_cfg = cache_setup["cache_cfg"]
-
     assert config["batch_size"] == cache_cfg.batch_size, "Config batch_size mismatch"
-    assert config["cache_ctx_len"] == cache_cfg.cache_ctx_len, "Cache_ctx_len mismatch"
+    assert config["cache_ctx_len"] == cache_cfg.cache_ctx_len, "ctx_len mismatch"
     assert config["n_tokens"] == cache_cfg.n_tokens, "Config n_tokens mismatch"
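
The assertions above compare the on-disk config.json against the in-memory cache config. A minimal sketch of the write side this test implicitly relies on (field names are taken from the diff; the CacheConfig dataclass and save_cache_config helper here are hypothetical illustrations, not this library's actual API):

import json
from dataclasses import asdict, dataclass
from pathlib import Path

# Hypothetical config object carrying only the fields test_config_file checks.
@dataclass
class CacheConfig:
    batch_size: int
    cache_ctx_len: int
    n_tokens: int

def save_cache_config(cfg: CacheConfig, cache_dir: Path) -> None:
    # Persist the run's settings next to the cached activations so tests
    # (and later runs) can verify them against the in-memory config.
    with open(cache_dir / "config.json", "w") as f:
        json.dump(asdict(cfg), f)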

tests/test_latents/test_constructor.py

Lines changed: 0 additions & 1 deletion
@@ -40,7 +40,6 @@ def test_save_load_cache(
     tokens: Int[Tensor, "examples ctx_len"] = dataset.load_tokens()  # type: ignore
     assert (tokens == cache_setup["tokens"][: len(tokens)]).all()
     for record in dataset:
-        print(record)
         assert len(record.train) <= sampler_cfg.n_examples_train
         assert len(record.test) <= sampler_cfg.n_examples_test
 
