Skip to content

Commit 80be865

Browse files
committed with message "more skip"
1 parent 7778a83 · commit 80be865

File tree

7 files changed

+30
-3
lines changed

7 files changed

+30
-3
lines changed

.github/workflows/reusable-test.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ jobs:
3939
- name: Install dependencies for Python ${{ matrix.python-version }}
4040
run: |
4141
uv python pin ${{ matrix.python-version }}
42-
uv sync --group test --extra sentence-transformers --extra openai
42+
uv sync --group test
4343
4444
- name: Run tests
4545
run: |

src/autointent/_wrappers/embedder/sentence_transformers.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -240,6 +240,7 @@ def train(self, utterances: list[str], labels: ListOfLabels, config: EmbedderFin
240240
# Lazy import sentence-transformers training components (only needed for fine-tuning)
241241
st = require("sentence_transformers", extra="sentence-transformers")
242242
transformers = require("transformers", extra="transformers")
243+
require("accelerate", extra="transformers")
243244

244245
x_train, x_val, y_train, y_val = train_test_split(utterances, labels, test_size=config.val_fraction)
245246
tr_ds = Dataset.from_dict({"text": x_train, "label": y_train})

tests/embedder/test_fine_tuned.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
import numpy as np
2+
import pytest
23

34
from autointent._wrappers.embedder.sentence_transformers import SentenceTransformerEmbeddingBackend
45
from autointent.configs import EmbedderFineTuningConfig, HFModelConfig, SentenceTransformerEmbeddingConfig
@@ -7,6 +8,8 @@
78

89
def test_model_updates_after_training(dataset):
910
"""Test that model weights actually change after training"""
11+
pytest.importorskip("accelerate", reason="Accelerate library is required for this test")
12+
1013
data_handler = DataHandler(dataset)
1114

1215
hf_config = HFModelConfig(model_name="intfloat/multilingual-e5-small", batch_size=8, trust_remote_code=True)

tests/embedder/test_fine_tuned_dump_load.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,8 @@
1414

1515
def test_finetune_dump_load(dataset, on_windows):
1616
"""Test scenario: fine-tune -> dump -> load."""
17+
pytest.importorskip("accelerate", reason="Accelerate library is required for this test")
18+
1719
data_handler = DataHandler(dataset)
1820

1921
# Setup config for fine-tuning
@@ -69,6 +71,8 @@ def test_finetune_dump_load(dataset, on_windows):
6971

7072
def test_dump_load_finetune(dataset, on_windows):
7173
"""Test scenario: dump -> load -> fine-tune."""
74+
pytest.importorskip("accelerate", reason="Accelerate library is required for this test")
75+
7276
data_handler = DataHandler(dataset)
7377

7478
# Setup config
@@ -121,6 +125,8 @@ def test_dump_load_finetune(dataset, on_windows):
121125

122126
def test_load_from_disk_finetune_dump_load(dataset, on_windows):
123127
"""Test scenario: load sentence transformer from disk -> fine-tune -> dump -> load."""
128+
pytest.importorskip("accelerate", reason="Accelerate library is required for this test")
129+
124130
from sentence_transformers import SentenceTransformer
125131

126132
data_handler = DataHandler(dataset)
@@ -180,6 +186,8 @@ def test_load_from_disk_finetune_dump_load(dataset, on_windows):
180186

181187
def test_embeddings_consistency_across_workflows(dataset, on_windows):
182188
"""Test that different workflows produce consistent results when starting from same model."""
189+
pytest.importorskip("accelerate", reason="Accelerate library is required for this test")
190+
183191
data_handler = DataHandler(dataset)
184192

185193
# Common config
@@ -224,6 +232,7 @@ def test_embeddings_consistency_across_workflows(dataset, on_windows):
224232

225233
def test_multiple_dump_load_cycles_after_finetuning(dataset, on_windows):
226234
"""Test that multiple dump/load cycles preserve fine-tuned model state."""
235+
pytest.importorskip("accelerate", reason="Accelerate library is required for this test")
227236
data_handler = DataHandler(dataset)
228237

229238
hf_config = HFModelConfig(model_name="intfloat/multilingual-e5-small", batch_size=4, trust_remote_code=True)

tests/embedder/test_sentence_transformers_backend.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -72,6 +72,8 @@ def test_hash_calculation(self, st_backend: SentenceTransformerEmbeddingBackend)
7272

7373
def test_training_functionality(self, st_backend: SentenceTransformerEmbeddingBackend):
7474
"""Test basic training functionality."""
75+
pytest.importorskip("accelerate", reason="Accelerate library is required for this test")
76+
7577
# Simple training data
7678
utterances = [
7779
"Hello world",

tests/modules/scoring/test_bert.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,8 @@
1313

1414
def test_bert_scorer_dump_load(dataset):
1515
"""Test that BertScorer can be saved and loaded while preserving predictions."""
16+
pytest.importorskip("accelerate", reason="Accelerate library is required for this test")
17+
1618
data_handler = DataHandler(dataset)
1719

1820
# Create and train scorer
@@ -57,6 +59,8 @@ def test_bert_scorer_dump_load(dataset):
5759

5860
def test_bert_prediction(dataset):
5961
"""Test that the transformer model can fit and make predictions."""
62+
pytest.importorskip("accelerate", reason="Accelerate library is required for this test")
63+
6064
data_handler = DataHandler(dataset)
6165

6266
scorer = BertScorer(classification_model_config="prajjwal1/bert-tiny", num_train_epochs=1, batch_size=8)
@@ -94,6 +98,8 @@ def test_bert_prediction(dataset):
9498

9599
def test_bert_cache_clearing(dataset):
96100
"""Test that the transformer model properly handles cache clearing."""
101+
pytest.importorskip("accelerate", reason="Accelerate library is required for this test")
102+
97103
data_handler = DataHandler(dataset)
98104

99105
scorer = BertScorer(classification_model_config="prajjwal1/bert-tiny", num_train_epochs=1, batch_size=8)

tests/pipeline/test_inference.py

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,8 +7,6 @@
77
from autointent.custom_types import NodeType
88
from tests.conftest import get_search_space, setup_environment
99

10-
pytest.importorskip("peft")
11-
1210

1311
@pytest.fixture
1412
def project_dir(task_type):
@@ -32,6 +30,8 @@ def project_dir(task_type):
3230
],
3331
)
3432
def test_inference_from_config(dataset, task_type, project_dir):
33+
pytest.importorskip("peft")
34+
3535
search_space = get_search_space(task_type)
3636

3737
pipeline_optimizer = Pipeline.from_search_space(search_space)
@@ -85,6 +85,8 @@ def test_inference_from_config(dataset, task_type, project_dir):
8585
],
8686
)
8787
def test_inference_on_the_fly(dataset, task_type, project_dir):
88+
pytest.importorskip("peft")
89+
8890
search_space = get_search_space(task_type)
8991

9092
pipeline = Pipeline.from_search_space(search_space)
@@ -118,6 +120,8 @@ def test_inference_on_the_fly(dataset, task_type, project_dir):
118120

119121

120122
def test_load_with_overrided_params(dataset):
123+
pytest.importorskip("peft")
124+
121125
project_dir = setup_environment() / "test_inference" / "override"
122126
search_space = get_search_space("light")
123127

@@ -158,6 +162,8 @@ def test_load_with_overrided_params(dataset):
158162

159163

160164
def test_no_saving(dataset):
165+
pytest.importorskip("peft")
166+
161167
project_dir = setup_environment() / "test_inference" / "no_saving"
162168
search_space = get_search_space("light")
163169

0 commit comments

Comments (0)