Skip to content

Commit 319da7a

Browse files
committed
hopefully, final?
1 parent 9c67ac4 commit 319da7a

File tree

4 files changed

+9
-11
lines changed

4 files changed

+9
-11
lines changed

tests/pipelines/kandinsky3/test_kandinsky3.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -109,7 +109,7 @@ def get_dummy_components(self, time_cond_proj_dim=None):
109109
movq = self.dummy_movq
110110
torch.manual_seed(0)
111111
config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-t5")
112-
text_encoder = T5EncoderModel(config)
112+
text_encoder = T5EncoderModel(config).eval()
113113

114114
torch.manual_seed(0)
115115
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
@@ -156,7 +156,7 @@ def test_kandinsky3(self):
156156

157157
assert image.shape == (1, 16, 16, 3)
158158

159-
expected_slice = np.array([0.3768, 0.4373, 0.4865, 0.4890, 0.4299, 0.5122, 0.4921, 0.4924, 0.5599])
159+
expected_slice = np.array([0.3301, 0.3106, 0.4827, 0.5204, 0.4242, 0.4987, 0.4665, 0.5120, 0.5558])
160160

161161
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, (
162162
f" expected_slice {expected_slice}, but got {image_slice.flatten()}"

tests/pipelines/kandinsky3/test_kandinsky3_img2img.py

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -120,7 +120,7 @@ def get_dummy_components(self, time_cond_proj_dim=None):
120120
movq = self.dummy_movq
121121
torch.manual_seed(0)
122122
config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-t5")
123-
text_encoder = T5EncoderModel(config)
123+
text_encoder = T5EncoderModel(config).eval()
124124

125125
torch.manual_seed(0)
126126
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
@@ -158,7 +158,7 @@ def get_dummy_inputs(self, device, seed=0):
158158
def test_dict_tuple_outputs_equivalent(self):
159159
expected_slice = None
160160
if torch_device == "cpu":
161-
expected_slice = np.array([0.5762, 0.6112, 0.4150, 0.6018, 0.6167, 0.4626, 0.5426, 0.5641, 0.6536])
161+
expected_slice = np.array([0.5261, 0.5688, 0.4093, 0.4865, 0.5326, 0.4480, 0.5064, 0.5113, 0.6222])
162162
super().test_dict_tuple_outputs_equivalent(expected_slice=expected_slice)
163163

164164
def test_kandinsky3_img2img(self):
@@ -178,9 +178,7 @@ def test_kandinsky3_img2img(self):
178178

179179
assert image.shape == (1, 64, 64, 3)
180180

181-
expected_slice = np.array(
182-
[0.576259, 0.6132097, 0.41703486, 0.603196, 0.62062526, 0.4655338, 0.5434324, 0.5660727, 0.65433365]
183-
)
181+
expected_slice = np.array([0.5261, 0.5688, 0.4093, 0.4865, 0.5326, 0.4480, 0.5064, 0.5113, 0.6222])
184182

185183
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, (
186184
f" expected_slice {expected_slice}, but got {image_slice.flatten()}"

tests/pipelines/qwenimage/test_qwenimage.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -113,7 +113,7 @@ def get_dummy_components(self):
113113
vision_start_token_id=151652,
114114
vision_token_id=151654,
115115
)
116-
text_encoder = Qwen2_5_VLForConditionalGeneration(config)
116+
text_encoder = Qwen2_5_VLForConditionalGeneration(config).eval()
117117
tokenizer = Qwen2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration")
118118

119119
components = {
@@ -160,7 +160,7 @@ def test_inference(self):
160160
self.assertEqual(generated_image.shape, (3, 32, 32))
161161

162162
# fmt: off
163-
expected_slice = torch.tensor([0.5646, 0.6369, 0.6019, 0.5640, 0.5830, 0.5520, 0.5717, 0.6315, 0.4167, 0.3563, 0.5640, 0.4849, 0.4961, 0.5237, 0.4084, 0.5014])
163+
expected_slice = torch.tensor([0.5633, 0.6368, 0.6015, 0.5637, 0.5817, 0.5528, 0.5718, 0.6326, 0.4147, 0.3556, 0.5623, 0.4833, 0.4971, 0.5262, 0.4087, 0.5021])
164164
# fmt: on
165165

166166
generated_slice = generated_image.flatten()

tests/pipelines/qwenimage/test_qwenimage_edit.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,7 @@ def get_dummy_components(self):
115115
vision_start_token_id=151652,
116116
vision_token_id=151654,
117117
)
118-
text_encoder = Qwen2_5_VLForConditionalGeneration(config)
118+
text_encoder = Qwen2_5_VLForConditionalGeneration(config).eval()
119119
tokenizer = Qwen2Tokenizer.from_pretrained(tiny_ckpt_id)
120120

121121
components = {
@@ -163,7 +163,7 @@ def test_inference(self):
163163
self.assertEqual(generated_image.shape, (3, 32, 32))
164164

165165
# fmt: off
166-
expected_slice = torch.tensor([0.5640, 0.6350, 0.6003, 0.5606, 0.5801, 0.5502, 0.5757, 0.6388, 0.4174, 0.3590, 0.5647, 0.4891, 0.4975, 0.5256, 0.4088, 0.4991])
166+
expected_slice = torch.tensor([0.5637, 0.6341, 0.6001, 0.5620, 0.5794, 0.5498, 0.5757, 0.6389, 0.4174, 0.3597, 0.5649, 0.4894, 0.4969, 0.5255, 0.4083, 0.4986])
167167
# fmt: on
168168

169169
generated_slice = generated_image.flatten()

0 commit comments

Comments (0)