# conftest.py: pytest configuration for yt's test suite (forked from yt-project/yt)
import os
import shutil
import sys
import tempfile
from importlib.metadata import version
from importlib.util import find_spec
from pathlib import Path

import pytest
import yaml
from packaging.version import Version

from yt.config import ytcfg
from yt.utilities.answer_testing.testing_utilities import (
    _compare_raw_arrays,
    _hash_results,
    _save_raw_arrays,
    _save_result,
    _streamline_for_io,
    data_dir_load,
)

MPL_VERSION = Version(version("matplotlib"))
NUMPY_VERSION = Version(version("numpy"))
PILLOW_VERSION = Version(version("pillow"))

# setuptools does not ship with the standard lib starting in Python 3.12, so we
# need to be resilient if it's not available at runtime
if find_spec("setuptools") is not None:
    SETUPTOOLS_VERSION = Version(version("setuptools"))
else:
    SETUPTOOLS_VERSION = None

if find_spec("pandas") is not None:
    PANDAS_VERSION = Version(version("pandas"))
else:
    PANDAS_VERSION = None


def pytest_addoption(parser):
    """
    Register the command-line and ini options used by the answer-testing
    machinery.
    """
parser.addoption(
"--with-answer-testing",
action="store_true",
)
parser.addoption(
"--answer-store",
action="store_true",
)
parser.addoption(
"--answer-raw-arrays",
action="store_true",
)
parser.addoption(
"--raw-answer-store",
action="store_true",
)
parser.addoption(
"--force-overwrite",
action="store_true",
)
parser.addoption(
"--no-hash",
action="store_true",
)
    # Queried in pytest_collection_modifyitems; getoption() raises for
    # unregistered options, so --answer-big-data must be declared here
    parser.addoption(
        "--answer-big-data",
        action="store_true",
    )
parser.addoption("--local-dir", default=None, help="Where answers are saved.")
# Tell pytest about the local-dir option in the ini files. This
# option is used for creating the answer directory on CI
parser.addini(
"local-dir",
default=str(Path(__file__).parent / "answer-store"),
help="answer directory.",
)
parser.addini(
"test_data_dir",
default=ytcfg.get("yt", "test_data_dir"),
help="Directory where data for tests is stored.",
)


def pytest_configure(config):
    r"""
    Registers the custom markers and warning filters used by the test suite.
    """
# Register custom marks for answer tests and big data
config.addinivalue_line("markers", "answer_test: Run the answer tests.")
config.addinivalue_line(
"markers", "big_data: Run answer tests that require large data files."
)
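    # Tests opt in to these markers with, e.g. (illustrative):
    #   @pytest.mark.answer_test
    #   @pytest.mark.big_data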
for value in (
# treat most warnings as errors
"error",
# >>> warnings emitted by testing frameworks, or in testing contexts
        # we still have some yield-based tests awaiting transition to pytest
"ignore::pytest.PytestCollectionWarning",
# matplotlib warnings related to the Agg backend which is used in CI, not much we can do about it
"ignore:Matplotlib is currently using agg, which is a non-GUI backend, so cannot show the figure.:UserWarning",
r"ignore:tight_layout.+falling back to Agg renderer:UserWarning",
#
# >>> warnings from wrong values passed to numpy
# these should normally be curated out of the test suite but they are too numerous
# to deal with in a reasonable time at the moment.
"ignore:invalid value encountered in log10:RuntimeWarning",
"ignore:divide by zero encountered in log10:RuntimeWarning",
#
        # >>> there are many places in yt (most notably at the frontend level)
        # where we open files but never explicitly close them.
        # Although this is generally bad practice, it can be intentional and
        # justified in contexts where read speed should be optimized.
        # It is not clear at the time of writing how to approach this,
        # so I'm going to ignore this class of warnings altogether for now.
"ignore:unclosed file.*:ResourceWarning",
):
config.addinivalue_line("filterwarnings", value)
if SETUPTOOLS_VERSION is not None and SETUPTOOLS_VERSION >= Version("67.3.0"):
# may be triggered by multiple dependencies
# see https://github.com/glue-viz/glue/issues/2364
# see https://github.com/matplotlib/matplotlib/issues/25244
config.addinivalue_line(
"filterwarnings",
r"ignore:(Deprecated call to `pkg_resources\.declare_namespace\('.*'\)`\.\n)?"
r"Implementing implicit namespace packages \(as specified in PEP 420\) "
r"is preferred to `pkg_resources\.declare_namespace`\.:DeprecationWarning",
)
if SETUPTOOLS_VERSION is not None and SETUPTOOLS_VERSION >= Version("67.5.0"):
# may be triggered by multiple dependencies
# see https://github.com/glue-viz/glue/issues/2364
# see https://github.com/matplotlib/matplotlib/issues/25244
config.addinivalue_line(
"filterwarnings",
"ignore:pkg_resources is deprecated as an API:DeprecationWarning",
)
if MPL_VERSION < Version("3.5.2") and PILLOW_VERSION >= Version("9.1"):
# see https://github.com/matplotlib/matplotlib/pull/22766
config.addinivalue_line(
"filterwarnings",
r"ignore:NONE is deprecated and will be removed in Pillow 10 \(2023-07-01\)\. "
r"Use Resampling\.NEAREST or Dither\.NONE instead\.:DeprecationWarning",
)
config.addinivalue_line(
"filterwarnings",
r"ignore:ADAPTIVE is deprecated and will be removed in Pillow 10 \(2023-07-01\)\. "
r"Use Palette\.ADAPTIVE instead\.:DeprecationWarning",
)
if NUMPY_VERSION >= Version("1.25"):
if find_spec("h5py") is not None and (
Version(version("h5py")) < Version("3.9")
):
# https://github.com/h5py/h5py/pull/2242
config.addinivalue_line(
"filterwarnings",
"ignore:`product` is deprecated as of NumPy 1.25.0"
":DeprecationWarning",
)
if PANDAS_VERSION is not None and PANDAS_VERSION >= Version("2.2.0"):
config.addinivalue_line(
"filterwarnings",
r"ignore:\s*Pyarrow will become a required dependency of pandas:DeprecationWarning",
)
if sys.version_info >= (3, 12):
# already patched (but not released) upstream:
# https://github.com/dateutil/dateutil/pull/1285
config.addinivalue_line(
"filterwarnings",
r"ignore:datetime\.datetime\.utcfromtimestamp\(\) is deprecated:DeprecationWarning",
)
if find_spec("ratarmount"):
# On Python 3.12+, there is a deprecation warning when calling os.fork()
# in a multi-threaded process. We use this mechanism to mount archives.
config.addinivalue_line(
"filterwarnings",
r"ignore:This process \(pid=\d+\) is multi-threaded, use of fork\(\) "
r"may lead to deadlocks in the child\."
":DeprecationWarning",
)


def pytest_collection_modifyitems(config, items):
r"""
Decide which tests to skip based on command-line options.
"""
# Set up the skip marks
skip_answer = pytest.mark.skip(reason="--with-answer-testing not set.")
skip_unit = pytest.mark.skip(reason="Running answer tests, so skipping unit tests.")
skip_big = pytest.mark.skip(reason="--answer-big-data not set.")
# Loop over every collected test function
for item in items:
# If it's an answer test and the appropriate CL option hasn't
# been set, skip it
if "answer_test" in item.keywords and not config.getoption(
"--with-answer-testing"
):
item.add_marker(skip_answer)
# If it's an answer test that requires big data and the CL
# option hasn't been set, skip it
if (
"big_data" in item.keywords
and not config.getoption("--with-answer-testing")
and not config.getoption("--answer-big-data")
):
item.add_marker(skip_big)
if "answer_test" not in item.keywords and config.getoption(
"--with-answer-testing"
):
item.add_marker(skip_unit)


def pytest_itemcollected(item):
# Customize pytest-mpl decorator to add sensible defaults
mpl_marker = item.get_closest_marker("mpl_image_compare")
if mpl_marker is not None:
# in a future version, pytest-mpl may gain an option for doing this:
# https://github.com/matplotlib/pytest-mpl/pull/181
mpl_marker.kwargs.setdefault("tolerance", 0.5)
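        # Net effect: a bare @pytest.mark.mpl_image_compare now behaves like
        # @pytest.mark.mpl_image_compare(tolerance=0.5), while tests that pass
        # an explicit tolerance keep their own value.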


def _param_list(request):
    r"""
    Collects the non-ds, non-fixture test arguments so they can be saved
    to the answer file.
    """
    # pytest treats parametrized arguments as fixtures, so there's no
    # clean way (that I know of) to separate them from other fixtures;
    # we do it explicitly with a blacklist
blacklist = [
"hashing",
"answer_file",
"request",
"answer_compare",
"temp_dir",
"orbit_traj",
"etc_traj",
]
test_params = {}
for key, val in request.node.funcargs.items():
if key not in blacklist:
# For plotwindow, the callback arg is a tuple and the second
# element contains a memory address, so we need to drop it.
# The first element is the callback name, which is all that's
# needed
if key == "callback":
val = val[0]
test_params[key] = str(val)
# Convert python-specific data objects (such as tuples) to a more
# io-friendly format (in order to not have python-specific anchors
# in the answer yaml file)
test_params = _streamline_for_io(test_params)
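    # At this point test_params is a flat dict of strings, e.g.
    # (illustrative): {"axis": "0", "field": "('gas', 'density')"}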
return test_params


def _get_answer_files(request):
    """
    Gets the paths where the hashed and raw answers are saved.
    """
    answer_file = f"{request.cls.__name__}_{request.cls.answer_version}.yaml"
    raw_answer_file = f"{request.cls.__name__}_{request.cls.answer_version}.h5"
    # Add the local-dir aspect of the path. A command-line value overrides
    # the ini file value
    local_dir = request.config.getoption("--local-dir")
    if local_dir is None:
        local_dir = request.config.getini("local-dir")
    local_dir = os.path.expanduser(local_dir)
    answer_file = os.path.join(local_dir, answer_file)
    raw_answer_file = os.path.join(local_dir, raw_answer_file)
# Make sure we don't overwrite unless we mean to
overwrite = request.config.getoption("--force-overwrite")
storing = request.config.getoption("--answer-store")
raw_storing = request.config.getoption("--raw-answer-store")
raw = request.config.getoption("--answer-raw-arrays")
if os.path.exists(answer_file) and storing and not overwrite:
raise FileExistsError(
"Use `--force-overwrite` to overwrite an existing answer file."
)
if os.path.exists(raw_answer_file) and raw_storing and raw and not overwrite:
raise FileExistsError(
"Use `--force-overwrite` to overwrite an existing raw answer file."
)
# If we do mean to overwrite, do so here by deleting the original file
if os.path.exists(answer_file) and storing and overwrite:
os.remove(answer_file)
if os.path.exists(raw_answer_file) and raw_storing and raw and overwrite:
os.remove(raw_answer_file)
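    # Echo the resolved answer-file path (presumably useful in CI logs)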
print(os.path.abspath(answer_file))
return answer_file, raw_answer_file
@pytest.fixture(scope="function")
def hashing(request):
r"""
Handles initialization, generation, and saving of answer test
result hashes.
"""
no_hash = request.config.getoption("--no-hash")
store_hash = request.config.getoption("--answer-store")
raw = request.config.getoption("--answer-raw-arrays")
raw_store = request.config.getoption("--raw-answer-store")
    # This check ensures that _get_answer_files is called only once per class,
    # even though this fixture has function scope; otherwise its answer-file
    # existence check would fail on every test after the first
if request.cls.answer_file is None:
request.cls.answer_file, request.cls.raw_answer_file = _get_answer_files(
request
)
    # Load the saved answers if we're comparing. We don't do this for the raw
    # answers because those are huge
    if not no_hash and not store_hash and request.cls.saved_hashes is None:
try:
with open(request.cls.answer_file) as fd:
request.cls.saved_hashes = yaml.safe_load(fd)
except FileNotFoundError:
module_filename = f"{request.function.__module__.replace('.', os.sep)}.py"
with open(f"generate_test_{os.getpid()}.txt", "a") as fp:
fp.write(f"{module_filename}::{request.cls.__name__}\n")
            pytest.fail("Answer file not found.", pytrace=False)
    request.cls.hashes = {}
    yield
# Get arguments and their values passed to the test (e.g., axis, field, etc.)
params = _param_list(request)
# Hash the test results. Don't save to request.cls.hashes so we still have
# raw data, in case we want to work with that
hashes = _hash_results(request.cls.hashes)
# Add the other test parameters
hashes.update(params)
# Add the function name as the "master" key to the hashes dict
hashes = {request.node.name: hashes}
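    # e.g. (illustrative):
    #   {"test_slice[density-0]": {"slice": "<hash hexdigest>", "axis": "0"}}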
# Save hashes
if not no_hash and store_hash:
_save_result(hashes, request.cls.answer_file)
# Compare hashes
elif not no_hash and not store_hash:
try:
for test_name, test_hash in hashes.items():
assert test_name in request.cls.saved_hashes
assert test_hash == request.cls.saved_hashes[test_name]
except AssertionError:
pytest.fail(f"Comparison failure: {request.node.name}", pytrace=False)
# Save raw data
if raw and raw_store:
_save_raw_arrays(
request.cls.hashes, request.cls.raw_answer_file, request.node.name
)
# Compare raw data. This is done one test at a time because the
# arrays can get quite large and storing everything in memory would
# be bad
if raw and not raw_store:
_compare_raw_arrays(
request.cls.hashes, request.cls.raw_answer_file, request.node.name
)
@pytest.fixture(scope="function")
def temp_dir():
r"""
Creates a temporary directory needed by certain tests.
"""
curdir = os.getcwd()
if int(os.environ.get("GENERATE_YTDATA", 0)):
tmpdir = os.getcwd()
else:
tmpdir = tempfile.mkdtemp()
os.chdir(tmpdir)
yield tmpdir
os.chdir(curdir)
if tmpdir != curdir:
shutil.rmtree(tmpdir)
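
# Usage (illustrative; `plot` is a stand-in for any object a test saves):
#   def test_save_plot(self, temp_dir):
#       plot.save("tmp.png")  # written inside the temp dir, removed afterwards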
@pytest.fixture(scope="class")
def ds(request):
# data_dir_load can take the cls, args, and kwargs. These optional
# arguments, if present, are given in a dictionary as the second
# element of the list
if isinstance(request.param, str):
ds_fn = request.param
opts = {}
else:
ds_fn, opts = request.param
try:
return data_dir_load(
ds_fn, cls=opts.get("cls"), args=opts.get("args"), kwargs=opts.get("kwargs")
)
    except FileNotFoundError:
        pytest.skip(f"Data file `{ds_fn}` not found.")
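
# Usage (illustrative): parametrize with indirect=True so the string is
# resolved into a loaded dataset by the ds fixture above
#   @pytest.mark.parametrize(
#       "ds", ["IsolatedGalaxy/galaxy0030/galaxy0030"], indirect=True
#   )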
@pytest.fixture(scope="class")
def field(request):
"""
Fixture for returning the field. Needed because indirect=True is
used for loading the datasets.
"""
return request.param
@pytest.fixture(scope="class")
def dobj(request):
"""
Fixture for returning the ds_obj. Needed because indirect=True is
used for loading the datasets.
"""
return request.param
@pytest.fixture(scope="class")
def axis(request):
"""
Fixture for returning the axis. Needed because indirect=True is
used for loading the datasets.
"""
return request.param
@pytest.fixture(scope="class")
def weight(request):
"""
Fixture for returning the weight_field. Needed because
indirect=True is used for loading the datasets.
"""
return request.param
@pytest.fixture(scope="class")
def ds_repr(request):
"""
Fixture for returning the string representation of a dataset.
Needed because indirect=True is used for loading the datasets.
"""
return request.param
@pytest.fixture(scope="class")
def Npart(request):
"""
Fixture for returning the number of particles in a dataset.
Needed because indirect=True is used for loading the datasets.
"""
return request.param