Logging stage consumes custom record formatter class #89
49 tests run: 26 passed, 0 skipped, 23 failed.
Annotations
Check failure on line 44 in tests\test_addoptions.py
github-actions / Test Report (windows-latest, 3.11)
test_addoptions.test_fluentd_logged_parameters
ValueError: Pytest terminal summary report not found
Raw output
monkeypatch = <_pytest.monkeypatch.MonkeyPatch object at 0x0000024A1BE24810>
run_mocked_pytest = (<function runpytest.<locals>.runpytest at 0x0000024A1BDC7B00>, <MagicMock name='FluentSender()' id='2517319061200'>)
session_uuid = UUID('4a10c3aa-3b98-4394-a8fb-7122a426af67')
logging_content = 'Logged from test_base'
pyfile_testcase = "\nimport logging\n\ndef test_base():\n LOGGER = logging.getLogger()\n LOGGER.info('Logged from test_base')\n LOGGER.warning('Logged from test_base')\n assert True\n"
    def test_fluentd_logged_parameters(
        monkeypatch, run_mocked_pytest, session_uuid, logging_content, pyfile_testcase
    ):
        runpytest, fluent_sender = run_mocked_pytest
        monkeypatch.setattr(uuid, "uuid4", lambda: uuid.UUID(FAKE_TEST_UUID))
        result = runpytest(
            f"--session-uuid={session_uuid}",
            f"--fluentd-tag={FLUENTD_TAG}",
            f"--fluentd-label={FLUENTD_LABEL}",
            "--extend-logging",
            pyfile=pyfile_testcase,
        )
        call_args = fluent_sender.emit_with_time.call_args_list
>       result.assert_outcomes(passed=1)
D:\a\pytest-fluent\pytest-fluent\tests\test_addoptions.py:44:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
D:\a\pytest-fluent\pytest-fluent\.tox\test\Lib\site-packages\_pytest\pytester.py:567: in parseoutcomes
    return self.parse_summary_nouns(self.outlines)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
cls = <class '_pytest.pytester.RunResult'>, lines = []
    @classmethod
    def parse_summary_nouns(cls, lines) -> Dict[str, int]:
        """Extract the nouns from a pytest terminal summary line.
        It always returns the plural noun for consistency::
            ======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ====
        Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``.
        """
        for line in reversed(lines):
            if rex_session_duration.search(line):
                outcomes = rex_outcome.findall(line)
                ret = {noun: int(count) for (count, noun) in outcomes}
                break
        else:
>           raise ValueError("Pytest terminal summary report not found")
E           ValueError: Pytest terminal summary report not found
D:\a\pytest-fluent\pytest-fluent\.tox\test\Lib\site-packages\_pytest\pytester.py:585: ValueError
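Every ValueError failure in this report has the same shape: the in-process pytest run produced no terminal output, so RunResult.outlines is empty (note lines = [] in the frame above) and pytester never finds the summary line. A minimal standalone sketch of the failing helper; the two regexes approximate pytester's module-level patterns and are not copied from its source:

    import re
    from typing import Dict, List

    # Hedged approximations of _pytest.pytester's internal regexes.
    rex_session_duration = re.compile(r"\d+\.\d\ds")
    rex_outcome = re.compile(r"(\d+) (\w+)")

    def parse_summary_nouns(lines: List[str]) -> Dict[str, int]:
        """Illustrative re-implementation of the helper shown in the traceback."""
        for line in reversed(lines):
            if rex_session_duration.search(line):
                outcomes = rex_outcome.findall(line)
                return {noun: int(count) for (count, noun) in outcomes}
        raise ValueError("Pytest terminal summary report not found")

    print(parse_summary_nouns(["===== 1 passed in 0.01s ====="]))  # {'passed': 1}
    parse_summary_nouns([])  # raises ValueError, reproducing every failure above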
Check failure on line 124 in tests\test_addoptions.py
github-actions / Test Report (windows-latest, 3.11)
test_addoptions.test_fluentd_with_options_and_timestamp_enabled_shows_timestamp_field_in_output
ValueError: Pytest terminal summary report not found
Raw output
run_mocked_pytest = (<function runpytest.<locals>.runpytest at 0x0000024A1BDC4D60>, <MagicMock name='FluentSender()' id='2517316807056'>)
fluentd_sender = <MagicMock name='FluentSender()' id='2517316807056'>
session_uuid = UUID('4a10c3aa-3b98-4394-a8fb-7122a426af67')
pyfile_testcase = "\nimport logging\n\ndef test_base():\n LOGGER = logging.getLogger()\n LOGGER.info('Logged from test_base')\n LOGGER.warning('Logged from test_base')\n assert True\n"
    def test_fluentd_with_options_and_timestamp_enabled_shows_timestamp_field_in_output(
        run_mocked_pytest, fluentd_sender, session_uuid, pyfile_testcase
    ):
        runpytest, fluentd_sender = run_mocked_pytest
        result = runpytest(
            f"--session-uuid={session_uuid}",
            f"--fluentd-tag={FLUENTD_TAG}",
            f"--fluentd-label={FLUENTD_LABEL}",
            "--fluentd-timestamp=@timestamp",
            "--extend-logging",
            pyfile=pyfile_testcase,
        )
>       result.assert_outcomes(passed=1)
D:\a\pytest-fluent\pytest-fluent\tests\test_addoptions.py:124:
[pytester traceback identical to the first failure: parse_summary_nouns over empty outlines raises ValueError at pytester.py:585]
Check failure on line 141 in tests\test_addoptions.py
github-actions / Test Report (windows-latest, 3.11)
test_addoptions.test_fluentd_with_timestamp_enabled_shows_timestamp_field_in_output
ValueError: Pytest terminal summary report not found
Raw output
run_mocked_pytest = (<function runpytest.<locals>.runpytest at 0x0000024A1BEAD4E0>, <MagicMock name='FluentSender()' id='2517317147216'>)
session_uuid = UUID('4a10c3aa-3b98-4394-a8fb-7122a426af67')
pyfile_testcase = "\nimport logging\n\ndef test_base():\n LOGGER = logging.getLogger()\n LOGGER.info('Logged from test_base')\n LOGGER.warning('Logged from test_base')\n assert True\n"
    def test_fluentd_with_timestamp_enabled_shows_timestamp_field_in_output(
        run_mocked_pytest, session_uuid, pyfile_testcase
    ):
        runpytest, fluentd_sender = run_mocked_pytest
        result = runpytest(
            f"--session-uuid={session_uuid}",
            "--fluentd-timestamp=@timestamp",
            pyfile=pyfile_testcase,
        )
>       result.assert_outcomes(passed=1)
D:\a\pytest-fluent\pytest-fluent\tests\test_addoptions.py:141:
[pytester traceback identical to the first failure: parse_summary_nouns over empty outlines raises ValueError at pytester.py:585]
Check failure on line 18 in tests\test_docstrings.py
github-actions / Test Report (windows-latest, 3.11)
test_docstrings.test_add_docstrings
ValueError: Pytest terminal summary report not found
Raw output
run_mocked_pytest = (<function runpytest.<locals>.runpytest at 0x0000024A1BD55580>, <MagicMock name='FluentSender()' id='2517318140304'>)
session_uuid = UUID('4a10c3aa-3b98-4394-a8fb-7122a426af67')
    def test_add_docstrings(run_mocked_pytest, session_uuid):
        runpytest, fluent_sender = run_mocked_pytest
        result = runpytest(
            f"--session-uuid={session_uuid}",
            "--add-docstrings",
            pyfile=f"""
            def test_base():
                '''
                {TEST_DOCSTRING}
                '''
                assert True
            """,
        )
        call_args = fluent_sender.emit_with_time.call_args_list
>       result.assert_outcomes(passed=1)
D:\a\pytest-fluent\pytest-fluent\tests\test_docstrings.py:18:
[pytester traceback identical to the first failure: parse_summary_nouns over empty outlines raises ValueError at pytester.py:585]
Check failure on line 38 in tests\test_docstrings.py
github-actions / Test Report (windows-latest, 3.11)
test_docstrings.test_docstrings_disabled
ValueError: Pytest terminal summary report not found
Raw output
run_mocked_pytest = (<function runpytest.<locals>.runpytest at 0x0000024A1BD54680>, <MagicMock name='FluentSender()' id='2517272578704'>)
session_uuid = UUID('4a10c3aa-3b98-4394-a8fb-7122a426af67')
    def test_docstrings_disabled(run_mocked_pytest, session_uuid):
        runpytest, fluent_sender = run_mocked_pytest
        result = runpytest(
            f"--session-uuid={session_uuid}",
            pyfile=f"""
            def test_base():
                '''
                {TEST_DOCSTRING}
                '''
                assert True
            """,
        )
        call_args = fluent_sender.emit_with_time.call_args_list
>       result.assert_outcomes(passed=1)
D:\a\pytest-fluent\pytest-fluent\tests\test_docstrings.py:38:
[pytester traceback identical to the first failure: parse_summary_nouns over empty outlines raises ValueError at pytester.py:585]
Check failure on line 55 in tests\test_docstrings.py
github-actions / Test Report (windows-latest, 3.11)
test_docstrings.test_missing_docstring
ValueError: Pytest terminal summary report not found
Raw output
run_mocked_pytest = (<function runpytest.<locals>.runpytest at 0x0000024A191CB920>, <MagicMock name='FluentSender()' id='2517319365136'>)
session_uuid = UUID('4a10c3aa-3b98-4394-a8fb-7122a426af67')
    def test_missing_docstring(run_mocked_pytest, session_uuid):
        runpytest, fluent_sender = run_mocked_pytest
        result = runpytest(
            f"--session-uuid={session_uuid}",
            "--add-docstrings",
            pyfile="""
            def test_base():
                assert True
            """,
        )
        call_args = fluent_sender.emit_with_time.call_args_list
>       result.assert_outcomes(passed=1)
D:\a\pytest-fluent\pytest-fluent\tests\test_docstrings.py:55:
[pytester traceback identical to the first failure: parse_summary_nouns over empty outlines raises ValueError at pytester.py:585]
Check failure on line 14 in tests\test_fixtures.py
github-actions / Test Report (windows-latest, 3.11)
test_fixtures.test_get_logger
ValueError: Pytest terminal summary report not found
Raw output
run_mocked_pytest = (<function runpytest.<locals>.runpytest at 0x0000024A1BEAF4C0>, <MagicMock name='FluentSender()' id='2517318537104'>)
session_uuid = UUID('4a10c3aa-3b98-4394-a8fb-7122a426af67')
logging_content = 'Logged from test_base'
    def test_get_logger(run_mocked_pytest, session_uuid, logging_content):
        runpytest, fluent_sender = run_mocked_pytest
        result = runpytest(
            f"--session-uuid={session_uuid}",
            "--extend-logging",
            pyfile=f"""
            def test_base(get_logger):
                LOGGER = get_logger('my.Logger')
                LOGGER.info('{logging_content}')
                assert True
            """,
        )
        call_args = fluent_sender.emit_with_time.call_args_list
>       result.assert_outcomes(passed=1)
D:\a\pytest-fluent\pytest-fluent\tests\test_fixtures.py:14:
[pytester traceback identical to the first failure: parse_summary_nouns over empty outlines raises ValueError at pytester.py:585]
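For context, the record shape that --extend-logging is expected to forward per log call, reconstructed from the expected_result data in tests\test_reporting_patching.py further below (illustrative only; the plugin's real formatter may differ):

    logging_record = {
        "type": "logging",
        "where": "test_module.test_base",  # emitting logger location (illustrative name)
        "level": "INFO",
        "stack_trace": "None",
        "message": "Logged from test_base",
        "sessionId": "4a10c3aa-3b98-4394-a8fb-7122a426af67",
        "stage": "testcase",
    }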
Check failure on line 39 in tests\test_fixtures.py
github-actions / Test Report (windows-latest, 3.11)
test_fixtures.test_uid_fixtures
ValueError: Pytest terminal summary report not found
Raw output
run_mocked_pytest = (<function runpytest.<locals>.runpytest at 0x0000024A1BF054E0>, <MagicMock name='FluentSender()' id='2517318302096'>)
session_uuid = UUID('4a10c3aa-3b98-4394-a8fb-7122a426af67')
    def test_uid_fixtures(run_mocked_pytest, session_uuid):
        runpytest, _ = run_mocked_pytest
        result = runpytest(
            f"--session-uuid={session_uuid}",
            pyfile="""
            from pytest_fluent import get_session_uid, get_test_uid
            def test_base(session_uid, test_uid):
                assert session_uid == get_session_uid()
                assert test_uid == get_test_uid()
            """,
        )
>       result.assert_outcomes(passed=1)
D:\a\pytest-fluent\pytest-fluent\tests\test_fixtures.py:39:
[pytester traceback identical to the first failure: parse_summary_nouns over empty outlines raises ValueError at pytester.py:585]
Check failure on line 53 in tests\test_ini_configuration.py
github-actions / Test Report (windows-latest, 3.11)
test_ini_configuration.test_ini_setting[tox_ini]
Failed: remains unmatched: '*passed*'
Raw output
pytester = <Pytester WindowsPath('C:/Users/runneradmin/AppData/Local/Temp/pytest-of-unknown/pytest-0/test_ini_setting0')>
run_mocked_pytest = (<function runpytest.<locals>.runpytest at 0x0000024A1BF072E0>, <MagicMock name='FluentSender()' id='2517319170704'>)
session_uuid = UUID('4a10c3aa-3b98-4394-a8fb-7122a426af67')
ini_path = 'tox_ini'
request = <FixtureRequest for <Function test_ini_setting[tox_ini]>>
    @pytest.mark.parametrize("ini_path", ["tox_ini", "pyprojtoml_ini", "pytest_ini"])
    def test_ini_setting(pytester, run_mocked_pytest, session_uuid, ini_path, request):
        runpytest, _ = run_mocked_pytest
        ini_file = request.getfixturevalue(ini_path)
        logger.debug("Generated ini file: %s", ini_file)
        filename = make_py_file(pytester, session_uuid, TAG, LABEL, PORT, HOSTNAME, True)
        logger.debug("Generated python module: %s", filename)
        result = runpytest("-v")
>       result.stdout.fnmatch_lines(
            [
                "*passed*",
            ]
        )
E       Failed: remains unmatched: '*passed*'
D:\a\pytest-fluent\pytest-fluent\tests\test_ini_configuration.py:53: Failed
Check failure on line 53 in tests\test_ini_configuration.py
github-actions / Test Report (windows-latest, 3.11)
test_ini_configuration.test_ini_setting[pyprojtoml_ini]
Failed: remains unmatched: '*passed*'
Raw output
pytester = <Pytester WindowsPath('C:/Users/runneradmin/AppData/Local/Temp/pytest-of-unknown/pytest-0/test_ini_setting1')>
run_mocked_pytest = (<function runpytest.<locals>.runpytest at 0x0000024A1BF06E80>, <MagicMock name='FluentSender()' id='2517318067280'>)
session_uuid = UUID('4a10c3aa-3b98-4394-a8fb-7122a426af67')
ini_path = 'pyprojtoml_ini'
request = <FixtureRequest for <Function test_ini_setting[pyprojtoml_ini]>>
[test source and failure identical to the tox_ini case above; Failed at tests\test_ini_configuration.py:53]
Check failure on line 53 in tests\test_ini_configuration.py
github-actions / Test Report (windows-latest, 3.11)
test_ini_configuration.test_ini_setting[pytest_ini]
Failed: remains unmatched: '*passed*'
Raw output
pytester = <Pytester WindowsPath('C:/Users/runneradmin/AppData/Local/Temp/pytest-of-unknown/pytest-0/test_ini_setting2')>
run_mocked_pytest = (<function runpytest.<locals>.runpytest at 0x0000024A1BF042C0>, <MagicMock name='FluentSender()' id='2517316807056'>)
session_uuid = UUID('4a10c3aa-3b98-4394-a8fb-7122a426af67')
ini_path = 'pytest_ini'
request = <FixtureRequest for <Function test_ini_setting[pytest_ini]>>
[test source and failure identical to the tox_ini case above; Failed at tests\test_ini_configuration.py:53]
Check failure on line 80 in tests\test_ini_configuration.py
github-actions / Test Report (windows-latest, 3.11)
test_ini_configuration.test_cli_args_precedence[tox_ini]
Failed: remains unmatched: '*passed*'
Raw output
pytester = <Pytester WindowsPath('C:/Users/runneradmin/AppData/Local/Temp/pytest-of-unknown/pytest-0/test_cli_args_precedence0')>
run_mocked_pytest = (<function runpytest.<locals>.runpytest at 0x0000024A1BD54E00>, <MagicMock name='FluentSender()' id='2517318878736'>)
ini_path = 'tox_ini'
request = <FixtureRequest for <Function test_cli_args_precedence[tox_ini]>>
    @pytest.mark.parametrize("ini_path", ["tox_ini", "pyprojtoml_ini", "pytest_ini"])
    def test_cli_args_precedence(pytester, run_mocked_pytest, ini_path, request):
        runpytest, _ = run_mocked_pytest
        fluent_tag = "dummytest"
        fluent_label = "pytester"
        fluent_port = 65535
        ini_file = request.getfixturevalue(ini_path)
        logger.debug("Generated ini file: %s", ini_file)
        filename = make_py_file(
            pytester, tag=fluent_tag, label=fluent_label, port=fluent_port, is_logging=True
        )
        logger.debug("Generated python module: %s", filename)
        result = runpytest(
            f"--fluentd-tag={fluent_tag}",
            f"--fluentd-label={fluent_label}",
            f"--fluentd-port={fluent_port}",
        )
>       result.stdout.fnmatch_lines(
            [
                "*passed*",
            ]
        )
E       Failed: remains unmatched: '*passed*'
D:\a\pytest-fluent\pytest-fluent\tests\test_ini_configuration.py:80: Failed
Check failure on line 80 in tests\test_ini_configuration.py
github-actions / Test Report (windows-latest, 3.11)
test_ini_configuration.test_cli_args_precedence[pyprojtoml_ini]
Failed: remains unmatched: '*passed*'
Raw output
pytester = <Pytester WindowsPath('C:/Users/runneradmin/AppData/Local/Temp/pytest-of-unknown/pytest-0/test_cli_args_precedence1')>
run_mocked_pytest = (<function runpytest.<locals>.runpytest at 0x0000024A1BF06B60>, <MagicMock name='FluentSender()' id='2517317963152'>)
ini_path = 'pyprojtoml_ini'
request = <FixtureRequest for <Function test_cli_args_precedence[pyprojtoml_ini]>>
[test source and failure identical to the tox_ini case above; Failed at tests\test_ini_configuration.py:80]
Check failure on line 80 in tests\test_ini_configuration.py
github-actions / Test Report (windows-latest, 3.11)
test_ini_configuration.test_cli_args_precedence[pytest_ini]
Failed: remains unmatched: '*passed*'
Raw output
pytester = <Pytester WindowsPath('C:/Users/runneradmin/AppData/Local/Temp/pytest-of-unknown/pytest-0/test_cli_args_precedence2')>
run_mocked_pytest = (<function runpytest.<locals>.runpytest at 0x0000024A1BD579C0>, <MagicMock name='FluentSender()' id='2517317859280'>)
ini_path = 'pytest_ini'
request = <FixtureRequest for <Function test_cli_args_precedence[pytest_ini]>>
[test source and failure identical to the tox_ini case above; Failed at tests\test_ini_configuration.py:80]
Check failure on line 93 in tests\test_ini_configuration.py
github-actions / Test Report (windows-latest, 3.11)
test_ini_configuration.test_commandline_args
Failed: remains unmatched: '*passed*'
Raw output
pytester = <Pytester WindowsPath('C:/Users/runneradmin/AppData/Local/Temp/pytest-of-unknown/pytest-0/test_commandline_args0')>
run_mocked_pytest = (<function runpytest.<locals>.runpytest at 0x0000024A1BD565C0>, <MagicMock name='FluentSender()' id='2517318592208'>)
    def test_commandline_args(pytester, run_mocked_pytest):
        runpytest, _ = run_mocked_pytest
        filename = make_py_file(pytester, tag=TAG, is_logging=True)
        logger.debug("Generated python module: %s", filename)
        result = runpytest("--extend-logging", f"--fluentd-tag={TAG}")
>       result.stdout.fnmatch_lines(
            [
                "*passed*",
            ]
        )
E       Failed: remains unmatched: '*passed*'
D:\a\pytest-fluent\pytest-fluent\tests\test_ini_configuration.py:93: Failed
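All seven fnmatch failures report remains unmatched: '*passed*' for the same underlying reason: the in-process run captured no stdout to match against. A self-contained sketch of how the matching step behaves, using pytest's public pytester fixture (requires pytest_plugins = ["pytester"] in a conftest; the test name here is illustrative):

    def test_summary_line_is_matched(pytester):
        pytester.makepyfile(
            """
            def test_base():
                assert True
            """
        )
        result = pytester.runpytest("-v")
        # fnmatch_lines raises Failed("remains unmatched: '*passed*'") as soon as
        # a glob finds no matching captured line; with empty stdout it always fails.
        result.stdout.fnmatch_lines(["*passed*"])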
Check failure on line 11 in tests\test_reporting.py
github-actions / Test Report (windows-latest, 3.11)
test_reporting.test_data_reporter_base_with_passed
ValueError: Pytest terminal summary report not found
Raw output
run_mocked_pytest = (<function runpytest.<locals>.runpytest at 0x0000024A191DC360>, <MagicMock name='FluentSender()' id='2517319160592'>)
session_uuid = UUID('4a10c3aa-3b98-4394-a8fb-7122a426af67')
    def test_data_reporter_base_with_passed(run_mocked_pytest, session_uuid):
        runpytest, fluent_sender = run_mocked_pytest
        result = runpytest(
            f"--session-uuid={session_uuid}",
            pyfile="""
            def test_base():
                assert True
            """,
        )
        call_args = fluent_sender.emit_with_time.call_args_list
>       result.assert_outcomes(passed=1)
D:\a\pytest-fluent\pytest-fluent\tests\test_reporting.py:11:
[pytester traceback identical to the first failure: parse_summary_nouns over empty outlines raises ValueError at pytester.py:585]
Check failure on line 51 in tests\test_reporting.py
github-actions / Test Report (windows-latest, 3.11)
test_reporting.test_data_reporter_xdist_passed
ValueError: Pytest terminal summary report not found
Raw output
run_mocked_pytest = (<function runpytest.<locals>.runpytest at 0x0000024A1BF051C0>, <MagicMock name='FluentSender()' id='2517319095248'>)
session_uuid = UUID('4a10c3aa-3b98-4394-a8fb-7122a426af67')
    def test_data_reporter_xdist_passed(run_mocked_pytest, session_uuid):
        runpytest, fluent_sender = run_mocked_pytest
        result = runpytest(
            "-n 2",
            f"--session-uuid={session_uuid}",
            pyfile="""
            def test_base_group_one():
                assert True
            def test_base_group_two():
                assert True
            def test_base_group_three():
                assert True
            def test_base_group_four():
                assert True
            def test_base_group_five():
                assert True
            def test_base_group_six():
                assert True
            """,
        )
        call_args = fluent_sender.emit_with_time.call_args_list
>       result.assert_outcomes(passed=6)
D:\a\pytest-fluent\pytest-fluent\tests\test_reporting.py:51:
[pytester traceback identical to the first failure: parse_summary_nouns over empty outlines raises ValueError at pytester.py:585]
Check failure on line 90 in tests\test_reporting.py
github-actions / Test Report (windows-latest, 3.11)
test_reporting.test_data_reporter_base_with_xfail
assert 0 > 0
+ where 0 = len([])
Raw output
run_mocked_pytest = (<function runpytest.<locals>.runpytest at 0x0000024A1B8E11C0>, <MagicMock name='FluentSender()' id='2517319016336'>)
session_uuid = UUID('4a10c3aa-3b98-4394-a8fb-7122a426af67')
    def test_data_reporter_base_with_xfail(run_mocked_pytest, session_uuid):
        runpytest, fluent_sender = run_mocked_pytest
        _ = runpytest(
            f"--session-uuid={session_uuid}",
            pyfile="""
            import pytest
            @pytest.mark.xfail
            def test_base():
                assert False
            """,
        )
        call_args = fluent_sender.emit_with_time.call_args_list
>       assert len(call_args) > 0
E       assert 0 > 0
E        +  where 0 = len([])
D:\a\pytest-fluent\pytest-fluent\tests\test_reporting.py:90: AssertionError
Check failure on line 108 in tests\test_reporting.py
github-actions / Test Report (windows-latest, 3.11)
test_reporting.test_data_reporter_base_with_exception
assert 0 > 0
+ where 0 = len([])
Raw output
run_mocked_pytest = (<function runpytest.<locals>.runpytest at 0x0000024A1BEAE980>, <MagicMock name='FluentSender()' id='2517318890256'>)
session_uuid = UUID('4a10c3aa-3b98-4394-a8fb-7122a426af67')
    def test_data_reporter_base_with_exception(run_mocked_pytest, session_uuid):
        runpytest, fluent_sender = run_mocked_pytest
        _ = runpytest(
            f"--session-uuid={session_uuid}",
            pyfile="""
            def test_base():
                raise Exception('TestException')
                assert True
            """,
        )
        call_args = fluent_sender.emit_with_time.call_args_list
>       assert len(call_args) > 0
E       assert 0 > 0
E        +  where 0 = len([])
D:\a\pytest-fluent\pytest-fluent\tests\test_reporting.py:108: AssertionError
Check failure on line 134 in tests\test_reporting.py
github-actions / Test Report (windows-latest, 3.11)
test_reporting.test_data_reporter_base_with_setup_exception
assert 0 > 0
+ where 0 = len([])
Raw output
run_mocked_pytest = (<function runpytest.<locals>.runpytest at 0x0000024A1BEAF600>, <MagicMock name='FluentSender()' id='2517318148944'>)
session_uuid = UUID('4a10c3aa-3b98-4394-a8fb-7122a426af67')
    def test_data_reporter_base_with_setup_exception(run_mocked_pytest, session_uuid):
        runpytest, fluent_sender = run_mocked_pytest
        _ = runpytest(
            f"--session-uuid={session_uuid}",
            pyfile="""
            import pytest
            @pytest.fixture
            def my_value() -> str:
                val = '1'
                raise ValueError('Value is wrong')
                return val
            def test_base(my_value):
                raise Exception('TestException')
                assert True
            """,
        )
        call_args = fluent_sender.emit_with_time.call_args_list
>       assert len(call_args) > 0
E       assert 0 > 0
E        +  where 0 = len([])
D:\a\pytest-fluent\pytest-fluent\tests\test_reporting.py:134: AssertionError
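The three assert 0 > 0 failures above all inspect the mocked FluentSender and find that the plugin never emitted anything. A sketch of the inspection pattern with plain unittest.mock; the emit_with_time(label, timestamp, data) argument order follows fluent-logger's sender, and the wiring here is illustrative rather than the run_mocked_pytest fixture's actual code:

    from unittest import mock

    fluent_sender = mock.MagicMock(name="FluentSender()")

    # The plugin would normally report each stage roughly like this:
    fluent_sender.emit_with_time("passed", 1700000000, {"status": "start", "stage": "session"})

    call_args = fluent_sender.emit_with_time.call_args_list
    assert len(call_args) > 0                      # holds here ...
    payloads = [args[0][2] for args in call_args]  # data dict is the third positional arg
    print(payloads)
    # ... but in the failing tests call_args_list was [], i.e. no events reached the sender.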
Check failure on line 168 in tests\test_reporting_patching.py
github-actions / Test Report (windows-latest, 3.11)
test_reporting_patching.test_data_reporter_with_patched_values[patch_file_content0-expected_result0-True]
ValueError: Pytest terminal summary report not found
Raw output
pytester = <Pytester WindowsPath('C:/Users/runneradmin/AppData/Local/Temp/pytest-of-unknown/pytest-0/test_data_reporter_with_patched_values0')>
run_mocked_pytest = (<function runpytest.<locals>.runpytest at 0x0000024A1BECA980>, <MagicMock name='FluentSender()' id='2517319198032'>)
session_uuid = UUID('4a10c3aa-3b98-4394-a8fb-7122a426af67')
patch_file_content = {'all': {'label': '<fluentd-label>', 'tag': '<fluentd-tag>'}}
expected_result = [{'sessionId': '4a10c3aa-3b98-4394-a8fb-7122a426af67', 'stage': 'session', 'status': 'start'}, {'name': 'test_data_rep...e', 'status': 'finish'}, {'sessionId': '4a10c3aa-3b98-4394-a8fb-7122a426af67', 'stage': 'session', 'status': 'finish'}]
extend_logging = True
    @pytest.mark.parametrize(
        "patch_file_content,expected_result,extend_logging",
        [
            (
                {"all": {"tag": "<fluentd-tag>", "label": "<fluentd-label>"}},
                [
                    {
                        "status": "start",
                        "stage": "session",
                        "sessionId": str(SESSION_UUID),
                    },
                    {
                        "status": "start",
                        "stage": "testcase",
                        "sessionId": str(SESSION_UUID),
                        "name": "test_data_reporter_with_patched_values.py::test_base",
                    },
                    {
                        "type": "logging",
                        "where": "test_data_reporter_with_patched_values.test_base",
                        "level": "INFO",
                        "stack_trace": "None",
                        "message": "Test running",
                        "sessionId": str(SESSION_UUID),
                        "stage": "testcase",
                    },
                    {
                        "name": "test_data_reporter_with_patched_values.py::test_base",
                        "outcome": "passed",
                        "stage": "testcase",
                        "when": "call",
                        "sessionId": str(SESSION_UUID),
                    },
                    {
                        "status": "finish",
                        "stage": "testcase",
                        "sessionId": str(SESSION_UUID),
                        "name": "test_data_reporter_with_patched_values.py::test_base",
                    },
                    {
                        "status": "finish",
                        "stage": "session",
                        "sessionId": str(SESSION_UUID),
                    },
                ],
                True,
            ),
            (
                {
                    "all": {
                        "tag": "<fluentd-tag>",
                        "label": "<fluentd-label>",
                        "replace": {
                            "keys": {"status": "state", "sessionId": "id"},
                        },
                        "drop": ["stage", "state"],
                    },
                    "pytest_runtest_logreport": {
                        "replace": {
                            "values": {"passed": "pass", "failed": "fail"},
                        },
                        "add": {"stop_info": "Testcase finished"},
                        "drop": ["when"],
                    },
                },
                [
                    {
                        "id": str(SESSION_UUID),
                    },
                    {
                        "id": str(SESSION_UUID),
                        "name": "test_data_reporter_with_patched_values.py::test_base",
                    },
                    {
                        "type": "logging",
                        "where": "test_data_reporter_with_patched_values.test_base",
                        "level": "INFO",
                        "stack_trace": "None",
                        "message": "Test running",
                        "id": str(SESSION_UUID),
                    },
                    {
                        "name": "test_data_reporter_with_patched_values.py::test_base",
                        "outcome": "pass",
                        "id": str(SESSION_UUID),
                        "stop_info": "Testcase finished",
                    },
                    {
                        "id": str(SESSION_UUID),
                        "name": "test_data_reporter_with_patched_values.py::test_base",
                    },
                    {
                        "id": str(SESSION_UUID),
                    },
                ],
                True,
            ),
            (
                {
                    "all": {
                        "tag": "",
                        "label": "",
                        "replace": {
                            "keys": {"status": "state", "sessionId": "id"},
                        },
                    },
                    "pytest_runtest_logreport": {
                        "tag": "<fluentd-tag>",
                        "label": "<fluentd-label>",
                        "replace": {
                            "values": {"passed": "pass", "failed": "fail"},
                        },
                        "add": {"stop_info": "Testcase finished"},
                        "drop": ["when"],
                    },
                },
                [
                    {
                        "name": "test_data_reporter_with_patched_values.py::test_base",
                        "outcome": "pass",
                        "stage": "testcase",
                        "id": str(SESSION_UUID),
                        "stop_info": "Testcase finished",
                    }
                ],
                False,
            ),
        ],
    )
    def test_data_reporter_with_patched_values(
        pytester,
        run_mocked_pytest,
        session_uuid,
        patch_file_content,
        expected_result,
        extend_logging,
    ):
        runpytest, fluent_sender = run_mocked_pytest
        pytester.makefile(".json", patch_file=json.dumps(patch_file_content))
        log_content = "Test running"
        args = [
            f"--session-uuid={session_uuid}",
            "--stage-settings=patch_file.json",
        ]
        if extend_logging:
            args.append("--extend-logging")
        result = runpytest(
            *args,
            pyfile=f"""
            import logging
            def test_base():
                logger = logging.getLogger()
                logger.info("{log_content}")
                assert True
            """,
        )
        call_args = fluent_sender.emit_with_time.call_args_list
        call_args = [x[0][2] for x in call_args]
>       result.assert_outcomes(passed=1)
D:\a\pytest-fluent\pytest-fluent\tests\test_reporting_patching.py:168:
[pytester traceback identical to the first failure: parse_summary_nouns over empty outlines raises ValueError at pytester.py:585]
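The patch files exercised here rewrite each emitted record per stage. A minimal sketch of the replace/add/drop semantics implied by the parametrized cases; this is inferred from the fixture data, not the plugin's actual implementation:

    def apply_stage_patch(record: dict, patch: dict) -> dict:
        # Inferred order: rename keys, replace values, add fields, then drop keys.
        out = dict(record)
        replace = patch.get("replace", {})
        for old_key, new_key in replace.get("keys", {}).items():
            if old_key in out:
                out[new_key] = out.pop(old_key)
        for old, new in replace.get("values", {}).items():
            out = {k: (new if v == old else v) for k, v in out.items()}
        out.update(patch.get("add", {}))
        for key in patch.get("drop", []):
            out.pop(key, None)
        return out

    record = {"outcome": "passed", "sessionId": "4a10c3aa", "when": "call", "stage": "testcase"}
    patch = {
        "replace": {"keys": {"sessionId": "id"}, "values": {"passed": "pass"}},
        "add": {"stop_info": "Testcase finished"},
        "drop": ["when"],
    }
    print(apply_stage_patch(record, patch))
    # {'outcome': 'pass', 'stage': 'testcase', 'id': '4a10c3aa', 'stop_info': 'Testcase finished'}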
Check failure on line 168 in tests\test_reporting_patching.py
github-actions / Test Report (windows-latest, 3.11)
test_reporting_patching.test_data_reporter_with_patched_values[patch_file_content1-expected_result1-True]
ValueError: Pytest terminal summary report not found
Raw output
pytester = <Pytester WindowsPath('C:/Users/runneradmin/AppData/Local/Temp/pytest-of-unknown/pytest-0/test_data_reporter_with_patched_values1')>
run_mocked_pytest = (<function runpytest.<locals>.runpytest at 0x0000024A1BEAEE80>, <MagicMock name='FluentSender()' id='2517319033552'>)
session_uuid = UUID('4a10c3aa-3b98-4394-a8fb-7122a426af67')
patch_file_content = {'all': {'drop': ['stage', 'state'], 'label': '<fluentd-label>', 'replace': {'keys': {'sessionId': 'id', 'status': 'st...d': {'stop_info': 'Testcase finished'}, 'drop': ['when'], 'replace': {'values': {'failed': 'fail', 'passed': 'pass'}}}}
expected_result = [{'id': '4a10c3aa-3b98-4394-a8fb-7122a426af67'}, {'id': '4a10c3aa-3b98-4394-a8fb-7122a426af67', 'name': 'test_data_rep...af67', 'name': 'test_data_reporter_with_patched_values.py::test_base'}, {'id': '4a10c3aa-3b98-4394-a8fb-7122a426af67'}]
extend_logging = True
[parametrized test source and failing assert_outcomes call identical to the first patched-values case above]
D:\a\pytest-fluent\pytest-fluent\tests\test_reporting_patching.py:168:
[pytester traceback identical to the first failure: parse_summary_nouns over empty outlines raises ValueError at pytester.py:585]
Check failure on line 168 in tests\test_reporting_patching.py
github-actions / Test Report (windows-latest, 3.11)
test_reporting_patching.test_data_reporter_with_patched_values[patch_file_content2-expected_result2-False]
ValueError: Pytest terminal summary report not found
Raw output
pytester = <Pytester WindowsPath('C:/Users/runneradmin/AppData/Local/Temp/pytest-of-unknown/pytest-0/test_data_reporter_with_patched_values2')>
run_mocked_pytest = (<function runpytest.<locals>.runpytest at 0x0000024A1BCD2340>, <MagicMock name='FluentSender()' id='2517320291280'>)
session_uuid = UUID('4a10c3aa-3b98-4394-a8fb-7122a426af67')
patch_file_content = {'all': {'label': '', 'replace': {'keys': {'sessionId': 'id', 'status': 'state'}}, 'tag': ''}, 'pytest_runtest_logrepo...hed'}, 'drop': ['when'], 'label': '<fluentd-label>', 'replace': {'values': {'failed': 'fail', 'passed': 'pass'}}, ...}}
expected_result = [{'id': '4a10c3aa-3b98-4394-a8fb-7122a426af67', 'name': 'test_data_reporter_with_patched_values.py::test_base', 'outcome': 'pass', 'stage': 'testcase', ...}]
extend_logging = False
[parametrized test source and failing assert_outcomes call identical to the first patched-values case above]
D:\a\pytest-fluent\pytest-fluent\tests\test_reporting_patching.py:168:
[pytester traceback identical to the first failure: parse_summary_nouns over empty outlines raises ValueError at pytester.py:585]