From f267db3bb62f993c332da447bb9dd04b28c80e4a Mon Sep 17 00:00:00 2001 From: David Michaels Date: Tue, 29 Oct 2024 12:20:21 -0400 Subject: [PATCH] debugging ga for test_aggregation.py::test_aggregation_facet --- src/encoded/tests/test_access_key.py | 175 -- src/encoded/tests/test_auth0.py | 406 --- src/encoded/tests/test_authentication.py | 103 - src/encoded/tests/test_batch_download.py | 43 - .../tests/test_clear_db_es_contents.py | 297 --- src/encoded/tests/test_create_mapping.py | 213 -- src/encoded/tests/test_dependencies.py | 109 - src/encoded/tests/test_download.py | 233 -- src/encoded/tests/test_edw_hash.py | 18 - src/encoded/tests/test_embedding.py | 166 -- src/encoded/tests/test_file.py | 1491 ----------- src/encoded/tests/test_file_drs.py | 76 - src/encoded/tests/test_fixtures.py | 138 - .../tests/test_fourfront_submission.py | 14 - src/encoded/tests/test_generate_ontology.py | 1375 ---------- src/encoded/tests/test_graph.py | 18 - src/encoded/tests/test_higlass.py | 2275 ----------------- src/encoded/tests/test_indexing.py | 488 ---- src/encoded/tests/test_init.py | 6 - src/encoded/tests/test_inserts.py | 76 - src/encoded/tests/test_key.py | 58 - src/encoded/tests/test_link.py | 85 - src/encoded/tests/test_load_access_key.py | 19 - src/encoded/tests/test_loadxl.py | 190 -- src/encoded/tests/test_misc.py | 13 - src/encoded/tests/test_owltools.py | 180 -- src/encoded/tests/test_permissions.py | 1264 --------- src/encoded/tests/test_post_put_patch.py | 402 --- src/encoded/tests/test_purge_item_type.py | 70 - src/encoded/tests/test_root.py | 31 - src/encoded/tests/test_schema_formats.py | 115 - src/encoded/tests/test_schemas.py | 217 -- src/encoded/tests/test_search.py | 1101 -------- src/encoded/tests/test_server_defaults.py | 46 - src/encoded/tests/test_static_page.py | 148 -- src/encoded/tests/test_types_access_key.py | 181 -- src/encoded/tests/test_types_antibody.py | 41 - src/encoded/tests/test_types_award.py | 53 - src/encoded/tests/test_types_badge.py | 62 - src/encoded/tests/test_types_bio_feature.py | 191 -- src/encoded/tests/test_types_biosample.py | 335 --- src/encoded/tests/test_types_biosource.py | 331 --- src/encoded/tests/test_types_experiment.py | 984 ------- src/encoded/tests/test_types_gene.py | 203 -- src/encoded/tests/test_types_imaging.py | 78 - src/encoded/tests/test_types_individual.py | 100 - .../tests/test_types_init_collections.py | 255 -- .../test_types_microscope_configuration.py | 33 - src/encoded/tests/test_types_modification.py | 41 - src/encoded/tests/test_types_ontology_term.py | 68 - src/encoded/tests/test_types_protocol.py | 66 - src/encoded/tests/test_types_publication.py | 157 -- .../tests/test_types_quality_metric.py | 36 - src/encoded/tests/test_types_tracking_item.py | 39 - src/encoded/tests/test_types_treatment.py | 90 - src/encoded/tests/test_types_user.py | 63 - src/encoded/tests/test_types_workflow.py | 96 - src/encoded/tests/test_upgrade_antibody.py | 25 - src/encoded/tests/test_upgrade_biosample.py | 21 - .../test_upgrade_biosample_cell_culture.py | 21 - src/encoded/tests/test_upgrade_biosource.py | 45 - .../tests/test_upgrade_data_release_update.py | 90 - src/encoded/tests/test_upgrade_experiment.py | 259 -- .../tests/test_upgrade_experiment_set.py | 42 - src/encoded/tests/test_upgrade_file.py | 141 - .../tests/test_upgrade_imaging_path.py | 38 - .../tests/test_upgrade_modification.py | 25 - .../tests/test_upgrade_ontology_term.py | 22 - src/encoded/tests/test_upgrade_publication.py | 24 - src/encoded/tests/test_upgrade_target.py | 22 - 
.../tests/test_upgrade_tracking_item.py | 37 - src/encoded/tests/test_upgrade_treatment.py | 24 - src/encoded/tests/test_upgrade_workflow.py | 670 ----- .../tests/test_upgrade_workflow_run.py | 110 - src/encoded/tests/test_util.py | 68 - src/encoded/tests/test_validation_errors.py | 34 - src/encoded/tests/test_views.py | 305 --- 77 files changed, 17185 deletions(-) delete mode 100644 src/encoded/tests/test_access_key.py delete mode 100644 src/encoded/tests/test_auth0.py delete mode 100644 src/encoded/tests/test_authentication.py delete mode 100644 src/encoded/tests/test_batch_download.py delete mode 100644 src/encoded/tests/test_clear_db_es_contents.py delete mode 100644 src/encoded/tests/test_create_mapping.py delete mode 100644 src/encoded/tests/test_dependencies.py delete mode 100644 src/encoded/tests/test_download.py delete mode 100644 src/encoded/tests/test_edw_hash.py delete mode 100644 src/encoded/tests/test_embedding.py delete mode 100644 src/encoded/tests/test_file.py delete mode 100644 src/encoded/tests/test_file_drs.py delete mode 100644 src/encoded/tests/test_fixtures.py delete mode 100644 src/encoded/tests/test_fourfront_submission.py delete mode 100644 src/encoded/tests/test_generate_ontology.py delete mode 100644 src/encoded/tests/test_graph.py delete mode 100644 src/encoded/tests/test_higlass.py delete mode 100644 src/encoded/tests/test_indexing.py delete mode 100644 src/encoded/tests/test_init.py delete mode 100644 src/encoded/tests/test_inserts.py delete mode 100644 src/encoded/tests/test_key.py delete mode 100644 src/encoded/tests/test_link.py delete mode 100644 src/encoded/tests/test_load_access_key.py delete mode 100644 src/encoded/tests/test_loadxl.py delete mode 100644 src/encoded/tests/test_misc.py delete mode 100644 src/encoded/tests/test_owltools.py delete mode 100644 src/encoded/tests/test_permissions.py delete mode 100644 src/encoded/tests/test_post_put_patch.py delete mode 100644 src/encoded/tests/test_purge_item_type.py delete mode 100644 src/encoded/tests/test_root.py delete mode 100644 src/encoded/tests/test_schema_formats.py delete mode 100644 src/encoded/tests/test_schemas.py delete mode 100644 src/encoded/tests/test_search.py delete mode 100644 src/encoded/tests/test_server_defaults.py delete mode 100644 src/encoded/tests/test_static_page.py delete mode 100644 src/encoded/tests/test_types_access_key.py delete mode 100644 src/encoded/tests/test_types_antibody.py delete mode 100644 src/encoded/tests/test_types_award.py delete mode 100644 src/encoded/tests/test_types_badge.py delete mode 100644 src/encoded/tests/test_types_bio_feature.py delete mode 100644 src/encoded/tests/test_types_biosample.py delete mode 100644 src/encoded/tests/test_types_biosource.py delete mode 100644 src/encoded/tests/test_types_experiment.py delete mode 100644 src/encoded/tests/test_types_gene.py delete mode 100644 src/encoded/tests/test_types_imaging.py delete mode 100644 src/encoded/tests/test_types_individual.py delete mode 100644 src/encoded/tests/test_types_init_collections.py delete mode 100644 src/encoded/tests/test_types_microscope_configuration.py delete mode 100644 src/encoded/tests/test_types_modification.py delete mode 100644 src/encoded/tests/test_types_ontology_term.py delete mode 100644 src/encoded/tests/test_types_protocol.py delete mode 100644 src/encoded/tests/test_types_publication.py delete mode 100644 src/encoded/tests/test_types_quality_metric.py delete mode 100644 src/encoded/tests/test_types_tracking_item.py delete mode 100644 
src/encoded/tests/test_types_treatment.py delete mode 100644 src/encoded/tests/test_types_user.py delete mode 100644 src/encoded/tests/test_types_workflow.py delete mode 100644 src/encoded/tests/test_upgrade_antibody.py delete mode 100644 src/encoded/tests/test_upgrade_biosample.py delete mode 100644 src/encoded/tests/test_upgrade_biosample_cell_culture.py delete mode 100644 src/encoded/tests/test_upgrade_biosource.py delete mode 100644 src/encoded/tests/test_upgrade_data_release_update.py delete mode 100644 src/encoded/tests/test_upgrade_experiment.py delete mode 100644 src/encoded/tests/test_upgrade_experiment_set.py delete mode 100644 src/encoded/tests/test_upgrade_file.py delete mode 100644 src/encoded/tests/test_upgrade_imaging_path.py delete mode 100644 src/encoded/tests/test_upgrade_modification.py delete mode 100644 src/encoded/tests/test_upgrade_ontology_term.py delete mode 100644 src/encoded/tests/test_upgrade_publication.py delete mode 100644 src/encoded/tests/test_upgrade_target.py delete mode 100644 src/encoded/tests/test_upgrade_tracking_item.py delete mode 100644 src/encoded/tests/test_upgrade_treatment.py delete mode 100644 src/encoded/tests/test_upgrade_workflow.py delete mode 100644 src/encoded/tests/test_upgrade_workflow_run.py delete mode 100644 src/encoded/tests/test_util.py delete mode 100644 src/encoded/tests/test_validation_errors.py delete mode 100644 src/encoded/tests/test_views.py diff --git a/src/encoded/tests/test_access_key.py b/src/encoded/tests/test_access_key.py deleted file mode 100644 index 503291a0b0..0000000000 --- a/src/encoded/tests/test_access_key.py +++ /dev/null @@ -1,175 +0,0 @@ -import pytest - -from base64 import b64encode -from pyramid.compat import ascii_native_ -from snovault import COLLECTIONS -from ..edw_hash import EDWHash - - -pytestmark = [pytest.mark.working, pytest.mark.setone] - - -def basic_auth(username, password): - return 'Basic ' + ascii_native_(b64encode(('%s:%s' % (username, password)).encode('utf-8'))) - - -def auth_header(access_key): - return basic_auth(access_key['access_key_id'], access_key['secret_access_key']) - - -@pytest.fixture -def no_login_submitter(testapp, lab, award): - item = { - 'first_name': 'ENCODE', - 'last_name': 'Submitter', - 'email': 'no_login_submitter@example.org', - 'submits_for': [lab['@id']], - 'status': 'revoked', - } - # User @@object view has keys omitted. 
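# For reference, basic_auth() above just base64-encodes "key_id:secret" and
# prefixes it with 'Basic '; a worked example with made-up credentials
# (neither value is a real access key):
#
#     basic_auth('ABC123', 's3cret')
#     # -> 'Basic QUJDMTIzOnMzY3JldA=='   (base64 of 'ABC123:s3cret')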
- res = testapp.post_json('/user', item) - return testapp.get(res.location).json - - -@pytest.fixture -def no_login_access_key(testapp, no_login_submitter): - description = 'My programmatic key' - item = { - 'user': no_login_submitter['@id'], - 'description': description, - } - res = testapp.post_json('/access_key', item) - result = res.json['@graph'][0].copy() - result['secret_access_key'] = res.json['secret_access_key'] - return result - - -def test_access_key_get(anontestapp, access_key): - headers = {'Authorization': auth_header(access_key)} - anontestapp.get('/', headers=headers) - - -def test_access_key_get_bad_username(anontestapp, access_key): - headers = {'Authorization': basic_auth('not_an_access_key', 'bad_password')} - anontestapp.get('/', headers=headers, status=401) - - -def test_access_key_get_bad_password(anontestapp, access_key): - headers = {'Authorization': basic_auth(access_key['access_key_id'], 'bad_password')} - anontestapp.get('/', headers=headers, status=401) - - -def test_access_key_principals(anontestapp, execute_counter, access_key, submitter, lab): - headers = {'Authorization': auth_header(access_key)} - with execute_counter.expect(2): - res = anontestapp.get('/@@testing-user', headers=headers) - - assert res.json['authenticated_userid'] == 'accesskey.' + access_key['access_key_id'] - - assert sorted(res.json['effective_principals']) == [ - 'accesskey.%s' % access_key['access_key_id'], - 'group.submitter', - 'lab.%s' % lab['uuid'], - 'submits_for.%s' % lab['uuid'], - 'system.Authenticated', - 'system.Everyone', - 'userid.%s' % submitter['uuid'], - 'viewing_group.4DN', - ] - - -# this user has the 4DN viewing group -@pytest.fixture -def viewing_group_member(testapp, award): - item = { - 'first_name': 'Viewing', - 'last_name': 'Group', - 'email': 'viewing_group_member@example.org', - 'viewing_groups': [award['viewing_group']], - 'status': 'current' - } - # User @@object view has keys omitted. - res = testapp.post_json('/user', item) - return testapp.get(res.location).json - - -def test_access_key_self_create_no_submits_for(anontestapp, access_key, viewing_group_member): - submitter = viewing_group_member - extra_environ = {'REMOTE_USER': str(submitter['email'])} - res = anontestapp.post_json( - '/access_key/', {}, extra_environ=extra_environ - ) - access_key_id = res.json['access_key_id'] - headers = { - 'Authorization': basic_auth(access_key_id, res.json['secret_access_key']), - } - res = anontestapp.get('/@@testing-user', headers=headers) - assert res.json['authenticated_userid'] == 'accesskey.' + access_key_id - - -def test_access_key_self_create(anontestapp, access_key, submitter): - extra_environ = {'REMOTE_USER': str(submitter['email'])} - res = anontestapp.post_json( - '/access_key/', {}, extra_environ=extra_environ - ) - access_key_id = res.json['access_key_id'] - headers = { - 'Authorization': basic_auth(access_key_id, res.json['secret_access_key']), - } - res = anontestapp.get('/@@testing-user', headers=headers) - assert res.json['authenticated_userid'] == 'accesskey.' 
+ access_key_id - - -def test_access_key_submitter_cannot_create_for_someone_else(anontestapp, submitter): - extra_environ = {'REMOTE_USER': str(submitter['email'])} - anontestapp.post_json( - '/access_key/', {'user': 'BOGUS'}, extra_environ=extra_environ, status=422) - - -def test_access_key_reset(anontestapp, access_key, submitter): - headers = {'Authorization': auth_header(access_key)} - extra_environ = {'REMOTE_USER': str(submitter['email'])} # Must be native string for Python 2.7 - res = anontestapp.post_json( - access_key['@id'] + '@@reset-secret', {}, extra_environ=extra_environ) - new_headers = { - 'Authorization': basic_auth(access_key['access_key_id'], res.json['secret_access_key']), - } - anontestapp.get('/@@testing-user', headers=headers, status=401) - res = anontestapp.get('/@@testing-user', headers=new_headers) - assert res.json['authenticated_userid'] == 'accesskey.' + access_key['access_key_id'] - - -def test_access_key_delete_disable_login(anontestapp, testapp, access_key): - testapp.patch_json(access_key['@id'], {'status': 'deleted'}) - headers = {'Authorization': auth_header(access_key)} - anontestapp.get('/@@testing-user', headers=headers, status=401) - - -def test_access_key_user_disable_login(anontestapp, no_login_access_key): - access_key = no_login_access_key - headers = {'Authorization': auth_header(access_key)} - anontestapp.get('/@@testing-user', headers=headers, status=401) - - -def test_access_key_edit(anontestapp, access_key): - headers = {'Authorization': auth_header(access_key)} - NEW_DESCRIPTION = 'new description' - properties = {'description': NEW_DESCRIPTION} - anontestapp.put_json(access_key['@id'], properties, headers=headers) - - res = anontestapp.get(access_key['@id'], properties, headers=headers) - assert res.json['description'] == NEW_DESCRIPTION - - -@pytest.mark.parametrize('frame', ['', 'raw', 'object', 'embedded', 'page']) -def test_access_key_view_hides_secret_access_key_hash(testapp, access_key, frame): - query = '?frame=' + frame if frame else '' - res = testapp.get(access_key['@id'] + query) - assert 'secret_access_key_hash' not in res.json - - -def test_access_key_uses_edw_hash(app, access_key): - root = app.registry[COLLECTIONS] - obj = root.by_item_type['access_key'][access_key['access_key_id']] - pwhash = obj.properties['secret_access_key_hash'] - assert EDWHash.hash(access_key['secret_access_key']) == pwhash diff --git a/src/encoded/tests/test_auth0.py b/src/encoded/tests/test_auth0.py deleted file mode 100644 index e3a6568d07..0000000000 --- a/src/encoded/tests/test_auth0.py +++ /dev/null @@ -1,406 +0,0 @@ -import contextlib -import datetime -import jwt -import os -import pytest -import requests -import time - -from dcicutils.misc_utils import Retry -from dcicutils.qa_utils import override_dict -from http import cookies -from pyramid.testing import DummyRequest -from snovault.authentication import get_jwt -from ..edw_hash import EDWHash -from ..util import get_trusted_email - - -pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.indexing] - - -@pytest.fixture(scope='session') -def auth0_access_token(): - creds = { - 'connection': 'Username-Password-Authentication', - 'scope': 'openid email', - 'client_id': 'DPxEwsZRnKDpk0VfVAxrStRKukN14ILB', - 'grant_type': 'password', - 'username': '4dndcic@gmail.com', - 'password': 'Testing123', - } - url = 'https://hms-dbmi.auth0.com/oauth/ro' - try: - res = requests.post(url, json=creds) - res.raise_for_status() - except Exception as e: - raise AssertionError("Error retrieving 
auth0 test user access token: %r" % e) - - data = res.json() - if 'id_token' not in data: - raise AssertionError("Missing 'id_token' in auth0 test user access token: %r" % data) - - return data['id_token'] - - -@pytest.fixture(scope='session') -def auth0_access_token_no_email(): - creds = { - 'connection': 'Username-Password-Authentication', - 'scope': 'openid', - 'client_id': 'DPxEwsZRnKDpk0VfVAxrStRKukN14ILB', - 'grant_type': 'password', - 'username': 'test1@test.com', - 'password': 'Testing123', - } - url = 'https://hms-dbmi.auth0.com/oauth/ro' - try: - res = requests.post(url, json=creds) - res.raise_for_status() - except Exception as e: - raise AssertionError("Error retrieving auth0 test user access token: %r" % e) - - data = res.json() - if 'id_token' not in data: - raise AssertionError("Missing 'id_token' in auth0 test user access token: %r" % data) - - return data['id_token'] - - -@pytest.fixture() -def auth0_4dn_user_token(auth0_access_token): - return {'id_token': auth0_access_token} - - -@pytest.fixture() -def auth0_4dn_user_profile(): - return {'email': '4dndcic@gmail.com'} - - -@Retry.retry_allowed(retries_allowed=20, wait_seconds=0.5) -def _auth0_await_user(testapp, user_uuid): - """ - Wait (for a reasonable time) for a given user's uuid to appear in a /users/ response. - - This function will retry at half-second intervals until the query doesn't fail - or the retry conditions are exceeded. - """ - url = "/users/%s" % user_uuid - response = testapp.get('/users/') - assert response.status_code == 200, "Expected %s to exist." % url - assert any(user['uuid'] == user_uuid for user in response.json['@graph']) - return response - - -@pytest.fixture() -def auth0_existing_4dn_user_profile(testapp, auth0_4dn_user_profile): - - # Create a user with the persona email - url = '/users/' - first_name = 'Auth0' - last_name = 'Test User' - item = { - 'email': auth0_4dn_user_profile['email'], - 'first_name': first_name, - 'last_name': last_name, - } - [user] = testapp.post_json(url, item, status=201).json['@graph'] - assert user['display_title'] == first_name + " " + last_name # Validate that useful processing occurred.
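# _auth0_await_user (called just below) owes its polling behavior to the
# Retry.retry_allowed decorator above. Roughly, the decorated call behaves
# like this sketch (the real dcicutils implementation may adjust the wait
# between tries):
#
#     last_error = None
#     for _attempt in range(1 + 20):                 # first try + retries_allowed
#         try:
#             return _auth0_await_user(testapp, user_uuid)
#         except Exception as e:
#             last_error = e
#             time.sleep(0.5)                        # wait_seconds
#     raise last_error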
- - _auth0_await_user(testapp, user['uuid']) - - return user # Now that it exists - - -@pytest.fixture() -def headers(auth0_access_token): - return { - 'Accept': 'application/json', - 'Content-Type': 'application/json', - 'Authorization': 'Bearer ' + auth0_access_token - } - - -@pytest.fixture() -def fake_request(headers): - - class FakeRequest(object): - """Mocked Request class""" - # TODO: See if could/should use or subclass from DummyRequest - def __init__(self): - self.headers = headers - self.cookies = {} - self.method = "GET" - - return FakeRequest() - - -def test_get_jwt_gets_bearer_auth(fake_request): - jwt = get_jwt(fake_request) - assert jwt == fake_request.headers['Authorization'][7:] - - -SPACE = ' ' - - -def test_get_jwt_gets_bearer_auth_too(): - - fake_jwt = 'abc.def.ghi' - req = DummyRequest(headers={'Authorization': 'bearer' + SPACE + fake_jwt}) - jwt = get_jwt(req) - assert jwt == fake_jwt - - -def test_get_jwt_gets_bearer_auth_ignores_extra_space(): - fake_jwt = 'abc.def.ghi' - req = DummyRequest(headers={'Authorization': 'bearer' + 2*SPACE + fake_jwt + SPACE}) - jwt = get_jwt(req) - assert jwt == fake_jwt - - -def test_get_jwt_gets_jwt_with_spaces(): - fake_jwt = 'abc def ghi' # Spaces in a JWT are not legal - req = DummyRequest(headers={'Authorization': 'bearer' + SPACE + fake_jwt + SPACE}) - jwt = get_jwt(req) - assert jwt == fake_jwt - - -def test_get_jwt_fails_bearer_auth_no_sep(): - fake_jwt = 'abc.def.ghi' - # This makes sure there's a space separator after 'bearer'. - req = DummyRequest(headers={'Authorization': 'bearer.' + fake_jwt}) - jwt = get_jwt(req) - assert jwt is None - - -def test_get_jwt_skips_basic_auth(fake_request): - with override_dict(fake_request.headers, Authorization='Basic test_token'): - jwt = get_jwt(fake_request) - assert jwt is None - - -def test_get_jwt_falls_back_to_cookie(fake_request): - fake_request.cookies['jwtToken'] = 'test_token' - fake_request.headers['Authorization'] = 'Basic test_token' - jwt = get_jwt(fake_request) - assert jwt == 'test_token' - - -def test_get_jwt_falls_back_to_cookie_too(fake_request): - with override_dict(fake_request.cookies, jwtToken='test_token'): - with override_dict(fake_request.headers, Authorization='Basic stuff_base64_encoded'): - jwt = get_jwt(fake_request) - assert jwt == 'test_token' - - -@pytest.mark.parametrize('request_method', ['HEAD', 'GET', 'POST', 'PATCH']) -def test_get_jwt_falls_back_to_cookie_for_any_method(fake_request, request_method): - req = DummyRequest(headers={'Authorization': 'Basic not_the_droids_you_are_looking_for'}, - cookies={'jwtToken': 'r2d2_and_c3po'}) - req.method = request_method - jwt = get_jwt(req) - assert jwt == 'r2d2_and_c3po' - - -def test_auth_token_unknown_user(anontestapp, auth0_4dn_user_token): - # Should succeed regardless of token - endpoint just saves cookie. 
- # (We give less feedback from this endpoint than we could to help avoid brute-force attacks) - anontestapp.post_json('/login', auth0_4dn_user_token, status=200) - - -def test_auth_token_no_email(anontestapp, auth0_access_token_no_email, headers): - headers1 = headers.copy() - headers1['Authorization'] = 'Bearer ' + auth0_access_token_no_email - # The token is valid but carries no email, so access is denied - anontestapp.get('/session-properties', headers=headers1, status=401) - - -def test_invalid_auth_token(anontestapp, headers): - headers1 = headers.copy() - headers1['Authorization'] = 'Bearer invalid token' - # The token is not a valid JWT, so access is denied - anontestapp.get('/session-properties', headers=headers1, status=401) - - -# TODO (C4-173): This is intentionally disabled for now. It requires additional security that we need to reconsider. -# -kmp 2-Jun-2020 -# -# def test_login_logout(testapp, anontestapp, headers, -# auth0_existing_4dn_user_profile, -# auth0_4dn_user_token): -# -# # Log in -# res = anontestapp.post_json('/login', headers=headers) -# -# assert res.json.get('auth.userid') is None -# assert 'id_token' in res.json -# assert 'user_actions' in res.json -# -# # Log out -# res = anontestapp.get('/logout?redirect=false', status=200) -# # no more cookies -# assert 'auth.userid' not in res.json -# assert 'id_token' not in res.json -# assert 'user_actions' not in res.json - - -@pytest.mark.skip # XXX: needs refactor -def test_404_keeps_auth_info(testapp, anontestapp, headers, - auth0_existing_4dn_user_profile, - auth0_4dn_user_token): - - page_view_request_headers = headers.copy() - # X-User-Info header is only set for text/html-formatted Responses. - page_view_request_headers.update({ - "Accept": "text/html", - "Content-Type": "text/html", - "Cookie": "jwtToken=" + headers['Authorization'][7:] - }) - # Request a nonexistent page while carrying the auth info - res = anontestapp.get('/not_found_url', headers=page_view_request_headers, status=404) - - assert str(res.status_int) == "404" - try: - assert res.headers.get('X-Request-JWT', None) is not None - assert res.headers.get('X-User-Info', None) is not None - except Exception as e: - if os.environ.get('TRAVIS', False): - print("This does not work on travis due to Auth0 access issues") - else: - raise e - - -# TODO (C4-173): This is intentionally disabled for now. It requires additional security that we need to reconsider. -# -kmp 2-Jun-2020 -# -# def test_login_logout_redirect(testapp, anontestapp, headers, -# auth0_existing_4dn_user_profile, -# auth0_4dn_user_token): -# -# # Log in -# res = anontestapp.post_json('/login', headers=headers) -# -# assert res.json.get('auth.userid') is None -# assert 'id_token' in res.json -# assert 'user_actions' in res.json -# -# # Log out -# res = anontestapp.get('/logout?redirect=True', status=302) - - -def test_jwt_is_stateless_so_doesnt_actually_need_login(testapp, anontestapp, auth0_4dn_user_token, - auth0_existing_4dn_user_profile, headers): - - # Just FYI: This test was failing for me (dmichaels/2023-06-14) and it turned out to be because - # I had, for some reason, my Auth0Secret environment variable set (to whatever); unsetting this - # fixed the problem; not sure how it was causing a failure; something someone else may run into.
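# The get_jwt() tests earlier in this file boil down to a small precedence
# rule; a simplified sketch of that logic (not the actual snovault
# implementation):
#
#     def get_jwt_sketch(request):
#         auth = request.headers.get('Authorization', '')
#         if auth.lower().startswith('bearer '):
#             return auth[7:].strip() or None       # token from the Bearer header
#         return request.cookies.get('jwtToken')    # otherwise fall back to the cookie
#
# i.e. a Basic Authorization header never yields a JWT, and the jwtToken
# cookie is honored for any request method when no Bearer header is present.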
- - res2 = anontestapp.get('/users/', headers=headers, status=200) - assert '@id' in res2.json['@graph'][0] - - -def test_jwt_works_without_keys(testapp, anontestapp, auth0_4dn_user_token, - auth0_existing_4dn_user_profile, headers): - - # clear out keys - old_key = anontestapp.app.registry.settings['auth0.secret'] - anontestapp.app.registry.settings['auth0.secret'] = None - res2 = anontestapp.get('/users/', headers=headers, status=200) - - anontestapp.app.registry.settings['auth0.secret'] = old_key - assert '@id' in res2.json['@graph'][0] - - -def test_impersonate_invalid_user(anontestapp, admin): - - anontestapp.post_json('/impersonate-user', - {'userid': 'not@here.usr'}, - extra_environ={'REMOTE_USER': str(admin['email'])}, - status=422) - - -@pytest.mark.broken -@pytest.mark.skip -def test_impersonate_user_obsolete(anontestapp, admin, submitter): - if not os.environ.get('Auth0Secret'): - pytest.skip("need the keys to impersonate user, which aren't here") - - res = anontestapp.post_json('/impersonate-user', {'userid': submitter['email']}, - extra_environ={'REMOTE_USER': str(admin['email'])}) - - # we should get back a new token - assert 'user_actions' in res.json - assert 'id_token' in res.json - - # and we should be able to use that token as the new user - headers = { - 'Accept': 'application/json', - 'Content-Type': 'application/json', - 'Authorization': 'Bearer ' + res.json['id_token'] - } - res2 = anontestapp.get('/users/', headers=headers) - assert '@id' in res2.json['@graph'][0] - - -@pytest.mark.broken -@pytest.mark.skip -def test_impersonate_user(anontestapp, admin, submitter): - if not os.environ.get('Auth0Secret'): - pytest.skip("need the keys to impersonate user, which aren't here") - - res = anontestapp.post_json('/impersonate-user', - {'userid': submitter['email']}, - extra_environ={'REMOTE_USER': str(admin['email'])}, - status=200) - - # we should get back a new token - assert 'user_actions' in res.json - try: - # This try tries to assure that if an error occurs, we don't leave a bunch of JWT in the open - # on an error message for someone to pick up and use. The try will catch the error and issue - # a different, cleaner error if we don't get the data we expect to get. -kmp 9-Mar-2021 - c = cookies.SimpleCookie() - c.load(res.headers['Set-Cookie']) - returned_jwt_hashed = EDWHash.hash(c['jwtToken'].value) - jwt_headers = jwt.get_unverified_header(c['jwtToken'].value) - email_from_jwt = jwt.decode(c['jwtToken'].value, verify=False)['email'] - c = None - except Exception: - raise AssertionError("jwtToken cookie not found in first return value.") - - assert jwt_headers['typ'] == 'JWT' - assert jwt_headers['alg'] == 'HS256' - assert email_from_jwt == submitter['email'] - - # We used to get back an id_token as part of the result JSON and we had to pass that token as part of the - # Authorization on the next request ('Authorization': 'Bearer ' + res.json['id_token']) - # but now it comes back as a protected cookie that is passed through on subsequent requests mostly - # invisibly. We test that here, but try to do so carefully.
-kmp 9-Mar-2021 - headers = { - 'Accept': 'application/json', - 'Content-Type': 'application/json' - } - res2 = anontestapp.get('/users/', headers=headers, status=200) - users2 = res2.json['@graph'] - users2_0 = users2[0] - assert '@id' in users2_0 - try: - sent_jwt_hashed = EDWHash.hash(res2.request.cookies['jwtToken']) - except Exception: - raise AssertionError("jwtToken cookie not found in second request.") - assert sent_jwt_hashed == returned_jwt_hashed, "The jwtToken returned from first request wasn't sent on the second." - - res3 = anontestapp.get('/me', status=307) - me = res3.json - assert submitter['title'] == "ENCODE Submitter" - assert me['title'] == "ENCODE Submitter" - - -def test_impersonate_user_simple(anontestapp, admin, submitter): - if not os.environ.get('Auth0Secret'): - pytest.skip("need the keys to impersonate user, which aren't here") - - anontestapp.post_json('/impersonate-user', {'userid': submitter['email']}, - extra_environ={'REMOTE_USER': str(admin['email'])}, - status=200) - - assert anontestapp.get('/me', status=307).json['title'] == submitter['title'] diff --git a/src/encoded/tests/test_authentication.py b/src/encoded/tests/test_authentication.py deleted file mode 100644 index d7bedcf10c..0000000000 --- a/src/encoded/tests/test_authentication.py +++ /dev/null @@ -1,103 +0,0 @@ -import pytest -import unittest - -from pyramid.interfaces import IAuthenticationPolicy -from pyramid.security import Authenticated, Everyone -from pyramid.testing import DummyRequest -from zope.interface.verify import verifyClass, verifyObject -from snovault.authentication import NamespacedAuthenticationPolicy - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -class TestNamespacedAuthenticationPolicy(unittest.TestCase): - """ This is a modified version of TestRemoteUserAuthenticationPolicy - """ - def _getTargetClass(self): - return NamespacedAuthenticationPolicy - - def _makeOne(self, namespace='user', - base='pyramid.authentication.RemoteUserAuthenticationPolicy', - *args, **kw): - return self._getTargetClass()(namespace, base, *args, **kw) - - def test_class_implements_IAuthenticationPolicy(self): - klass = self._makeOne().__class__ - verifyClass(IAuthenticationPolicy, klass) - - def test_instance_implements_IAuthenticationPolicy(self): - verifyObject(IAuthenticationPolicy, self._makeOne()) - - def test_unauthenticated_userid_returns_None(self): - request = DummyRequest(environ={}) - policy = self._makeOne() - self.assertEqual(policy.unauthenticated_userid(request), None) - - def test_unauthenticated_userid(self): - request = DummyRequest(environ={'REMOTE_USER':'fred'}) - policy = self._makeOne() - self.assertEqual(policy.unauthenticated_userid(request), 'user.fred') - - def test_authenticated_userid_None(self): - request = DummyRequest(environ={}) - policy = self._makeOne() - self.assertEqual(policy.authenticated_userid(request), None) - - def test_authenticated_userid(self): - request = DummyRequest(environ={'REMOTE_USER':'fred'}) - policy = self._makeOne() - self.assertEqual(policy.authenticated_userid(request), 'user.fred') - - def test_effective_principals_None(self): - request = DummyRequest(environ={}) - policy = self._makeOne() - self.assertEqual(policy.effective_principals(request), [Everyone]) - - def test_effective_principals(self): - request = DummyRequest(environ={'REMOTE_USER':'fred'}) - policy = self._makeOne() - self.assertEqual(policy.effective_principals(request), - [Everyone, Authenticated, 'user.fred']) - - def test_remember(self): - request = 
DummyRequest(environ={'REMOTE_USER':'fred'}) - policy = self._makeOne() - result = policy.remember(request, 'fred') - self.assertEqual(result, []) - - def test_forget(self): - request = DummyRequest(environ={'REMOTE_USER':'fred'}) - policy = self._makeOne() - result = policy.forget(request) - self.assertEqual(result, []) - - # From TestSessionAuthenticationPolicy - - def test_session_remember(self): - request = DummyRequest() - policy = self._makeOne( - base='pyramid.authentication.SessionAuthenticationPolicy', - prefix='') - result = policy.remember(request, 'user.fred') - self.assertEqual(request.session.get('userid'), 'fred') - self.assertEqual(result, []) - self.assertEqual(policy.unauthenticated_userid(request), 'user.fred') - - def test_session_forget(self): - request = DummyRequest(session={'userid':'fred'}) - policy = self._makeOne( - base='pyramid.authentication.SessionAuthenticationPolicy', - prefix='') - result = policy.forget(request) - self.assertEqual(request.session.get('userid'), None) - self.assertEqual(result, []) - - def test_session_forget_no_identity(self): - request = DummyRequest() - policy = self._makeOne( - base='pyramid.authentication.SessionAuthenticationPolicy', - prefix='') - result = policy.forget(request) - self.assertEqual(request.session.get('userid'), None) - self.assertEqual(result, []) diff --git a/src/encoded/tests/test_batch_download.py b/src/encoded/tests/test_batch_download.py deleted file mode 100644 index 659bf66a4e..0000000000 --- a/src/encoded/tests/test_batch_download.py +++ /dev/null @@ -1,43 +0,0 @@ -import pytest - -from dcicutils.qa_utils import notice_pytest_fixtures -from ..util import delay_rerun -# Use workbook fixture from BDD tests (including elasticsearch) -#from .workbook_fixtures import es_app_settings, es_app, es_testapp, workbook - - -# NOTE WELL: app-settings and app are not used here explicitly but are probably still needed. 
-# See longer explanation at top of test_aggregation.py -kmp 28-Jun-2020 -# notice_pytest_fixtures(es_app_settings, es_app, es_testapp, workbook) - -pytestmark = [# pytest.mark.indexing, - pytest.mark.workbook, - pytest.mark.flaky(rerun_filter=delay_rerun)] - - -@pytest.mark.skip(reason="update data when we have a working experiment") -def test_report_download(es_testapp, workbook): - notice_pytest_fixtures(es_testapp, workbook) - - res = es_testapp.get('/report.tsv?type=Experiment&sort=accession') - assert res.headers['content-type'] == 'text/tsv; charset=UTF-8' - disposition = res.headers['content-disposition'] - assert disposition == 'attachment;filename="report.tsv"' - lines = res.body.splitlines() - assert lines[0].split(b'\t') == [ - b'ID', b'Accession', b'Assay Type', b'Assay Nickname', b'Target', - b'Biosample', b'Description', b'Lab', b'Project', b'Status', - b'Linked Antibody', b'Species', b'Life stage', b'Age', b'Age Units', - b'Treatment', b'Term ID', b'Concentration', b'Concentration units', - b'Duration', b'Duration units', b'Synchronization', - b'Post-synchronization time', b'Post-synchronization time units', - b'Replicates', b'Files', b'Dbxrefs' - ] - assert lines[1].split(b'\t') == [ - b'/experiments/ENCSR000AAL/', b'ENCSR000AAL', b'RNA-seq', b'RNA-seq', - b'', b'K562', b'RNA Evaluation K562 Small Total RNA-seq from Gingeras', - b'Thomas Gingeras, CSHL', b'ENCODE', b'released', b'', - b'', b'', b'', b'', b'', b'', b'', b'', b'', - b'', b'', b'', b'', b'', b'', b'' - ] - assert len(lines) == 44 diff --git a/src/encoded/tests/test_clear_db_es_contents.py b/src/encoded/tests/test_clear_db_es_contents.py deleted file mode 100644 index 4f6f3b9443..0000000000 --- a/src/encoded/tests/test_clear_db_es_contents.py +++ /dev/null @@ -1,297 +0,0 @@ -import contextlib -import pytest - -from dcicutils.env_utils import EnvUtils -from dcicutils.lang_utils import disjoined_list -from dcicutils.qa_utils import logged_messages, input_mocked -from unittest import mock -from ..commands import clear_db_es_contents as clear_db_es_contents_module -from ..commands.clear_db_es_contents import ( - clear_db_tables, - run_clear_db_es, - main as clear_db_es_contents_main -) - - -pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.indexing] - - -def test_clear_db_tables(app, testapp): - # post an item and make sure it's there - post_res = testapp.post_json('/testing-post-put-patch/', {'required': 'abc'}, - status=201) - testapp.get(post_res.location, status=200) - clear_res = clear_db_tables(app) - assert clear_res is True - # item should no longer be present - testapp.get(post_res.location, status=404) - - -_FOURFRONT_PRODUCTION_ENVS = ['fourfront-production-blue', 'fourfront-production-green', 'data', 'staging'] -# Really we only care about the first of these names, but the rest are names that were at one time -# planned to be stg or prd names for cgap, so we'll use them to tell that run_clear_db_es is properly -# skipping any such names. 
-kmp 4-Jun-2022 -_CGAP_PRODUCTION_ENVS = ['fourfront-cgap', 'fourfront-cgap-green', 'cgap-green', 'fourfront-cgap-blue', 'cgap-blue'] - -_PRODUCTION_ENVS = EnvUtils.app_case(if_fourfront=_FOURFRONT_PRODUCTION_ENVS, if_cgap=_CGAP_PRODUCTION_ENVS) - -TEST_ENV = EnvUtils.app_case(if_fourfront='fourfront-mastertest', if_cgap='cgap-devtest') - -OTHER_ENV = EnvUtils.app_case(if_fourfront='fourfront-foo', if_cgap='cgap-foo') - -DECOY_ENV_1 = TEST_ENV + '-decoy-1' -DECOY_ENV_2 = TEST_ENV + '-decoy-2' - - -@contextlib.contextmanager -def local_env_name_registry_setting_for_testing(app, envname): - old_env = app.registry.settings.get('env.name') - print(f"Remembering old env.name = {old_env}") - try: - app.registry.settings['env.name'] = envname - print(f"Set env.name = {envname}") - yield - finally: - if old_env is None: - print(f"Removing env.name") - del app.registry.settings['env.name'] - else: - print(f"Restoring env.name to {old_env}") - app.registry.settings['env.name'] = old_env - - -@pytest.mark.unit -def test_run_clear_db_es_unit(app, testapp): - - with mock.patch.object(clear_db_es_contents_module, "clear_db_tables") as mock_clear_db_tables: - with mock.patch.object(clear_db_es_contents_module, "run_create_mapping") as mock_run_create_mapping: - - def mocked_is_stg_or_prd_env(env): - result = (env in _PRODUCTION_ENVS # really this should be enough - # for pragmatic redundancy since these will match our real production systems, protect them - or env in _CGAP_PRODUCTION_ENVS - or env in _FOURFRONT_PRODUCTION_ENVS - or env.endswith("blue") or env.endswith("green") or env.endswith("cgap")) - print(f"Mocked is_stg_or_prd_env({env}) returning {result}.") - return result - - with mock.patch.object(clear_db_es_contents_module, "is_stg_or_prd_env") as mock_is_stg_or_prd_env: - mock_is_stg_or_prd_env.side_effect = mocked_is_stg_or_prd_env - - expected_db_clears = 0 - expected_es_clears = 0 - - assert mock_clear_db_tables.call_count == expected_db_clears - assert mock_run_create_mapping.call_count == expected_es_clears - - # It works positionally - assert run_clear_db_es(app, None, True) is True - expected_db_clears += 1 - expected_es_clears += 0 - - assert mock_clear_db_tables.call_count == expected_db_clears - assert mock_run_create_mapping.call_count == expected_es_clears - - # It works by keyword argument - assert run_clear_db_es(app, only_envs=None, skip_es=True) is True - expected_db_clears += 1 - expected_es_clears += 0 - - assert mock_clear_db_tables.call_count == expected_db_clears - assert mock_run_create_mapping.call_count == expected_es_clears - - for production_env in _PRODUCTION_ENVS: - with local_env_name_registry_setting_for_testing(app, production_env): - # should never run on production envs env - assert clear_db_es_contents_module.is_stg_or_prd_env(production_env) is True - with logged_messages(module=clear_db_es_contents_module, error=[ - (f'clear_db_es_contents: This action cannot be performed on env {production_env}' - f' because it is a production-class (stg or prd) environment.' 
- f' Skipping the attempt to clear DB.')]): - assert run_clear_db_es(app, only_envs=None, skip_es=True) is False - expected_db_clears += 0 - expected_es_clears += 0 - assert mock_clear_db_tables.call_count == expected_db_clears - assert mock_run_create_mapping.call_count == expected_es_clears - - with local_env_name_registry_setting_for_testing(app, TEST_ENV): - - allowed_envs = [OTHER_ENV] - - # test if we are only running on specific envs - with logged_messages(module=clear_db_es_contents_module, - error=[(f'clear_db_es_contents: The current environment, {TEST_ENV},' - f' is not {disjoined_list(allowed_envs)}.' - f' Skipping the attempt to clear DB.')]): - assert run_clear_db_es(app, only_envs=allowed_envs, skip_es=True) is False - expected_db_clears += 0 - expected_es_clears += 0 - assert mock_clear_db_tables.call_count == expected_db_clears - assert mock_run_create_mapping.call_count == expected_es_clears - - # test again if we are only running on specific envs - with logged_messages(module=clear_db_es_contents_module, - error=[(f'clear_db_es_contents: The current environment, {TEST_ENV},' - f' is not {disjoined_list(allowed_envs)}.' - f' Skipping the attempt to clear DB.')]): - assert run_clear_db_es(app, only_envs=allowed_envs, skip_es=False) is False - expected_db_clears += 0 - expected_es_clears += 0 - assert mock_clear_db_tables.call_count == expected_db_clears - assert mock_run_create_mapping.call_count == expected_es_clears - - # test if we are only running on specific envs - assert run_clear_db_es(app, only_envs=[TEST_ENV], skip_es=True) is True - expected_db_clears += 1 - expected_es_clears += 0 - assert mock_clear_db_tables.call_count == expected_db_clears - assert mock_run_create_mapping.call_count == expected_es_clears - - # test again if we are only running on specific envs - assert run_clear_db_es(app, only_envs=[TEST_ENV], skip_es=False) is True - expected_db_clears += 1 - expected_es_clears += 1 - assert mock_clear_db_tables.call_count == expected_db_clears - assert mock_run_create_mapping.call_count == expected_es_clears - - allowed_envs = [DECOY_ENV_1, DECOY_ENV_2] - # test if we are only running on specific envs - with logged_messages(module=clear_db_es_contents_module, - error=[(f'clear_db_es_contents: The current environment, {TEST_ENV},' - f' is not {disjoined_list(allowed_envs)}.' - f' Skipping the attempt to clear DB.')]): - assert run_clear_db_es(app, only_envs=allowed_envs, skip_es=False) is False - expected_db_clears += 0 - expected_es_clears += 0 - assert mock_clear_db_tables.call_count == expected_db_clears - assert mock_run_create_mapping.call_count == expected_es_clears - - allowed_envs = [DECOY_ENV_1, TEST_ENV] - # test if we are only running on specific envs - assert run_clear_db_es(app, only_envs=allowed_envs, skip_es=False) is True - expected_db_clears += 1 - expected_es_clears += 1 - assert mock_clear_db_tables.call_count == expected_db_clears - assert mock_run_create_mapping.call_count == expected_es_clears - - -@pytest.mark.unit -def test_clear_db_es_contents_main(): - - # It should never get to these first two in this test, but they're there for safety. 
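# For orientation, the argument combinations exercised below correspond to a
# command-line invocation along these lines (the console-script name here is
# assumed; it is not shown in this diff):
#
#     clear-db-es-contents production.ini --app-name app --only-if-env fourfront-mastertest --skip-es --no-confirm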
- with mock.patch.object(clear_db_es_contents_module, "clear_db_tables"): - with mock.patch.object(clear_db_es_contents_module, "run_create_mapping"): - - class FakeApp: - - class Registry: - def __init__(self): - self.settings = {} - - def __init__(self, config_uri, appname): - self.appname = appname - self.config_uri = config_uri - self.registry = self.Registry() - - def __str__(self): - return f"<FakeApp {self.config_uri} {self.appname}>" - - def __repr__(self): - return str(self) - - class MockDBSession: - - def __init__(self, app): - self.app = app - - apps = {} - - def mocked_get_app(config_uri, appname): - key = (config_uri, appname) - app = apps.get(key) - if not app: - apps[key] = app = FakeApp(config_uri, appname) - return app - - def mocked_configure_dbsession(app): - return MockDBSession(app) - - with mock.patch.object(clear_db_es_contents_module, "run_clear_db_es") as mock_run_clear_db_es: - with mock.patch.object(clear_db_es_contents_module, "get_app") as mock_get_app: - mock_get_app.side_effect = mocked_get_app - with mock.patch.object(clear_db_es_contents_module, - "configure_dbsession") as mock_configure_dbsession: - mock_configure_dbsession.side_effect = mocked_configure_dbsession - - config_uri = 'production.ini' - appname = "app" - - - with input_mocked( - # We'll be prompted for the environment name to confirm. - "local", - module=clear_db_es_contents_module): - - clear_db_es_contents_main([config_uri]) - mock_run_clear_db_es.assert_called_with(app=mocked_get_app(config_uri, None), - only_envs=[], - skip_es=False) - - with input_mocked( - # No input prompting will occur because --no-confirm was supplied. - module=clear_db_es_contents_module): - - clear_db_es_contents_main([config_uri, "--no-confirm"]) - mock_run_clear_db_es.assert_called_with(app=mocked_get_app(config_uri, None), - only_envs=[], - skip_es=False) - - with input_mocked( - # We'll be prompted for the environment name to confirm. - "local", - module=clear_db_es_contents_module): - - clear_db_es_contents_main([config_uri, "--app-name", appname]) - mock_run_clear_db_es.assert_called_with(app=mocked_get_app(config_uri, appname), - only_envs=[], - skip_es=False) - - with input_mocked( - # We'll be prompted for the environment name to confirm. - "local", - module=clear_db_es_contents_module): - - clear_db_es_contents_main([config_uri, "--app-name", appname, '--skip-es']) - mock_run_clear_db_es.assert_called_with(app=mocked_get_app(config_uri, appname), - only_envs=[], - skip_es=True) - - with input_mocked( - # No input prompting will occur because --only-if-env was supplied. - module=clear_db_es_contents_module): - - clear_db_es_contents_main([config_uri, "--app-name", appname, "--only-if-env", TEST_ENV]) - mock_run_clear_db_es.assert_called_with(app=mocked_get_app(config_uri, appname), - only_envs=[TEST_ENV], - skip_es=False) - - with input_mocked( - # We'll be prompted for the environment name to confirm. - "local", - module=clear_db_es_contents_module): - - clear_db_es_contents_main([config_uri, "--app-name", appname, "--only-if-env", TEST_ENV, - "--confirm"]) - mock_run_clear_db_es.assert_called_with(app=mocked_get_app(config_uri, appname), - only_envs=[TEST_ENV], - skip_es=False) - - with input_mocked( - # No input prompting will occur because --only-if-env was supplied. 
- module=clear_db_es_contents_module): - - clear_db_es_contents_main([config_uri, "--app-name", appname, - "--only-if-env", f"{TEST_ENV},{OTHER_ENV}"]) - mock_run_clear_db_es.assert_called_with(app=mocked_get_app(config_uri, appname), - only_envs=[TEST_ENV, OTHER_ENV], - skip_es=False) diff --git a/src/encoded/tests/test_create_mapping.py b/src/encoded/tests/test_create_mapping.py deleted file mode 100644 index 881bd015a2..0000000000 --- a/src/encoded/tests/test_create_mapping.py +++ /dev/null @@ -1,213 +0,0 @@ -import pytest - -from dcicutils.deployment_utils import CreateMappingOnDeployManager -from snovault import COLLECTIONS, TYPES -from snovault.elasticsearch.create_mapping import type_mapping -from snovault.elasticsearch.create_mapping import run as run_create_mapping -from snovault.util import add_default_embeds -from unittest.mock import patch, MagicMock -from .datafixtures import ORDER -from ..commands import create_mapping_on_deploy -from ..commands.create_mapping_on_deploy import ( - ITEM_INDEX_ORDER, - _run_create_mapping # noqa - yeah, it's internal but we want to test it -) -# TODO: We should not be importing *. Even stranger, PyCharm says we don't use anything from there. -kmp 14-Feb-2020 -# Experimentally commenting this out. -kmp 28-Jun-2020 -# from ..types.experiment import * - - -pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.workbook] - -# Using workbook inserts - required for test_run_create_mapping_with_upgrader - - -@pytest.mark.parametrize('item_type', ORDER) -def test_create_mapping(registry, item_type): - """ - This test does not actually use elasticsearch - Only tests the mappings generated from schemas - """ - mapping = type_mapping(registry[TYPES], item_type) - assert mapping - type_info = registry[TYPES].by_item_type[item_type] - schema = type_info.schema - embeds = add_default_embeds(item_type, registry[TYPES], type_info.embedded_list, schema) - # assert that all embeds exist in mapping for the given type - for embed in embeds: - mapping_pointer = mapping - split_embed = embed.split('.') - for idx, split_ in enumerate(split_embed): - # see if this is last level of embedding- may be a field or object - if idx == len(split_embed) - 1: - if 'properties' in mapping_pointer and split_ in mapping_pointer['properties']: - final_mapping = mapping_pointer['properties'] - else: - final_mapping = mapping_pointer - if split_ != '*': - assert split_ in final_mapping - else: - assert 'properties' in final_mapping or final_mapping.get('type') == 'object' - else: - # TODO/dmichaels/2023-05-27: This fails on item_type = 'filter_set' and is fixed by - # removing "institution" and "project" from "attribution" in snovault/schemas/mixins.json. - # Or, it works with the new snovault.schema_utils code to look for $ref schemas in the - # app-specific (i.e. here in fourfront/portal) before looking in snovault. 
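# To make the walk above concrete: an embed such as
# 'treatment.target.feature_type.term_id' is split on '.' and followed one
# level at a time through the generated mapping (illustrative only, assuming
# each intermediate level is mapped as an object):
#
#     mapping['properties']['treatment']['properties']['target'] \
#            ['properties']['feature_type']['properties']['term_id']
#
# which is why each non-final component must appear under the current level's
# 'properties' in the assertion below.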
- assert split_ in mapping_pointer['properties'] - mapping_pointer = mapping_pointer['properties'][split_] - - -def test_create_mapping_item_order(registry): - # make sure every item type name is represented in the item ordering - for i_type in registry[COLLECTIONS].by_item_type: - # ignore "testing" types - if i_type.startswith('testing_'): - continue - assert registry[COLLECTIONS][i_type].type_info.name in ITEM_INDEX_ORDER - - -class MockedCommandArgs: - - def __init__(self, wipe_es=None, skip=None, strict=None, clear_queue=None): - self.wipe_es = wipe_es - self.skip = skip - self.strict = strict - self.clear_queue = clear_queue - - -class MockedLog: - - def __init__(self): - self.log = [] - - def info(self, msg): - self.log.append(('info', msg)) - - def error(self, msg): - self.log.append(('error', msg)) - - -# These next are more extensively tested in dcicutils. -# This is just plausibility checking that we've received things OK. - -@patch('dcicutils.deployment_utils.compute_ff_prd_env', MagicMock(return_value='fourfront-production-green')) -@patch('encoded.commands.create_mapping_on_deploy.get_my_env', MagicMock(return_value='fourfront-production-blue')) -def test_get_deployment_config_staging(): - """ Tests get_deployment_config in the new staging case """ - my_env = create_mapping_on_deploy.get_my_env('ignored-for-mock') - assert my_env == 'fourfront-production-blue' - cfg = CreateMappingOnDeployManager.get_deploy_config(env=my_env, args=MockedCommandArgs(), log=MockedLog()) - assert cfg['ENV_NAME'] == my_env # sanity - assert cfg['SKIP'] is False - assert cfg['WIPE_ES'] is True - assert cfg['STRICT'] is True - - -@patch('dcicutils.deployment_utils.compute_ff_prd_env', MagicMock(return_value='fourfront-production-green')) -@patch('encoded.commands.create_mapping_on_deploy.get_my_env', MagicMock(return_value='fourfront-production-green')) -def test_get_deployment_config_prod(): - """ Tests get_deployment_config in the new production case (should always proceed) """ - my_env = create_mapping_on_deploy.get_my_env('ignored-for-mock') - assert my_env == 'fourfront-production-green' - cfg = CreateMappingOnDeployManager.get_deploy_config(env=my_env, args=MockedCommandArgs(), log=MockedLog()) - assert cfg['ENV_NAME'] == my_env # sanity - assert cfg['SKIP'] is False - assert cfg['WIPE_ES'] is False - assert cfg['STRICT'] is False - - -@patch('dcicutils.deployment_utils.compute_ff_prd_env', MagicMock(return_value='fourfront-green')) -@patch('encoded.commands.create_mapping_on_deploy.get_my_env', MagicMock(return_value='fourfront-hotseat')) -def test_get_deployment_config_hotseat(): - """ Tests get_deployment_config in the hotseat case with a new-style ecosystem. """ - my_env = create_mapping_on_deploy.get_my_env('ignored-for-mock') - assert my_env == 'fourfront-hotseat' - cfg = CreateMappingOnDeployManager.get_deploy_config(env=my_env, args=MockedCommandArgs(), log=MockedLog()) - assert cfg['ENV_NAME'] == my_env # sanity - assert cfg['SKIP'] is True # The other values (WIPE_ES and STRICT) don't matter if this is set. - - -@patch('dcicutils.deployment_utils.compute_ff_prd_env', MagicMock(return_value='fourfront-green')) -@patch('encoded.commands.create_mapping_on_deploy.get_my_env', MagicMock(return_value='fourfront-mastertest')) -def test_get_deployment_config_mastertest(): - """ Tests get_deployment_config in the mastertest case with a new-style ecosystem. 
""" - my_env = create_mapping_on_deploy.get_my_env('ignored-for-mock') - assert my_env == 'fourfront-mastertest' - cfg = CreateMappingOnDeployManager.get_deploy_config(env=my_env, args=MockedCommandArgs(), log=MockedLog()) - assert cfg['ENV_NAME'] == my_env # sanity - assert cfg['SKIP'] is False - assert cfg['WIPE_ES'] is True - assert cfg['STRICT'] is False - - -class Simulation: - - def __init__(self, mocked_app, expect_check_first=False, expect_purge_queue=False, expect_strict=False): - self.run_has_been_called = False - self.mocked_app = mocked_app - self.expect_check_first = expect_check_first - self.expect_purge_queue = expect_purge_queue - self.expect_strict = expect_strict - - def __str__(self): - return ("<{cls} run {called} expecting cf={cf} pq={pq} es={es} {id}>" - .format(cls=self.__class__.__name__, called="CALLED" if self.run_has_been_called else "UNCALLED", - cf=self.expect_check_first, pq=self.expect_purge_queue, es=self.expect_strict, id=id(self))) - - def __repr__(self): - return self.__str__() - - def mocked_run_create_mapping(self, app, check_first=False, strict=False, purge_queue=False, item_order=None, - **kwargs): - self.run_has_been_called = True - assert kwargs == {}, "mocked_run_create_mapping needs adjusting. It doesn't expect these keywords: %s" % kwargs - assert app == self.mocked_app, "Mocked app was not as expected: %s" % app - # check_first is (not WIPE_ES) - assert check_first is self.expect_check_first, "check_first is not False: %s" % check_first - # purge_queue is whether --clear-queue was in command args - assert bool(purge_queue) is self.expect_purge_queue, ( - "bool(purge_queue) is not False. purge_queue=%s" % purge_queue) - # This should be a constant for our purposes - assert item_order == ITEM_INDEX_ORDER, "item_order was not as expected: %s" % item_order - # strict is the STRICT argument - assert strict is self.expect_strict, "strict is not False: %s" % strict - - -@patch("snovault.elasticsearch.indexer_queue.QueueManager.add_uuids") -def test_run_create_mapping_with_upgrader(mock_add_uuids, es_testapp, workbook): - """ - Test for catching items in need of upgrading when running - create_mapping. - - Indexer queue method mocked to check correct calls, so no items - actually indexed/upgraded. 
- - Moved to this file so fixtures interact cleanly - Will Sept 28 2022 - """ - app = es_testapp.app - type_to_upgrade = "Biosample" - - search_query = "/search/?type=" + type_to_upgrade + "&frame=object" - search = es_testapp.get(search_query, status=200).json["@graph"] - item_type_uuids = sorted([x["uuid"] for x in search]) - - # No schema version change, so nothing needs indexing - run_create_mapping(app, item_order=[type_to_upgrade], check_first=True, purge_queue=True, strict=True) - (_, uuids_to_index), _ = mock_add_uuids.call_args - assert not uuids_to_index - - # Change schema version in registry so all posted items of this type - # "need" to be upgraded - registry_schema = app.registry[TYPES][type_to_upgrade].schema - schema_version_default = registry_schema["properties"]["schema_version"]["default"] - updated_schema_version = str(int(schema_version_default) + 1) - registry_schema["properties"]["schema_version"]["default"] = updated_schema_version - - run_create_mapping(app, item_order=[type_to_upgrade], check_first=True) - (_, uuids_to_index), _ = mock_add_uuids.call_args - assert sorted(uuids_to_index) == item_type_uuids - - # Revert item type schema version - registry_schema["properties"]["schema_version"]["default"] = schema_version_default - - diff --git a/src/encoded/tests/test_dependencies.py b/src/encoded/tests/test_dependencies.py deleted file mode 100644 index dc807f98c7..0000000000 --- a/src/encoded/tests/test_dependencies.py +++ /dev/null @@ -1,109 +0,0 @@ -import pytest -from ..types.dependencies import DependencyEmbedder, DependencyEmbedderError - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -class TestDependencyEmbedder: - - @pytest.mark.parametrize('base_path, t, expected', [ - ('treatment', 'treatment', [ - 'treatment.treatment_type', - 'treatment.chemical', - 'treatment.biological_agent', - 'treatment.constructs.name', - 'treatment.duration', - 'treatment.duration_units', - 'treatment.concentration', - 'treatment.concentration_units', - 'treatment.temperature', - 'treatment.target.feature_type.term_id', - 'treatment.target.feature_type.term_name', - 'treatment.target.feature_type.term_url', - 'treatment.target.feature_type.preferred_name', - 'treatment.target.preferred_label', - 'treatment.target.cellular_structure', - 'treatment.target.organism_name', - 'treatment.target.relevant_genes.geneid', - 'treatment.target.relevant_genes.preferred_symbol', - 'treatment.target.feature_mods', - 'treatment.target.genome_location.genome_assembly', - 'treatment.target.genome_location.location_description', - 'treatment.target.genome_location.start_coordinate', - 'treatment.target.genome_location.end_coordinate', - 'treatment.target.genome_location.chromosome', - ]), - ('experiment_set.treatment', 'treatment', [ - 'experiment_set.treatment.treatment_type', - 'experiment_set.treatment.chemical', - 'experiment_set.treatment.biological_agent', - 'experiment_set.treatment.constructs.name', - 'experiment_set.treatment.duration', - 'experiment_set.treatment.duration_units', - 'experiment_set.treatment.concentration', - 'experiment_set.treatment.concentration_units', - 'experiment_set.treatment.temperature', - 'experiment_set.treatment.target.feature_type.term_id', - 'experiment_set.treatment.target.feature_type.term_name', - 'experiment_set.treatment.target.feature_type.term_url', - 'experiment_set.treatment.target.feature_type.preferred_name', - 'experiment_set.treatment.target.preferred_label', - 'experiment_set.treatment.target.cellular_structure', - 
'experiment_set.treatment.target.organism_name', - 'experiment_set.treatment.target.relevant_genes.geneid', - 'experiment_set.treatment.target.relevant_genes.preferred_symbol', - 'experiment_set.treatment.target.feature_mods', - 'experiment_set.treatment.target.genome_location.genome_assembly', - 'experiment_set.treatment.target.genome_location.location_description', - 'experiment_set.treatment.target.genome_location.start_coordinate', - 'experiment_set.treatment.target.genome_location.end_coordinate', - 'experiment_set.treatment.target.genome_location.chromosome', - ]) - ]) - def test_dependency_embedder_basic(self, base_path, t, expected): - embeds = DependencyEmbedder.embed_defaults_for_type(base_path=base_path, t=t) - assert sorted(embeds) == sorted(expected) - - @pytest.mark.parametrize('t', [ - 'definitely_does_not_exist', - '', - None, - 'biosample' - ]) - def test_dependency_embedder_error(self, t): - with pytest.raises(DependencyEmbedderError): - DependencyEmbedder.embed_defaults_for_type(base_path='dummy-path', t=t) - with pytest.raises(DependencyEmbedderError): - DependencyEmbedder.embed_for_type(base_path='dummy-path', t=t, additional_embeds=[]) - - @pytest.mark.parametrize('base_path,t,additional,expected', [ - ('genes', 'gene', [], [ - 'genes.geneid', - 'genes.preferred_symbol', - ]), - ('genes', 'gene', ['description'], [ - 'genes.geneid', - 'genes.preferred_symbol', - 'genes.description' - ]), - ('genes.most_severe_gene', 'gene', ['description', 'another_field'], [ - 'genes.most_severe_gene.geneid', - 'genes.most_severe_gene.preferred_symbol', - 'genes.most_severe_gene.description', - 'genes.most_severe_gene.another_field' - ]) - ]) - def test_dependency_embedder_additional_basic(self, base_path, t, additional, expected): - embeds = DependencyEmbedder.embed_for_type(base_path=base_path, t=t, additional_embeds=additional) - assert sorted(embeds) == sorted(expected) - - @pytest.mark.parametrize('additional', [ - 'a string', - 5, - None, - object() - ]) - def test_dependency_embedder_additional_error(self, additional): - with pytest.raises(DependencyEmbedderError): - DependencyEmbedder.embed_for_type(base_path='dummy-path', t='gene', additional_embeds=additional) # noQA type hints working as intended diff --git a/src/encoded/tests/test_download.py b/src/encoded/tests/test_download.py deleted file mode 100644 index 0ffd364fd0..0000000000 --- a/src/encoded/tests/test_download.py +++ /dev/null @@ -1,233 +0,0 @@ -import pytest - -from base64 import b64decode -from unittest import mock - - -pytestmark = [pytest.mark.working, pytest.mark.setone] - - -RED_DOT = """data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUA -AAAFCAYAAACNbyblAAAAHElEQVQI12P4//8/w38GIAXDIBKE0DHxgljNBAAO -9TXL0Y4OHwAAAABJRU5ErkJggg==""" - -BLUE_DOT = """data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA -oAAAAKAQMAAAC3/F3+AAAACXBIWXMAAA7DAAAOwwHHb6hkAA -AAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAgY0hSTQ -AAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAABdwnLpRPA -AAAANQTFRFALfvPEv6TAAAAAtJREFUCB1jYMAHAAAeAAEBGN -laAAAAAElFTkSuQmCC""" - - -@pytest.fixture -def testing_download(testapp): - url = '/testing-downloads/' - item = { - 'attachment': { - 'download': 'red-dot.png', - 'href': RED_DOT, - }, - 'attachment2': { - 'download': 'blue-dot.png', - 'href': BLUE_DOT, - }, - } - res = testapp.post_json(url, item, status=201) - return res.location - - -def test_download_create(testapp, testing_download): - res = testapp.get(testing_download) - attachment = res.json['attachment'] - attachment2 = res.json['attachment2'] - - assert 
attachment['href'] == '@@download/attachment/red-dot.png' - assert attachment['type'] == 'image/png' - assert attachment['width'] == 5 - assert attachment['height'] == 5 - assert attachment['md5sum'] == 'b60ab2708daec7685f3d412a5e05191a' - url = testing_download + '/' + attachment['href'] - res = testapp.get(url) - assert res.content_type == 'image/png' - assert res.body == b64decode(RED_DOT.split(',', 1)[1]) - - assert attachment2['href'] == '@@download/attachment2/blue-dot.png' - assert attachment2['type'] == 'image/png' - assert attachment2['width'] == 10 - assert attachment2['height'] == 10 - assert attachment2['md5sum'] == '013f03aa088adb19aa226c3439bda179' - url = testing_download + '/' + attachment2['href'] - res = testapp.get(url) - assert res.content_type == 'image/png' - assert res.body == b64decode(BLUE_DOT.split(',', 1)[1]) - - -def test_download_update(testapp, testing_download): - item = { - 'attachment': { - 'download': 'blue-dot.png', - 'href': BLUE_DOT, - }, - 'attachment2': { - 'download': 'red-dot.png', - 'href': RED_DOT, - }, - } - testapp.put_json(testing_download, item, status=200) - res = testapp.get(testing_download) - attachment = res.json['attachment'] - attachment2 = res.json['attachment2'] - - assert attachment['href'] == '@@download/attachment/blue-dot.png' - url = testing_download + '/' + attachment['href'] - res = testapp.get(url) - assert res.content_type == 'image/png' - assert res.body == b64decode(BLUE_DOT.split(',', 1)[1]) - - assert attachment2['href'] == '@@download/attachment2/red-dot.png' - url = testing_download + '/' + attachment2['href'] - res = testapp.get(url) - assert res.content_type == 'image/png' - assert res.body == b64decode(RED_DOT.split(',', 1)[1]) - - -def test_download_update_no_change(testapp, testing_download): - item = { - 'attachment': { - 'download': 'red-dot.png', - 'href': '@@download/attachment/red-dot.png', - }, - 'attachment2': { - 'download': 'blue-dot.png', - 'href': '@@download/attachment2/blue-dot.png', - }, - } - testapp.put_json(testing_download, item, status=200) - - res = testapp.get(testing_download) - attachment = res.json['attachment'] - attachment2 = res.json['attachment2'] - assert attachment['href'] == '@@download/attachment/red-dot.png' - assert attachment2['href'] == '@@download/attachment2/blue-dot.png' - - -def test_download_update_one(testapp, testing_download): - item = { - 'attachment': { - 'download': 'red-dot.png', - 'href': '@@download/attachment/red-dot.png', - }, - 'attachment2': { - 'download': 'red-dot.png', - 'href': RED_DOT, - }, - } - testapp.put_json(testing_download, item, status=200) - - res = testapp.get(testing_download) - attachment = res.json['attachment'] - attachment2 = res.json['attachment2'] - - assert attachment['href'] == '@@download/attachment/red-dot.png' - url = testing_download + '/' + attachment['href'] - res = testapp.get(url) - assert res.content_type == 'image/png' - assert res.body == b64decode(RED_DOT.split(',', 1)[1]) - - assert attachment2['href'] == '@@download/attachment2/red-dot.png' - url = testing_download + '/' + attachment2['href'] - res = testapp.get(url) - assert res.content_type == 'image/png' - assert res.body == b64decode(RED_DOT.split(',', 1)[1]) - - -def test_download_remove_one(testapp, testing_download): - item = { - 'attachment': { - 'download': 'red-dot.png', - 'href': '@@download/attachment/red-dot.png', - }, - } - testapp.put_json(testing_download, item, status=200) - - res = testapp.get(testing_download) - assert 'attachment' in res.json - assert 
'attachment2' not in res.json - - url = testing_download + '/@@download/attachment2/red-dot.png' - testapp.get(url, status=404) - - -@pytest.mark.parametrize( - 'href', - [ - '@@download/attachment/another.png', - 'http://example.com/another.png', - ]) -def test_download_update_bad_change(testapp, testing_download, href): - item = {'attachment': { - 'download': 'red-dot.png', - 'href': href, - }} - testapp.put_json(testing_download, item, status=422) - - -@pytest.mark.parametrize( - 'href', - [ - 'http://example.com/another.png', - 'data:image/png;base64,NOT_BASE64', - 'data:image/png;NOT_A_PNG', - 'data:text/plain;asdf', - ]) -def test_download_create_bad_change(testapp, href): - url = '/testing-downloads/' - item = {'attachment': { - 'download': 'red-dot.png', - 'href': href, - }} - testapp.post_json(url, item, status=422) - - -def test_download_create_force_extension(testapp): - url = '/testing-downloads/' - item = {'attachment': { - 'download': 'red-dot.png', - 'href': '@@download/attachment/another.png', - }} - testapp.post_json(url, item, status=201) - - -def test_download_create_wrong_extension(testapp): - url = '/testing-downloads/' - item = {'attachment': { - 'download': 'red-dot.jpg', - 'href': RED_DOT, - }} - testapp.post_json(url, item, status=422) - - -def test_download_create_w_wrong_md5sum(testapp): - url = '/testing-downloads/' - item = {'attachment': { - 'download': 'red-dot.jpg', - 'href': RED_DOT, - 'md5sum': 'deadbeef', - }} - testapp.post_json(url, item, status=422) - - -def test_download_item_with_attachment(testapp, award, lab): - item = { - 'attachment': { - 'download': 'red-dot.png', - 'href': RED_DOT, - 'blob_id': 'fa4558df-c38f-4d72-a1ea-c1a58133a4b0', - }, - 'award': award['@id'], - 'lab': lab['@id'] - } - res = testapp.post_json('/document', item).json['@graph'][0] - - with mock.patch('encoded.types.get_s3_presigned_url', return_value=''): - testapp.get(res['@id'] + res['attachment']['href'], status=200) diff --git a/src/encoded/tests/test_edw_hash.py b/src/encoded/tests/test_edw_hash.py deleted file mode 100644 index a26356ae0b..0000000000 --- a/src/encoded/tests/test_edw_hash.py +++ /dev/null @@ -1,18 +0,0 @@ -import pytest - -from ..edw_hash import EDWHash - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -TEST_HASHES = { - "test": "Jnh+8wNnELksNFVbxkya8RDrxJNL13dUWTXhp5DCx/quTM2/cYn7azzl2Uk3I2zc", - "test2": "sh33L5uQeLr//jJULb7mAnbVADkkWZrgcXx97DCacueGtEU5G2HtqUv73UTS0EI0", - "testing100" * 10: "5rznDSIcDPd/9rjom6P/qkJGtJSV47y/u5+KlkILROaqQ6axhEyVIQTahuBYerLG", -} - - -@pytest.mark.parametrize(('password', 'pwhash'), TEST_HASHES.items()) -def test_edw_hash(password, pwhash): - assert EDWHash.hash(password) == pwhash diff --git a/src/encoded/tests/test_embedding.py b/src/encoded/tests/test_embedding.py deleted file mode 100644 index 47781d7fea..0000000000 --- a/src/encoded/tests/test_embedding.py +++ /dev/null @@ -1,166 +0,0 @@ -import pytest - -from snovault import TYPES -from snovault.util import add_default_embeds, crawl_schemas_by_embeds -from ..types.base import get_item_or_none -from .datafixtures import ORDER - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -targets = [ - {'name': 'one', 'uuid': '775795d3-4410-4114-836b-8eeecf1d0c2f'}, - {'name': 'two', 'uuid': 'd6784f5e-48a1-4b40-9b11-c8aefb6e1377'}, -] - -sources = [ - { - 'name': 'A', - 'target': '775795d3-4410-4114-836b-8eeecf1d0c2f', - 'uuid': '16157204-8c8f-4672-a1a4-14f4b8021fcd', - 'status': 'current', - }, - { - 'name': 'B', - 'target': 
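-        # uuid of targets[1] ('two'); note this source is 'deleted'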
'd6784f5e-48a1-4b40-9b11-c8aefb6e1377',
-        'uuid': '1e152917-c5fd-4aec-b74f-b0533d0cc55c',
-        'status': 'deleted',
-    },
-]
-
-
-@pytest.fixture
-def content(testapp):
-    url = '/testing-link-targets/'
-    for item in targets:
-        testapp.post_json(url, item, status=201)
-
-    url = '/testing-link-sources/'
-    for item in sources:
-        testapp.post_json(url, item, status=201)
-
-
-def test_linked_uuids_object(content, dummy_request, threadlocals):
-    # needed to track _linked_uuids
-    dummy_request._indexing_view = True
-    dummy_request.embed('/testing-link-sources/', sources[0]['uuid'], '@@object')
-    # only object visited here is the source itself, hence one _linked_uuid
-    assert dummy_request._linked_uuids == {('16157204-8c8f-4672-a1a4-14f4b8021fcd', 'TestingLinkSource')}
-
-
-def test_linked_uuids_embedded(content, dummy_request, threadlocals):
-    # needed to track _linked_uuids
-    dummy_request._indexing_view = True
-    dummy_request.embed('/testing-link-sources/', sources[0]['uuid'], '@@embedded')
-    assert dummy_request._linked_uuids == {
-        ('16157204-8c8f-4672-a1a4-14f4b8021fcd', 'TestingLinkSource'),
-        ('775795d3-4410-4114-836b-8eeecf1d0c2f', 'TestingLinkTarget')
-    }
-
-
-def test_target_rev_linked_uuids_indexing_view(content, dummy_request, threadlocals):
-    res_target = dummy_request.embed('/testing-link-targets/', targets[0]['uuid'], '@@index-data', as_user='INDEXER')
-    # should have itself and the rev link to the source in _linked_uuids
-    assert dummy_request._linked_uuids == {
-        ('16157204-8c8f-4672-a1a4-14f4b8021fcd', 'TestingLinkSource'),
-        ('775795d3-4410-4114-836b-8eeecf1d0c2f', 'TestingLinkTarget')
-    }
-    assert res_target['rev_link_names'] == {'reverse': [sources[0]['uuid']]}
-    assert res_target['rev_linked_to_me'] == []
-
-
-def test_source_rev_linked_uuids_indexing_view(content, dummy_request, threadlocals):
-    res_target = dummy_request.embed('/testing-link-sources/', sources[0]['uuid'], '@@index-data', as_user='INDEXER')
-    # should have itself and the target it links to in _linked_uuids
-    assert dummy_request._linked_uuids == {
-        ('16157204-8c8f-4672-a1a4-14f4b8021fcd', 'TestingLinkSource'),
-        ('775795d3-4410-4114-836b-8eeecf1d0c2f', 'TestingLinkTarget')
-    }
-    assert res_target['rev_link_names'] == {}
-    assert res_target['rev_linked_to_me'] == ['775795d3-4410-4114-836b-8eeecf1d0c2f']
-
-
-def test_linked_uuids_experiment(experiment, lab, award, human_biosample, human_biosource, mboI, dummy_request, threadlocals):
-    to_embed = ['lab.uuid', 'award.uuid', 'biosample.biosource.uuid', 'digestion_enzyme.uuid']
-    dummy_request._indexing_view = True
-    dummy_request.embed(experiment['@id'], '@@embedded', fields_to_embed=to_embed)
-    linked_uuids = dummy_request._linked_uuids
-    # the starting item itself is also in linked_uuids
-    assert (experiment['uuid'], experiment['@type'][0]) in linked_uuids
-    assert (lab['uuid'], lab['@type'][0]) in linked_uuids
-    assert (award['uuid'], award['@type'][0]) in linked_uuids
-    # biosample is added because of biosample.biosource
-    assert (human_biosample['uuid'], human_biosample['@type'][0]) in linked_uuids
-    assert (human_biosource['uuid'], human_biosource['@type'][0]) in linked_uuids
-    assert (mboI['uuid'], mboI['@type'][0]) in linked_uuids
-
-
-@pytest.mark.parametrize('item_type', ORDER)
-def test_add_default_embeds(registry, item_type):
-    """
-    Ensure default embedding matches the schema for each object
-    """
-    type_info = registry[TYPES].by_item_type[item_type]
-    schema = type_info.schema
-    embeds = add_default_embeds(item_type, registry[TYPES],
type_info.embedded_list, schema)
-    principals_allowed_included_in_default_embeds = False
-    for embed in embeds:
-        split_embed = embed.strip().split('.')
-        if 'principals_allowed' in split_embed:
-            principals_allowed_included_in_default_embeds = True
-        error, added_embeds = crawl_schemas_by_embeds(item_type, registry[TYPES], split_embed, schema['properties'])
-        assert error is None
-
-    assert principals_allowed_included_in_default_embeds
-
-
-@pytest.mark.parametrize('item_type', ORDER)
-def test_manual_embeds(registry, item_type):
-    """
-    Ensure the manual embeds in the types files are valid
-    """
-    type_info = registry[TYPES].by_item_type[item_type]
-    schema = type_info.schema
-    embeds = type_info.embedded_list
-    for embed in embeds:
-        split_embed = embed.strip().split('.')
-        error, added_embeds = crawl_schemas_by_embeds(item_type, registry[TYPES], split_embed, schema['properties'])
-        assert error is None
-
-
-def test_fictitous_embed(registry):
-    """
-    Made up embedding for biosample, which is useful to check that default
-    embedding will occur for every item in an embed path.
-    For example, if embedded_list contains biosource.individual.organism.name,
-    the organism subpath should be in the added_embeds even though it is
-    not a terminal object.
-    """
-    type_info = registry[TYPES].by_item_type['biosample']
-    schema = type_info.schema
-    embed = 'biosource.individual.organism.name'
-    split_embed = embed.strip().split('.')
-    error, added_embeds = crawl_schemas_by_embeds('biosample', registry[TYPES], split_embed, schema['properties'])
-    assert 'biosource' in added_embeds
-    assert 'biosource.individual' in added_embeds
-    assert 'biosource.individual.organism' in added_embeds
-    assert error is None
-
-
-def test_get_item_or_none(content, dummy_request, threadlocals):
-    """
-    Not necessarily the best place for this test, but test that the
-    `get_item_or_none` function works with multiple inputs
-    """
-    used_item = sources[0]
-    # all of these should get the full item
-    res1 = get_item_or_none(dummy_request, used_item)
-    res2 = get_item_or_none(dummy_request, {'uuid': used_item['uuid']})
-    res3 = get_item_or_none(dummy_request, used_item['uuid'])
-    res4 = get_item_or_none(dummy_request, used_item['uuid'], '/testing-link-sources/')
-    for res in [res1, res2, res3, res4]:
-        assert res['uuid'] == used_item['uuid']
-        assert res['name'] == used_item['name']
-        assert '@id' in res
-        assert '@type' in res
diff --git a/src/encoded/tests/test_file.py b/src/encoded/tests/test_file.py
deleted file mode 100644
index cd7902a0f8..0000000000
--- a/src/encoded/tests/test_file.py
+++ /dev/null
@@ -1,1491 +0,0 @@
-import boto3
-import os
-import pytest
-import tempfile
-
-from dcicutils.beanstalk_utils import source_beanstalk_env_vars
-from pyramid.httpexceptions import HTTPForbidden
-from unittest import mock
-from ..types.file import FileFastq, post_upload, external_creds
-
-
-# adding a mark to a list applies it to every test in the file
-pytestmark = [pytest.mark.setone, pytest.mark.working]
-
-
-def test_processed_file_unique_md5(testapp, mcool_file_json):
-    # first time pass
-    res_init = testapp.post_json('/file_processed', mcool_file_json).json['@graph'][0]
-    res = testapp.post_json('/file_processed', mcool_file_json, status=422)
-    assert 'ValidationFailure' in res.json['@type']
-    assert mcool_file_json['md5sum'] in res.json['errors'][0]['description']
-    assert res_init['accession'] in res.json['errors'][0]['description']
-
-    # we can, of course, patch or put to ourselves though
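-    # note: the md5sum uniqueness check only rejects *other* items claiming the
-    # same md5sum; re-submitting an item's own md5sum, as below, is allowed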
testapp.patch_json('/file_processed/%s' % res_init['accession'], mcool_file_json) - testapp.put_json('/file_processed/%s' % res_init['accession'], mcool_file_json) - - # but we can't change somebody else to overwrite us - existing_md5sum = mcool_file_json['md5sum'] - mcool_file_json['md5sum'] = 'new md5sum' - res_2 = testapp.post_json('/file_processed', mcool_file_json).json['@graph'][0] - mcool_file_json['md5sum'] = existing_md5sum - - res = testapp.patch_json('/file_processed/%s' % res_2['accession'], mcool_file_json, status=422) - assert 'ValidationFailure' in res.json['@type'] - assert res.json['errors'][0]['name'] == 'File: non-unique md5sum' - assert mcool_file_json['md5sum'] in res.json['errors'][0]['description'] - - res = testapp.put_json('/file_processed/%s' % res_2['accession'], mcool_file_json, status=422) - assert res.json['errors'][0]['name'] == 'File: non-unique md5sum' - assert 'ValidationFailure' in res.json['@type'] - assert mcool_file_json['md5sum'] in res.json['errors'][0]['description'] - - -def test_processed_file_unique_md5_skip_validation(testapp, mcool_file_json): - # first time pass - res = testapp.post_json('/file_processed', mcool_file_json).json['@graph'][0] - testapp.post_json('/file_processed?force_md5=true', mcool_file_json) - testapp.patch_json('/file_processed/%s/?force_md5=true' % res['accession'], mcool_file_json) - testapp.put_json('/file_processed/%s/?force_md5=true' % res['accession'], mcool_file_json) - - -def test_reference_file_by_md5(testapp, file): - res = testapp.get('/md5:{md5sum}'.format(**file)).follow(status=200) - assert res.json['@id'] == file['@id'] - - -def test_file_content_md5sum_unique(testapp, file, fastq_json): - testapp.patch_json('/{uuid}'.format(**file), {'content_md5sum': '1234'}, status=200) - fastq_json['content_md5sum'] = '1234' - res2 = testapp.post_json('/file_fastq', fastq_json, status=409) - assert res2.json.get('detail').startswith("Keys conflict") - - -def test_replaced_file_not_uniqued(testapp, file): - testapp.patch_json('/{uuid}'.format(**file), {'status': 'replaced'}, status=200) - testapp.get('/md5:{md5sum}'.format(**file), status=404) - - -@pytest.fixture -def fastq_json(award, experiment, lab, file_formats): - return { - 'accession': '4DNFIO67APU2', - 'award': award['uuid'], - 'lab': lab['uuid'], - 'file_format': file_formats.get('fastq').get('uuid'), - 'filename': 'test.fastq.gz', - 'md5sum': '0123456789abcdef0123456789abcdef', - 'status': 'uploaded', - } - - -@pytest.fixture -def fastq_json_released(award, experiment, lab, file_formats): - return { - 'accession': '4DNFIO67APU3', - 'award': award['uuid'], - 'lab': lab['uuid'], - 'file_format': file_formats.get('fastq').get('uuid'), - 'filename': 'test.fastq.gz', - 'md5sum': '0123456789abcdef0123456789abcdef', - 'status': 'released', - } - - -@pytest.fixture -def proc_file_json(award, experiment, lab, another_lab, file_formats): - return { - 'accession': '4DNFIO67APU4', - 'award': award['uuid'], - 'lab': lab['uuid'], - 'file_format': file_formats.get('pairs').get('uuid'), - 'filename': 'test.pairs.gz', - 'md5sum': '0123456789abcdef0123456789abcdef', - 'status': 'uploading', - 'contributing_labs': [another_lab['uuid']] - } - - -def test_file_post_fastq(testapp, fastq_json): - testapp.post_json('/file_fastq', fastq_json, status=201) - - -@pytest.fixture -def fastq_uploading(fastq_json): - fastq_json['status'] = 'uploading' - return fastq_json - - -@pytest.fixture -def test_file_tsv_notes_field(award, lab, file_formats): - return { - 'award': award['uuid'], - 'lab': 
lab['uuid'], - 'file_format': file_formats.get('fastq').get('uuid') - } - - -@pytest.mark.integrated -def test_restricted_no_download(testapp, fastq_json): - # check that initial download works - res = testapp.post_json('/file_fastq', fastq_json, status=201) - resobj = res.json['@graph'][0] - s3 = boto3.client('s3') - s3.put_object(Bucket='test-wfout-bucket', Key=resobj['upload_key'], Body=str.encode('')) - download_link = resobj['href'] - testapp.get(download_link, status=307) - testapp.patch_json(resobj['@id'], {'status': 'restricted'}, status=200) - # fail download of restricted file - testapp.get(download_link, status=403) - s3.delete_object(Bucket='test-wfout-bucket', Key=resobj['upload_key']) - - -@pytest.mark.integrated -def test_upload_key_updated_on_accession_change(testapp, proc_file_json): - newacc = '4DNFINNNNNNN' - fext = 'pairs.gz' - res = testapp.post_json('/file_processed', proc_file_json, status=201) - resobj = res.json['@graph'][0] - s3 = boto3.client('s3') - s3.put_object(Bucket='test-wfout-bucket', Key=resobj['upload_key'], Body=str.encode('')) - pres = testapp.patch_json(resobj['@id'], {'accession': newacc}, status=200) - presobj = pres.json['@graph'][0] - assert resobj['upload_key'] != presobj['upload_key'] - assert presobj['upload_key'].endswith("{}.{}".format(newacc, fext)) - s3.delete_object(Bucket='test-wfout-bucket', Key=resobj['upload_key']) - - -def test_extra_files_stuff(testapp, proc_file_json, file_formats): - extra_files = [{'file_format': 'pairs_px2'}] - proc_file_json['extra_files'] = extra_files - res = testapp.post_json('/file_processed', proc_file_json, status=201) - resobj = res.json['@graph'][0] - assert len(resobj['extra_files']) == len(extra_files) - file_name = ("%s.pairs.gz.px2" % (resobj['accession'])) - expected_key = "%s/%s" % (resobj['uuid'], file_name) - assert resobj['extra_files'][0]['upload_key'] == expected_key - assert resobj['extra_files'][0]['href'] - assert resobj['extra_files_creds'][0]['file_format'] == file_formats['pairs_px2']['uuid'] - assert resobj['extra_files_creds'][0]['upload_key'] == expected_key - assert resobj['extra_files_creds'][0]['upload_credentials'] - assert 'test-wfout-bucket' in resobj['upload_credentials']['upload_url'] - - -def test_patch_extra_files(testapp, proc_file_json): - extra_files = [{'file_format': 'pairs_px2', 'status': 'to be uploaded by workflow'}] - proc_file_json['extra_files'] = extra_files - res = testapp.post_json('/file_processed', proc_file_json, status=201) - resobj = res.json['@graph'][0] - - # now patch this guy with just the extra files changing the format of extfile first - extra_files[0]['file_format'] = 'pairsam_px2' - patch = {'uuid': resobj['uuid'], 'extra_files': extra_files} - res = testapp.patch_json('/file_processed/' + resobj['uuid'], patch, status=200) - resobj = res.json['@graph'][0] - - # ensure we get correct stuff back after a patch - # bug was that we were only getting back the file_format - assert len(resobj['extra_files']) == len(extra_files) - file_name = ("%s.sam.pairs.gz.px2" % (resobj['accession'])) - expected_key = "%s/%s" % (resobj['uuid'], file_name) - assert resobj['extra_files'][0]['upload_key'] == expected_key - assert resobj['extra_files'][0]['href'] - assert resobj['extra_files_creds'][0]['upload_key'] == expected_key - assert resobj['extra_files_creds'][0]['upload_credentials'] - assert 'test-wfout-bucket' in resobj['upload_credentials']['upload_url'] - assert resobj['extra_files'][0]['status'] == 'to be uploaded by workflow' - - -def 
test_extra_files_get_upload(testapp, proc_file_json):
-    extra_files = [{'file_format': 'pairs_px2'}]
-    proc_file_json['extra_files'] = extra_files
-    res = testapp.post_json('/file_processed', proc_file_json, status=201)
-    resobj = res.json['@graph'][0]
-
-    get_res = testapp.get(resobj['@id'] + '/upload')
-    get_resobj = get_res.json['@graph'][0]
-    assert get_resobj['upload_credentials']
-    assert get_resobj['extra_files_creds'][0]
-
-
-def test_extra_files_throws_on_duplicate_file_format(testapp, proc_file_json):
-    # same file_format as original file
-    extra_files = [{'file_format': 'pairs'}]
-    proc_file_json['extra_files'] = extra_files
-    with pytest.raises(Exception) as exc:
-        testapp.post_json('/file_processed', proc_file_json, status=201)
-    assert "must have unique file_format" in str(exc.value)
-
-
-def test_extra_files_throws_on_duplicate_file_format_in_extra(testapp, proc_file_json):
-    # duplicate file_format within the extra files themselves
-    extra_files = [{'file_format': 'pairs_px2'},
-                   {'file_format': 'pairs_px'}]
-    proc_file_json['extra_files'] = extra_files
-    with pytest.raises(Exception) as exc:
-        testapp.post_json('/file_processed', proc_file_json, status=201)
-    assert "must have unique file_format" in str(exc.value)
-
-
-def test_files_aws_credentials(testapp, fastq_uploading):
-    # fastq_uploading.pop('filename')
-    res = testapp.post_json('/file_fastq', fastq_uploading, status=201)
-    resobj = res.json['@graph'][0]
-
-    res_put = testapp.put_json(resobj['@id'], fastq_uploading)
-
-    assert resobj['upload_credentials']['key'] == res_put.json['@graph'][0]['upload_credentials']['key']
-    assert 'test-wfout-bucket' in resobj['upload_credentials']['upload_url']
-
-
-def test_files_aws_credentials_change_filename(testapp, fastq_uploading, file_formats):
-    fastq_uploading['filename'] = 'test.zip'
-    fastq_uploading['file_format'] = file_formats.get('zip').get('uuid')
-    res = testapp.post_json('/file_calibration', fastq_uploading, status=201)
-    resobj = res.json['@graph'][0]
-
-    fastq_uploading['filename'] = 'test.tiff'
-    fastq_uploading['file_format'] = file_formats.get('tiff').get('uuid')
-    res_put = testapp.put_json(resobj['@id'], fastq_uploading)
-
-    assert resobj['upload_credentials']['key'].endswith('zip')
-    assert resobj['href'].endswith('zip')
-    assert res_put.json['@graph'][0]['upload_credentials']['key'].endswith('tiff')
-    assert res_put.json['@graph'][0]['href'].endswith('tiff')
-
-
-def test_status_change_doesnt_muck_with_creds(testapp, fastq_uploading, file_formats):
-    fastq_uploading['filename'] = 'test.zip'
-    fastq_uploading['file_format'] = file_formats.get('zip').get('uuid')
-    res = testapp.post_json('/file_calibration', fastq_uploading, status=201)
-    resobj = res.json['@graph'][0]
-
-    fastq_uploading['status'] = 'released'
-    res_put = testapp.put_json(resobj['@id'], fastq_uploading)
-    res_upload = testapp.get(resobj['@id'] + '/upload')
-    put_obj = res_upload.json['@graph'][0]
-
-    assert resobj['upload_credentials']['key'] == put_obj['upload_credentials']['key']
-
-    assert resobj['href'] == res_put.json['@graph'][0]['href']
-
-
-def test_s3_filename_validation(testapp, fastq_uploading, file_formats):
-    """
-    S3 won't allow certain characters in filenames, hence the regex validator
-    created in file.json schema.
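-    Filenames containing characters such as spaces, '|', '~', or '#'
-    (posted below) should be rejected with a 422.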
Required regex is: "^[\\w+=,.@-]*$" - """ - # first a working one - fastq_uploading['filename'] = 'test_file.fastq.gz' - fastq_uploading['file_format'] = file_formats.get('fastq').get('uuid') - testapp.post_json('/file_fastq', fastq_uploading, status=201) - # now some bad boys that don't pass - fastq_uploading['filename'] = 'test file.fastq.gz' - testapp.post_json('/file_fastq', fastq_uploading, status=422) - fastq_uploading['filename'] = 'test|file.fastq.gz' - testapp.post_json('/file_fastq', fastq_uploading, status=422) - fastq_uploading['filename'] = 'test~file.fastq.gz' - testapp.post_json('/file_fastq', fastq_uploading, status=422) - fastq_uploading['filename'] = 'test#file.fastq.gz' - testapp.post_json('/file_fastq', fastq_uploading, status=422) - - -def test_files_open_data_url_not_released(testapp, fastq_json): - """ Test S3 Open Data URL when a file has not been flagged as released """ - res = testapp.post_json('/file_fastq', fastq_json, status=201) - resobj = res.json['@graph'][0] - # 1. check that initial download works - download_link = resobj['href'] - direct_res = testapp.get(download_link, status=307) - # 2. check that the bucket in the redirect is the 4DN test bucket, not open data - non_open_data_bucket = 'test-wfout-bucket.s3.amazonaws.com' - assert non_open_data_bucket in [i[1] for i in direct_res.headerlist if i[0] == 'Location'][0] - - -def test_files_open_data_url_released_not_transferred(testapp, fastq_json_released): - """ Test S3 Open Data URL when a file has been released but not transferred to Open Data """ - res = testapp.post_json('/file_fastq', fastq_json_released, status=201) - resobj = res.json['@graph'][0] - # 1. check that initial download works - download_link = resobj['href'] - direct_res = testapp.get(download_link, status=307) - # 2. check that the bucket in the redirect is the 4DN test bucket, not open data - non_open_data_bucket = 'test-wfout-bucket.s3.amazonaws.com' - assert non_open_data_bucket in [i[1] for i in direct_res.headerlist if i[0] == 'Location'][0] - - -def test_files_open_data_url_released_and_transferred(testapp, fastq_json_released): - """ Test S3 Open Data URL when a file has been released and has been transferred to Open Data""" - with mock.patch('encoded.types.file.File._head_s3', return_value=None): - res = testapp.post_json('/file_fastq', fastq_json_released, status=201) - bucket = '4dn-open-data-public' # the Open Data bucket, not the 4DN test bucket - resobj = res.json['@graph'][0] - # 1. check that initial download works - download_link = resobj['href'] - direct_res = testapp.get(download_link, status=307) - # 2. 
check that the bucket in the redirect is the open data bucket, not 4DN test - assert bucket in [i[1] for i in direct_res.headerlist if i[0] == 'Location'][0] - - -@pytest.mark.integrated -def test_files_get_s3_with_no_filename_posted(testapp, fastq_uploading): - fastq_uploading.pop('filename') - res = testapp.post_json('/file_fastq', fastq_uploading, status=201) - resobj = res.json['@graph'][0] - s3 = boto3.client('s3') - s3.put_object(Bucket='test-wfout-bucket', Key=resobj['upload_key']) - - # 307 is redirect to s3 using auto generated download url - fastq_res = testapp.get('{href}' - .format(**resobj), - status=307) - s3.delete_object(Bucket='test-wfout-bucket', Key=resobj['upload_key']) - - -@pytest.mark.integrated -def test_files_get_s3_with_no_filename_patched(testapp, fastq_uploading, - fastq_json): - fastq_uploading.pop('filename') - res = testapp.post_json('/file_fastq', fastq_json, status=201) - resobj = res.json['@graph'][0] - s3 = boto3.client('s3') - s3.put_object(Bucket='test-wfout-bucket', Key=resobj['upload_key']) - - props = {'uuid': resobj['uuid'], - 'aliases': ['dcic:test_1'], - 'status': 'uploading'} - - patched = testapp.patch_json('/file_fastq/{uuid}' - .format(**props), props) - - # 307 is redirect to s3 using auto generated download url - fastq_res = testapp.get('{href}' - .format(**resobj), - status=307) - assert props['uuid'] in fastq_res.text - s3.delete_object(Bucket='test-wfout-bucket', Key=resobj['upload_key']) - -@pytest.fixture -def pairs_file_json(award, experiment, lab, file_formats, quality_metric_pairsqc): - item = { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('pairs').get('uuid'), - 'md5sum': '00000000000000000000000000000001', - 'filename': 'my.pairs.pairs.gz', - 'status': 'uploaded', - 'accession': '4DNFIAAAAAAP', - 'quality_metric': quality_metric_pairsqc['@id'] - } - return item - - -@pytest.fixture -def mcool_file_json(award, experiment, lab, file_formats): - item = { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('mcool').get('uuid'), - 'md5sum': '00000000000000000000000000000000', - 'filename': 'my.cool.mcool', - 'status': 'uploaded', - } - return item - - -@pytest.fixture -def bedGraph_file_json(award, experiment, lab, file_formats): - item = { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('bg').get('uuid'), - 'md5sum': '00000000000000000000000000000000', - 'filename': 'my.bedGraph.gz', - 'status': 'uploaded', - } - return item - -@pytest.fixture -def bigwig_file_json(award, experiment, lab, file_formats): - item = { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('bw').get('uuid'), - 'md5sum': '00000000000000000000000000000000', - 'filename': 'my.bw', - 'status': 'uploaded', - } - return item - -@pytest.fixture -def bigbed_file_json(award, experiment, lab, file_formats): - item = { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('bigbed').get('uuid'), - 'md5sum': '00000000000000000000000000000000', - 'filename': 'my.bb', - 'status': 'uploaded', - } - return item - -@pytest.fixture -def bed_beddb_file_json(award, experiment, lab, file_formats): - item = { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('bed').get('uuid'), - 'md5sum': '00000000000000000000000000000000', - 'filename': 'my.bed.gz', - 'status': 'uploaded', - 'extra_files' : [ - { - "file_format" : file_formats.get('beddb').get('uuid'), - "file_size" :12345678, - "md5sum": 
"00000000000000000000000000000002" - }, - ] - } - return item - -@pytest.fixture -def beddb_file_json(award, experiment, lab, file_formats): - item = { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('beddb').get('uuid'), - 'md5sum': '00000000000000000000000000000000', - 'filename': 'my.beddb', - 'status': 'uploaded', - } - return item - -@pytest.fixture -def chromsizes_file_json(award, experiment, lab, file_formats): - item = { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('chromsizes').get('uuid'), - 'md5sum': '00000000000000000000000000000000', - 'filename': 'my.chrom.sizes', - 'status': 'uploaded', - } - return item - -@pytest.fixture -def mcool_file(testapp, mcool_file_json): - res = testapp.post_json('/file_processed', mcool_file_json) - return res.json['@graph'][0] - - -@pytest.fixture -def file(testapp, award, experiment, lab, file_formats): - - item = { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('fastq').get('uuid'), - 'md5sum': '00000000000000000000000000000000', - 'filename': 'my.fastq.gz', - 'status': 'uploaded', - } - res = testapp.post_json('/file_fastq', item) - return res.json['@graph'][0] - - -@pytest.fixture -def fastq_related_file(fastq_json): - item = fastq_json.copy() - item['related_files'] = [{'relationship_type': 'derived from', - 'file': fastq_json['accession']}] - item['md5sum'] = '2123456789abcdef0123456789abcdef' - item['accession'] = '' - return item - - -def test_file_post_fastq_related(testapp, fastq_json, fastq_related_file): - testapp.post_json('/file_fastq', fastq_json, status=201) - fastq_related_res = testapp.post_json('/file_fastq', fastq_related_file, status=201) - - # when updating the last one we should have updated this one too - fastq_res = testapp.get('/md5:{md5sum}'.format(**fastq_json)).follow(status=200) - fastq_related_files = fastq_res.json['related_files'] - assert fastq_related_files[0]['file']['@id'] == fastq_related_res.json['@graph'][0]['@id'] - - -def test_external_creds(): - - with mock.patch('encoded.types.file.boto3', autospec=True): - - ret = external_creds('test-wfout-bucket', 'test-key', 'name') - assert ret['key'] == 'test-key' - assert ret['bucket'] == 'test-wfout-bucket' - assert ret['service'] == 's3' - assert 'upload_credentials' in ret.keys() - - -def test_create_file_request_proper_s3_resource(registry, fastq_json): - # ensure status uploading so create tries to upload - fastq_json['status'] = "uploading" - # don't actually call aws - with mock.patch('encoded.types.file.external_creds') as external_creds: - # don't actually create this bad boy - with mock.patch('encoded.types.base.Item.create'): - FileFastq.create(registry, '1234567', fastq_json) - # check that we would have called aws - expected_s3_key = "1234567/%s.fastq.gz" % (fastq_json['accession']) - external_creds.assert_called_once_with('test-wfout-bucket', expected_s3_key, - fastq_json['filename'], 'test-profile') - - -def test_name_for_replaced_file_is_uuid(registry, fastq_json): - fastq_json['status'] = 'replaced' - uuid = "0afb6080-1c08-11e4-8c21-0800200c9a44" - my_file = FileFastq.create(registry, uuid, fastq_json) - assert my_file.__name__ == uuid - - -def test_upload_credentails_not_set_for_replaced_file(registry, fastq_json): - fastq_json['status'] = 'replaced' - uuid = "0afb6080-1c08-11e4-8c21-0800200c9a44" - my_file = FileFastq.create(registry, uuid, fastq_json) - # upload credentials only get set when status is 'uploading' - assert 
my_file.upload_credentials() is None - - -def test_name_for_file_is_accession(registry, fastq_json): - uuid = "0afb6080-1c08-11e4-8c21-0800200c9a44" - my_file = FileFastq.create(registry, uuid, fastq_json) - assert my_file.__name__ == fastq_json['accession'] - - -def test_calculated_display_title_for_fastq(file): - assert file['display_title'] == file['accession'] + '.fastq.gz' - - -def test_post_upload_only_on_uploading(registry, fastq_json, request): - uuid = "0afb6080-1c08-11e4-8c21-0800200c9a44" - my_file = FileFastq.create(registry, uuid, fastq_json) - try: - post_upload(my_file, request) - except HTTPForbidden: - assert True - return - assert False - - -def test_post_upload_only_for_uploading_or_upload_failed_status(registry, fastq_json, request): - fastq_json['status'] = 'uploaded' - uuid = "0afb6080-1c08-11e4-8c21-0800200c9a44" - my_file = FileFastq.create(registry, uuid, fastq_json) - try: - post_upload(my_file, request) - except HTTPForbidden as e: - assert True - else: - assert False - - -def test_workflowrun_output_rev_link(testapp, fastq_json, workflow_run_json): - res = testapp.post_json('/file_fastq', fastq_json, status=201).json['@graph'][0] - workflow_run_json['output_files'] = [{'workflow_argument_name': 'test', 'value': res['@id']}] - res2 = testapp.post_json('/workflow_run_sbg', workflow_run_json).json['@graph'][0] - - new_file = testapp.get(res['@id']).json - assert new_file['workflow_run_outputs'][0]['@id'] == res2['@id'] - - -def test_workflowrun_input_rev_link(testapp, fastq_json, workflow_run_json): - res = testapp.post_json('/file_fastq', fastq_json, status=201).json['@graph'][0] - workflow_run_json['input_files'] = [{'workflow_argument_name': 'test', 'value': res['@id']}] - res2 = testapp.post_json('/workflow_run_sbg', workflow_run_json).json['@graph'][0] - - new_file = testapp.get(res['@id']).json - assert new_file['workflow_run_inputs'][0]['@id'] == res2['@id'] - - -def test_workflowrun_input_rev_link_pf(testapp, proc_file_json, workflow_run_awsem_json): - res = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0] - workflow_run_awsem_json['input_files'] = [{'workflow_argument_name': 'test_inp', 'value': res['@id']}] - workflow_run_awsem_json['output_files'] = [{'workflow_argument_name': 'test_out', 'value': res['@id']}] - res2 = testapp.post_json('/workflow_run_awsem', workflow_run_awsem_json).json['@graph'][0] - new_file = testapp.get(res['@id']).json - assert new_file['workflow_run_inputs'][0]['@id'] == res2['@id'] - assert new_file['workflow_run_outputs'][0]['@id'] == res2['@id'] - - -def test_workflowrun_input_rev_link_pf_disabled_at_post(testapp, proc_file_json, workflow_run_awsem_json): - proc_file_json['disable_wfr_inputs'] = True - res = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0] - workflow_run_awsem_json['input_files'] = [{'workflow_argument_name': 'test_inp', 'value': res['@id']}] - workflow_run_awsem_json['output_files'] = [{'workflow_argument_name': 'test_out', 'value': res['@id']}] - res2 = testapp.post_json('/workflow_run_awsem', workflow_run_awsem_json).json['@graph'][0] - new_file = testapp.get(res['@id']).json - assert new_file['workflow_run_outputs'][0]['@id'] == res2['@id'] - assert new_file.get('workflow_run_inputs') == [] - - -def test_workflowrun_input_rev_link_pf_disabled_at_patch(testapp, proc_file_json, workflow_run_awsem_json): - res = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0] - workflow_run_awsem_json['input_files'] = 
[{'workflow_argument_name': 'test_inp', 'value': res['@id']}] - workflow_run_awsem_json['output_files'] = [{'workflow_argument_name': 'test_out', 'value': res['@id']}] - res2 = testapp.post_json('/workflow_run_awsem', workflow_run_awsem_json).json['@graph'][0] - new_file = testapp.patch_json(res['@id'], {'disable_wfr_inputs': True}, status=200).json['@graph'][0] - assert new_file['workflow_run_outputs'][0] == res2['@id'] - assert new_file.get('workflow_run_inputs') == [] - - -def test_experiment_rev_link_on_files(testapp, fastq_json, experiment_data): - res = testapp.post_json('/file_fastq', fastq_json, status=201).json['@graph'][0] - experiment_data['files'] = [res['@id']] - res2 = testapp.post_json('/experiment_hi_c', experiment_data).json['@graph'][0] - new_file = testapp.get(res['@id']).json - assert new_file['experiments'][0]['@id'] == res2['@id'] - - -def test_experiment_rev_link_on_processedfiles(testapp, proc_file_json, experiment_data): - res = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0] - experiment_data['processed_files'] = [res['@id']] - res2 = testapp.post_json('/experiment_hi_c', experiment_data).json['@graph'][0] - - new_file = testapp.get(res['@id']).json - assert new_file['experiments'][0]['@id'] == res2['@id'] - - -def test_no_experiment_rev_link_on_file_processed_in_files_field( - testapp, proc_file_json, experiment_data): - res = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0] - experiment_data['files'] = [res['@id']] - res2 = testapp.post_json('/experiment_hi_c', experiment_data).json['@graph'][0] - - new_file = testapp.get(res['@id']).json - assert not new_file['experiments'] - - -def test_experiment_set_rev_link_on_processedfiles(testapp, proc_file_json, rep_set_data): - res = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0] - rep_set_data['processed_files'] = [res['@id']] - res2 = testapp.post_json('/experiment_set_replicate', rep_set_data).json['@graph'][0] - - new_file = testapp.get(res['@id']).json - assert new_file['experiment_sets'][0]['@id'] == res2['@id'] - - -def test_no_experiment_set_rev_link_on_raw_file(testapp, fastq_json, experiment_data, rep_set_data): - res = testapp.post_json('/file_fastq', fastq_json, status=201).json['@graph'][0] - experiment_data['files'] = [res['@id']] - res2 = testapp.post_json('/experiment_hi_c', experiment_data).json['@graph'][0] - rep_set_data['replicate_exps'] = [ - {'replicate_exp': res2['@id'], - 'bio_rep_no': 1, - 'tec_rep_no': 1 - }] - res3 = testapp.post_json('/experiment_set_replicate', rep_set_data).json['@graph'][0] - - new_file = testapp.get(res['@id']).json - assert new_file['experiments'][0]['@id'] == res2['@id'] - assert 'experiment_sets' not in new_file - - -def test_force_beanstalk_env(): - """ - This test is a bit outdated, since env variable loading has moved to - application __init__ from file.py. But let's keep the test... 
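-
-    It unsets the AWS credential env vars, writes a beanstalk-style config file
-    that exports replacement values, runs source_beanstalk_env_vars on it, and
-    checks that boto3.client is called with the values sourced from that file.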
- """ - secret = os.environ.get("AWS_SECRET_ACCESS_KEY") - key = os.environ.get("AWS_ACCESS_KEY_ID") - os.environ.pop("AWS_SECRET_ACCESS_KEY") - os.environ.pop("AWS_ACCESS_KEY_ID") - - test_cfg = tempfile.NamedTemporaryFile(mode='w', delete=False) - test_cfg.write('export AWS_SECRET_ACCESS_KEY="its a secret"\n') - test_cfg.write('export AWS_ACCESS_KEY_ID="its a secret id"\n') - test_cfg_name = test_cfg.name - test_cfg.close() - - # mock_boto - with mock.patch('encoded.tests.test_file.boto3', autospec=True) as mock_boto: - - source_beanstalk_env_vars(test_cfg_name) - boto3.client('sts', aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID"), - aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY")) - # reset - os.environ["AWS_SECRET_ACCESS_KEY"] = secret - os.environ["AWS_ACCESS_KEY_ID"] = key - # os.remove(test_cfg.delete) - - # ensure boto called with correct arguments - mock_boto.client.assert_called_once_with('sts', aws_access_key_id='its a secret id', - aws_secret_access_key='its a secret') - - -@pytest.fixture -def processed_file_data(award, lab, file_formats): - return { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('pairs').get('uuid'), - } - - -def test_validate_produced_from_files_no_produced_by_and_filename_no_filename( - testapp, processed_file_data): - res = testapp.post_json('/files-processed', processed_file_data, status=201) - assert not res.json.get('errors') - - -def test_validate_filename_invalid_file_format_post(testapp, processed_file_data): - processed_file_data['file_format'] = 'stringy file format' - processed_file_data['filename'] = 'test_file.pairs.gz' - res = testapp.post_json('/files-processed', processed_file_data, status=422) - errors = res.json['errors'] - descriptions = ''.join([e['description'] for e in errors]) - assert 'Problem getting file_format for test_file.pairs.gz' in descriptions - - -def test_validate_filename_valid_file_format_and_name_post(testapp, processed_file_data): - processed_file_data['filename'] = 'test_file.pairs.gz' - res = testapp.post_json('/files-processed', processed_file_data, status=201) - assert not res.json.get('errors') - - -def test_validate_filename_invalid_filename_post(testapp, processed_file_data): - processed_file_data['filename'] = 'test_file_pairs.gz' - res = testapp.post_json('/files-processed', processed_file_data, status=422) - errors = res.json['errors'] - descriptions = ''.join([e['description'] for e in errors]) - assert "Filename test_file_pairs.gz extension does not agree with specified file format. 
Valid extension(s): '.pairs.gz'" in descriptions - - -def test_validate_filename_valid_filename_patch(testapp, processed_file_data): - processed_file_data['filename'] = 'test_file1.pairs.gz' - res1 = testapp.post_json('/files-processed', processed_file_data, status=201) - assert not res1.json.get('errors') - res1_props = res1.json['@graph'][0] - assert res1_props['filename'] == 'test_file1.pairs.gz' - filename2patch = 'test_file2.pairs.gz' - res2 = testapp.patch_json(res1_props['@id'], {'filename': filename2patch}, status=200) - assert not res2.json.get('errors') - assert res2.json['@graph'][0]['filename'] == 'test_file2.pairs.gz' - - -def test_validate_filename_invalid_filename_patch(testapp, processed_file_data): - processed_file_data['filename'] = 'test_file1.pairs.gz' - res1 = testapp.post_json('/files-processed', processed_file_data, status=201) - assert not res1.json.get('errors') - res1_props = res1.json['@graph'][0] - assert res1_props['filename'] == 'test_file1.pairs.gz' - filename2patch = 'test_file2.bam' - res2 = testapp.patch_json(res1_props['@id'], {'filename': filename2patch}, status=422) - errors = res2.json['errors'] - descriptions = ''.join([e['description'] for e in errors]) - assert "Filename test_file2.bam extension does not agree with specified file format. Valid extension(s): '.pairs.gz'" in descriptions - - -def test_validate_produced_from_files_invalid_post(testapp, processed_file_data): - fids = ['not_a_file_id', 'definitely_not'] - processed_file_data['produced_from'] = fids - res = testapp.post_json('/files-processed', processed_file_data, status=422) - errors = res.json['errors'] - descriptions = [e['description'] for e in errors] - for fid in fids: - desc = "'%s' not found" % fid - assert desc in descriptions - - -def test_validate_produced_from_files_valid_post(testapp, processed_file_data, file, mcool_file): - processed_file_data['produced_from'] = [file['@id'], mcool_file['@id']] - res = testapp.post_json('/files-processed', processed_file_data, status=201) - assert not res.json.get('errors') - - -def test_validate_produced_from_files_valid_patch(testapp, processed_file_data, file, mcool_file): - res = testapp.post_json('/files-processed', processed_file_data, status=201).json['@graph'][0] - pres = testapp.patch_json(res['@id'], {'produced_from': [file['@id'], mcool_file['@id']]}, status=200) - assert not pres.json.get('errors') - - -def test_validate_extra_files_no_extra_files(testapp, processed_file_data): - res = testapp.post_json('/files-processed', processed_file_data, status=201) - assert not res.json.get('errors') - - -def test_validate_extra_files_extra_files_good_post(testapp, processed_file_data): - extf = {'file_format': 'pairs_px2'} - processed_file_data['extra_files'] = [extf] - res = testapp.post_json('/files-processed', processed_file_data, status=201) - assert not res.json.get('errors') - - -def test_validate_extra_files_extra_files_bad_post_extra_same_as_primary(testapp, processed_file_data): - extf = {'file_format': 'pairs'} - processed_file_data['extra_files'] = [extf] - res = testapp.post_json('/files-processed', processed_file_data, status=422) - assert res.json['errors'][0]['name'] == 'File: invalid extra_file formats' - assert "'pairs' format cannot be the same for file and extra_file" == res.json['errors'][0]['description'] - - -def test_validate_extra_files_extra_files_bad_patch_extra_same_as_primary(testapp, processed_file_data): - extf = {'file_format': 'pairs'} - res1 = testapp.post_json('/files-processed', processed_file_data, 
status=201) - pfid = res1.json['@graph'][0]['@id'] - res2 = testapp.patch_json(pfid, {'extra_files': [extf]}, status=422) - assert res2.json['errors'][0]['name'] == 'File: invalid extra_file formats' - assert "'pairs' format cannot be the same for file and extra_file" == res2.json['errors'][0]['description'] - - -def test_validate_extra_files_extra_files_bad_post_existing_extra_format(testapp, processed_file_data): - extfs = [{'file_format': 'pairs_px2'}, {'file_format': 'pairs_px2'}] - processed_file_data['extra_files'] = extfs - res = testapp.post_json('/files-processed', processed_file_data, status=422) - assert res.json['errors'][0]['name'] == 'File: invalid extra_file formats' - assert "Multple extra files with 'pairs_px2' format cannot be submitted at the same time" == res.json['errors'][0]['description'] - - -def test_validate_extra_files_extra_files_ok_patch_existing_extra_format(testapp, processed_file_data): - extf = {'file_format': 'pairs_px2'} - processed_file_data['extra_files'] = [extf] - res1 = testapp.post_json('/files-processed', processed_file_data, status=201) - pfid = res1.json['@graph'][0]['@id'] - res2 = testapp.patch_json(pfid, {'extra_files': [extf]}, status=200) - assert not res2.json.get('errors') - - -def test_validate_extra_files_parent_should_not_have_extras( - testapp, processed_file_data, file_formats): - extf = {'file_format': 'pairs_px2'} - processed_file_data['file_format'] = file_formats.get('mcool').get('uuid') - processed_file_data['extra_files'] = [extf] - res1 = testapp.post_json('/files-processed', processed_file_data, status=422) - errors = res1.json['errors'] - descriptions = ''.join([e['description'] for e in errors]) - assert "File with format mcool should not have extra_files" in descriptions - - -def test_validate_extra_files_bad_extras_format( - testapp, processed_file_data, file_formats): - extf = {'file_format': 'whosit'} - processed_file_data['extra_files'] = [extf] - res1 = testapp.post_json('/files-processed', processed_file_data, status=422) - errors = res1.json['errors'] - descriptions = ''.join([e['description'] for e in errors]) - assert "'whosit' not a valid or known file format" in descriptions - - -def test_validate_file_format_validity_for_file_type_allows(testapp, file_formats, award, lab): - my_fastq_file = { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('fastq').get('uuid'), - } - my_proc_file = { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('pairs').get('uuid'), - } - res1 = testapp.post_json('/files-fastq', my_fastq_file, status=201) - res2 = testapp.post_json('/files-processed', my_proc_file, status=201) - assert not res1.json.get('errors') - assert not res2.json.get('errors') - - -def test_validate_file_format_validity_for_file_type_fires(testapp, file_formats, award, lab): - my_fastq_file = { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('pairs').get('uuid'), - } - my_proc_file = { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('fastq').get('uuid'), - } - res1 = testapp.post_json('/files-fastq', my_fastq_file, status=422) - errors = res1.json['errors'] - descriptions = ''.join([e['description'] for e in errors]) - assert "File format pairs is not allowed for FileFastq" in descriptions - res2 = testapp.post_json('/files-processed', my_proc_file, status=422) - errors = res2.json['errors'] - descriptions = ''.join([e['description'] for e in errors]) - assert "File format fastq is not allowed 
for FileProcessed" in descriptions - - -def test_file_format_does_not_exist(testapp, file_formats, award, lab): - my_fastq_file = { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': 'waldo', - } - res1 = testapp.post_json('/files-fastq', my_fastq_file, status=422) - errors = res1.json['errors'] - descriptions = ''.join([e['description'] for e in errors]) - assert "'waldo' not found" in descriptions - - -def test_filename_patch_fails_wrong_format(testapp, file_formats, award, lab): - my_fastq_file = { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('fastq').get('uuid'), - 'filename': 'test.fastq.gz' - } - res1 = testapp.post_json('/files-fastq', my_fastq_file, status=201) - resobj = res1.json['@graph'][0] - patch_data = {"file_format": file_formats.get('pairs').get('uuid')} - res2 = testapp.patch_json('/files-fastq/' + resobj['uuid'], patch_data, status=422) - errors = res2.json['errors'] - error1 = "Filename test.fastq.gz extension does not agree with specified file format. Valid extension(s): '.pairs.gz'" - error2 = "File format pairs is not allowed for FileFastq" - descriptions = ''.join([e['description'] for e in errors]) - assert error1 in descriptions - assert error2 in descriptions - - -def test_filename_patch_works_with_different_format(testapp, file_formats, award, lab): - my_proc_file = { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('pairs').get('uuid'), - 'filename': 'test.pairs.gz' - } - res1 = testapp.post_json('/files-processed', my_proc_file, status=201) - resobj = res1.json['@graph'][0] - patch_data = {"file_format": file_formats.get('bam').get('uuid'), 'filename': 'test.bam'} - res2 = testapp.patch_json('/files-processed/' + resobj['uuid'], patch_data, status=200) - assert not res2.json.get('errors') - - -def test_file_format_patch_works_if_no_filename(testapp, file_formats, award, lab): - my_proc_file = { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('pairs').get('uuid') - } - res1 = testapp.post_json('/files-processed', my_proc_file, status=201) - resobj = res1.json['@graph'][0] - patch_data = {"file_format": file_formats.get('bam').get('uuid')} - res2 = testapp.patch_json('/files-processed/' + resobj['uuid'], patch_data, status=200) - assert not res2.json.get('errors') - - -def test_file_generate_track_title_fp_all_present(testapp, file_formats, award, lab): - pf_file_meta = { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('mcool').get('uuid'), - 'override_experiment_type': 'DNase Hi-C', - 'override_lab_name': 'Test Lab', - 'file_type': 'normalized counts', - 'override_assay_info': 'PARK1', - 'override_biosource_name': 'GM12878', - 'override_replicate_info': 'Biorep 1, Techrep 1', - 'override_experiment_bucket': 'processed file', - 'higlass_uid': 'test_hg_uid' - } - res1 = testapp.post_json('/files-processed', pf_file_meta, status=201) - pf = res1.json.get('@graph')[0] - assert pf.get('track_and_facet_info', {}).get('track_title') == 'normalized counts for GM12878 DNase Hi-C PARK1' - - -def test_file_generate_track_title_w_override_title_all_present(testapp, file_formats, award, lab): - pf_file_meta = { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('mcool').get('uuid'), - 'override_experiment_type': 'DNase Hi-C', - 'override_lab_name': 'Test Lab', - 'file_type': 'normalized counts', - 'override_assay_info': 'PARK1', - 'override_biosource_name': 'GM12878', - 'override_replicate_info': 'Biorep 
1, Techrep 1', - 'override_experiment_bucket': 'processed file', - 'override_track_title': 'my test track title', - 'higlass_uid': 'test_hg_uid' - } - res1 = testapp.post_json('/files-processed', pf_file_meta, status=201) - pf = res1.json.get('@graph')[0] - assert pf.get('track_and_facet_info', {}).get('track_title') == 'my test track title' - - -def test_file_generate_track_title_w_override_title_all_missing(testapp, file_formats, award, lab): - pf_file_meta = { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('mcool').get('uuid'), - 'file_type': 'normalized counts', - 'override_track_title': 'my test track title', - 'higlass_uid': 'test_hg_uid' - } - res1 = testapp.post_json('/files-processed', pf_file_meta, status=201) - pf = res1.json.get('@graph')[0] - assert pf.get('track_and_facet_info', {}).get('track_title') == 'my test track title' - - -def test_file_generate_track_title_fp_all_missing(testapp, file_formats, award, lab): - pf_file_meta = { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('mcool').get('uuid'), - 'lab': lab['@id'], - 'higlass_uid': 'test_hg_uid' - } - res1 = testapp.post_json('/files-processed', pf_file_meta, status=201) - pf = res1.json.get('@graph')[0] - assert pf.get('track_and_facet_info', {}).get('track_title') is None - - -def test_file_generate_track_title_fp_most_missing(testapp, file_formats, award, lab): - pf_file_meta = { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('mcool').get('uuid'), - 'lab': lab['@id'], - 'override_experiment_type': 'DNase Hi-C', - 'higlass_uid': 'test_hg_uid' - } - res1 = testapp.post_json('/files-processed', pf_file_meta, status=201) - pf = res1.json.get('@graph')[0] - assert pf.get('track_and_facet_info', {}).get('track_title') == 'unspecified type for unknown sample DNase Hi-C' - - -def test_file_generate_track_title_fvis(testapp, file_formats, award, lab, GM12878_biosource): - vistrack_meta = { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('mcool').get('uuid'), - 'override_experiment_type': 'DNase Hi-C', - 'lab': lab['@id'], - 'file_type': 'fold change over control', - 'override_lab_name': 'Some Dude, Somewhere', - 'override_assay_info': 'PARK1', - 'biosource': GM12878_biosource['@id'], - 'override_replicate_info': 'bio1 tec1', - 'higlass_uid': 'test_hg_uid' - } - res1 = testapp.post_json('/files-vistrack', vistrack_meta) - vt = res1.json.get('@graph')[0] - assert vt.get('track_and_facet_info', {}).get('track_title') == 'fold change over control for GM12878 DNase Hi-C PARK1' - - -@pytest.fixture -def custom_experiment_set_data(lab, award): - return { - 'lab': lab['@id'], - 'award': award['@id'], - 'description': 'test experiment set', - 'experimentset_type': 'custom', - 'status': 'in review by lab' - } - - -def test_track_and_file_facet_info_no_link_to_exp_or_eset(testapp, proc_file_json): - # should only have lab_name - res = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0] - tf_info = res.get('track_and_facet_info') - assert 'lab_name' in tf_info - assert len(tf_info) == 1 - - -def test_track_and_file_facet_info_file_link_to_multi_expts( - testapp, proc_file_json, experiment_data): - pfile = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0] - experiment_data['processed_files'] = [pfile['@id']] - expt1 = testapp.post_json('/experiment_hi_c', experiment_data, status=201).json['@graph'][0] - expt2 = 
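The expected titles in the preceding tests follow one composition rule - file_type, then biosource, experiment type, and assay detail, with fallbacks when a field is missing. A minimal sketch of that rule (names here are illustrative; the real logic is the calculated track_and_facet_info track_title):

def compose_track_title(file_type=None, biosource=None, experiment_type=None, assay_info=None):
    # 'normalized counts for GM12878 DNase Hi-C PARK1' when all fields are present;
    # 'unspecified type for unknown sample DNase Hi-C' when only experiment type is known.
    parts = [file_type or 'unspecified type', 'for',
             biosource or 'unknown sample',
             experiment_type or '', assay_info or '']
    return ' '.join(p for p in parts if p)
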
-@pytest.fixture
-def custom_experiment_set_data(lab, award):
-    return {
-        'lab': lab['@id'],
-        'award': award['@id'],
-        'description': 'test experiment set',
-        'experimentset_type': 'custom',
-        'status': 'in review by lab'
-    }
-
-
-def test_track_and_file_facet_info_no_link_to_exp_or_eset(testapp, proc_file_json):
-    # should only have lab_name
-    res = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0]
-    tf_info = res.get('track_and_facet_info')
-    assert 'lab_name' in tf_info
-    assert len(tf_info) == 1
-
-
-def test_track_and_file_facet_info_file_link_to_multi_expts(
-        testapp, proc_file_json, experiment_data):
-    pfile = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0]
-    experiment_data['processed_files'] = [pfile['@id']]
-    expt1 = testapp.post_json('/experiment_hi_c', experiment_data, status=201).json['@graph'][0]
-    expt2 = testapp.post_json('/experiment_hi_c', experiment_data, status=201).json['@graph'][0]
-    assert pfile['@id'] in expt1['processed_files']
-    assert pfile['@id'] in expt2['processed_files']
-    res = testapp.get(pfile['@id']).json
-    tf_info = res.get('track_and_facet_info')
-    assert 'lab_name' in tf_info
-    assert len(tf_info) == 1
-
-
-def test_track_and_file_facet_info_file_link_to_expt_w_cat_rep_type_pfbucket(
-        testapp, proc_file_json, experiment_data):
-    pfile = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0]
-    experiment_data['processed_files'] = [pfile['@id']]
-    testapp.post_json('/experiment_hi_c', experiment_data, status=201)
-    res = testapp.get(pfile['@id']).json
-    tf_info = res.get('track_and_facet_info')
-    assert tf_info['experiment_type'] == 'in situ Hi-C'
-    assert tf_info['experiment_bucket'] == 'processed file'
-    assert tf_info['assay_info'] == 'MboI'
-
-
-def test_track_and_file_facet_info_file_link_to_expt_opfbucket(
-        testapp, proc_file_json, experiment_data):
-    pfile = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0]
-    experiment_data['other_processed_files'] = [{'title': 'some other files', 'type': 'supplementary', 'files': [pfile['@id']]}]
-    testapp.post_json('/experiment_hi_c', experiment_data, status=201)
-    res = testapp.get(pfile['@id']).json
-    tf_info = res.get('track_and_facet_info')
-    assert tf_info['experiment_type'] == 'in situ Hi-C'
-    assert tf_info['experiment_bucket'] == 'some other files'
-
-
-def test_track_and_file_facet_info_file_link_to_expt_pf_and_opf_buckets(
-        testapp, proc_file_json, experiment_data):
-    pfile = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0]
-    experiment_data['processed_files'] = [pfile['@id']]
-    experiment_data['other_processed_files'] = [{'title': 'some other files', 'type': 'supplementary', 'files': [pfile['@id']]}]
-    testapp.post_json('/experiment_hi_c', experiment_data, status=201)
-    res = testapp.get(pfile['@id']).json
-    tf_info = res.get('track_and_facet_info')
-    assert tf_info['experiment_type'] == 'in situ Hi-C'
-    assert tf_info['experiment_bucket'] == 'processed file'
-
-
-def test_track_and_file_facet_info_file_link_to_expt_w_rep(
-        testapp, proc_file_json, experiment_data, rep_set_data):
-    pfile = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0]
-    experiment_data['processed_files'] = [pfile['@id']]
-    expt = testapp.post_json('/experiment_hi_c', experiment_data, status=201).json['@graph'][0]
-    rep_set_data['replicate_exps'] = [{'bio_rep_no': 1, 'tec_rep_no': 1, 'replicate_exp': expt['@id']}]
-    testapp.post_json('/experiment_set_replicate', rep_set_data, status=201)
-    res = testapp.get(pfile['@id']).json
-    tf_info = res.get('track_and_facet_info')
-    assert tf_info['experiment_type'] == 'in situ Hi-C'
-    assert tf_info['replicate_info'] == 'Biorep 1, Techrep 1'
-
-
-def test_track_and_file_facet_info_file_link_to_expt_w_rep_and_custom_eset(
-        testapp, proc_file_json, experiment_data, rep_set_data, custom_experiment_set_data):
-    pfile = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0]
-    experiment_data['processed_files'] = [pfile['@id']]
-    expt = testapp.post_json('/experiment_hi_c', experiment_data, status=201).json['@graph'][0]
-    rep_set_data['replicate_exps'] = [{'bio_rep_no': 1, 'tec_rep_no': 1, 'replicate_exp': expt['@id']}]
-    testapp.post_json('/experiment_set_replicate', rep_set_data, status=201)
-    custom_experiment_set_data['experiments_in_set'] = [expt['@id']]
-    testapp.post_json('/experiment_set', custom_experiment_set_data, status=201)
-    res = testapp.get(pfile['@id']).json
-    tf_info = res.get('track_and_facet_info')
-    assert tf_info['experiment_type'] == 'in situ Hi-C'
-    assert tf_info['replicate_info'] == 'Biorep 1, Techrep 1'
-
-
-def test_track_and_file_facet_info_file_link_to_expt_no_cat_or_rep(
-        testapp, proc_file_json, experiment_data, exp_types):
-    pfile = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0]
-    experiment_data['experiment_type'] = exp_types['rnaseq']['@id']
-    experiment_data['processed_files'] = [pfile['@id']]
-    del experiment_data['digestion_enzyme']
-    testapp.post_json('/experiment_seq', experiment_data, status=201)
-    res = testapp.get(pfile['@id']).json
-    tf_info = res.get('track_and_facet_info')
-    assert tf_info['experiment_type'] == 'RNA-seq'
-    assert 'assay_info' not in tf_info
-    assert 'replicate_info' not in tf_info
-
-
-def test_track_and_file_facet_info_file_link_to_expt_biosample_cell(
-        testapp, proc_file_json, experiment_data):
-    pfile = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0]
-    experiment_data['processed_files'] = [pfile['@id']]
-    testapp.post_json('/experiment_hi_c', experiment_data, status=201)
-    res = testapp.get(pfile['@id']).json
-    tf_info = res.get('track_and_facet_info')
-    assert tf_info['experiment_type'] == 'in situ Hi-C'
-    assert tf_info['biosource_name'] == 'GM12878'
-
-
-def test_track_and_file_facet_info_file_link_to_expt_biosample_tissue(
-        testapp, proc_file_json, experiment_data, tissue_biosample, lung_oterm):
-    pfile = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0]
-    experiment_data['biosample'] = tissue_biosample['@id']
-    experiment_data['processed_files'] = [pfile['@id']]
-    testapp.post_json('/experiment_hi_c', experiment_data, status=201)
-    res = testapp.get(pfile['@id']).json
-    tf_info = res.get('track_and_facet_info')
-    assert tf_info['experiment_type'] == 'in situ Hi-C'
-    assert tf_info['biosource_name'] == lung_oterm.get('preferred_name')
-
-
-def test_track_and_file_facet_info_file_fastq_link_to_expt(
-        testapp, file_fastq, experiment_data):
-    experiment_data['files'] = [file_fastq['@id']]
-    testapp.post_json('/experiment_hi_c', experiment_data, status=201)
-    res = testapp.get(file_fastq['@id']).json
-    tf_info = res.get('track_and_facet_info')
-    assert tf_info['experiment_bucket'] == 'raw file'
-
-
-def test_track_and_file_facet_info_file_link_to_multi_repsets(
-        testapp, proc_file_json, rep_set_data):
-    pfile = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0]
-    rep_set_data['processed_files'] = [pfile['@id']]
-    repset1 = testapp.post_json('/experiment_set_replicate', rep_set_data, status=201).json['@graph'][0]
-    repset2 = testapp.post_json('/experiment_set_replicate', rep_set_data, status=201).json['@graph'][0]
-    assert pfile['@id'] in repset1['processed_files']
-    assert pfile['@id'] in repset2['processed_files']
-    res = testapp.get(pfile['@id']).json
-    tf_info = res.get('track_and_facet_info')
-    assert 'lab_name' in tf_info
-    assert len(tf_info) == 1
-
-
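A file linked to a single experiment inherits that experiment's bio/tec replicate numbers, while a file attached directly to a replicate set is summarized across the set, as the next two tests assert. A rough sketch of the distinction (illustrative only; the real calculation lives in the track_and_facet_info property):

def summarize_replicate_info(replicate_exps, file_linked_to_experiment=None):
    # replicate_exps: list of {'bio_rep_no': int, 'tec_rep_no': int, 'replicate_exp': id}
    if file_linked_to_experiment is not None:
        for rep in replicate_exps:
            if rep['replicate_exp'] == file_linked_to_experiment:
                return 'Biorep {}, Techrep {}'.format(rep['bio_rep_no'], rep['tec_rep_no'])
    if len(replicate_exps) == 1:
        return 'unreplicated'
    return 'merged replicates'
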
-def test_track_and_file_facet_info_file_link_to_repset_w_one_expt(
-        testapp, proc_file_json, rep_set_data, experiment_data):
-    pfile = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0]
-    expt = testapp.post_json('/experiment_hi_c', experiment_data, status=201).json['@graph'][0]
-    rep_set_data['processed_files'] = [pfile['@id']]
-    rep_set_data['replicate_exps'] = [{'bio_rep_no': 1, 'tec_rep_no': 1, 'replicate_exp': expt['@id']}]
-    testapp.post_json('/experiment_set_replicate', rep_set_data, status=201)
-    res = testapp.get(pfile['@id']).json
-    tf_info = res.get('track_and_facet_info')
-    assert tf_info['experiment_type'] == 'in situ Hi-C'
-    assert tf_info['experiment_bucket'] == 'processed file'
-    assert tf_info['assay_info'] == 'MboI'
-    assert tf_info['biosource_name'] == 'GM12878'
-    assert tf_info['replicate_info'] == 'unreplicated'
-
-
-def test_track_and_file_facet_info_file_link_to_repset_w_multi_expt_and_opf(
-        testapp, proc_file_json, rep_set_data, experiment_data):
-    pfile = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0]
-    del proc_file_json['accession']
-    del proc_file_json['md5sum']
-    opf = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0]
-    expt1 = testapp.post_json('/experiment_hi_c', experiment_data, status=201).json['@graph'][0]
-    expt2 = testapp.post_json('/experiment_hi_c', experiment_data, status=201).json['@graph'][0]
-    rep_set_data['other_processed_files'] = [{'title': 'some other files', 'type': 'supplementary', 'files': [opf['@id'], pfile['@id']]}]
-    rep_set_data['replicate_exps'] = [{'bio_rep_no': 1, 'tec_rep_no': 1, 'replicate_exp': expt1['@id']},
-                                      {'bio_rep_no': 1, 'tec_rep_no': 2, 'replicate_exp': expt2['@id']}]
-    testapp.post_json('/experiment_set_replicate', rep_set_data, status=201)
-    res = testapp.get(pfile['@id']).json
-    tf_info = res.get('track_and_facet_info')
-    assert tf_info['experiment_bucket'] == 'some other files'
-    assert tf_info['replicate_info'] == 'merged replicates'
-
-
-def test_track_and_file_facet_info_file_w_all_override_fields(
-        testapp, proc_file_json, experiment_data):
-    overrides = {
-        'override_lab_name': 'awesome lab',
-        'override_experiment_type': 'TRIP',
-        'override_biosource_name': 'some cell',
-        'override_assay_info': 'cold',
-        'override_replicate_info': 'replicated lots',
-        'override_experiment_bucket': 'important files'
-    }
-    proc_file_json.update(overrides)
-    pfile = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0]
-    tf_info = pfile.get('track_and_facet_info')
-    for k, v in overrides.items():
-        tf = k.replace('override_', '', 1)
-        assert tf_info[tf] == v
-    # make sure it doesn't change
-    experiment_data['processed_files'] = [pfile['@id']]
-    testapp.post_json('/experiment_hi_c', experiment_data, status=201)
-    res = testapp.get(pfile['@id']).json
-    tf_info2 = res.get('track_and_facet_info')
-    for k, v in overrides.items():
-        tf = k.replace('override_', '', 1)
-        assert tf_info2[tf] == v
-
-
-def test_track_and_file_facet_info_file_vistrack_w_all_override_fields(
-        testapp, proc_file_json, experiment_data, file_formats):
-    overrides = {
-        'override_lab_name': 'awesome lab',
-        'override_experiment_type': 'TRIP',
-        'override_assay_info': 'cold',
-        'override_replicate_info': 'replicated lots',
-        'override_experiment_bucket': 'important files'
-    }
-    proc_file_json.update(overrides)
-    proc_file_json['file_format'] = file_formats.get('bw').get('uuid')
-    proc_file_json['filename'] = 'test.bw'
-    pfile = testapp.post_json('/file_vistrack', proc_file_json, status=201).json['@graph'][0]
-    tf_info = pfile.get('track_and_facet_info')
-    for k, v in overrides.items():
-        tf = k.replace('override_', '', 1)
-        assert tf_info[tf] == v
-
-
-def test_track_and_file_facet_info_file_w_some_override_fields(
-        testapp, proc_file_json, experiment_data):
-    overrides = {
-        'override_experiment_type': 'TRIP',
-        'override_biosource_name': 'some cell',
-        'override_assay_info': 'cold',
-        'override_replicate_info': 'replicated lots',
-    }
-    proc_file_json.update(overrides)
-    pfile = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0]
-    tf_info = pfile.get('track_and_facet_info')
-    assert len(tf_info) == 5  # lab will get calculated since expt_type exists
-    for k, v in overrides.items():
-        tf = k.replace('override_', '', 1)
-        assert tf_info[tf] == v
-    # make sure it doesn't change
-    experiment_data['processed_files'] = [pfile['@id']]
-    testapp.post_json('/experiment_hi_c', experiment_data, status=201)
-    res = testapp.get(pfile['@id']).json
-    tf_info2 = res.get('track_and_facet_info')
-    for k, v in overrides.items():
-        tf = k.replace('override_', '', 1)
-        assert tf_info2[tf] == v
-    assert tf_info2['experiment_type'] == 'TRIP'
-    assert tf_info2['lab_name'] == 'ENCODE lab'
-    assert tf_info2['experiment_bucket'] == 'processed file'
-
-
-def test_track_and_file_facet_info_file_patch_override_fields(
-        testapp, proc_file_json, experiment_data):
-    pfile = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0]
-    experiment_data['processed_files'] = [pfile['@id']]
-    testapp.post_json('/experiment_hi_c', experiment_data, status=201)
-    res = testapp.get(pfile['@id']).json
-    tf_info = res.get('track_and_facet_info')
-    assert tf_info['experiment_type'] == 'in situ Hi-C'
-    # make sure it does change
-    testapp.patch_json(pfile['@id'], {'override_experiment_type': 'new type'}, status=200)
-    res2 = testapp.get(pfile['@id']).json
-    tf_info = res2.get('track_and_facet_info')
-    assert tf_info['experiment_type'] == 'new type'
-
-
-@pytest.fixture
-def dcic_lab(testapp, award):
-    item = {
-        'name': '4dn-dcic-lab',
-        'title': '4DN DCIC, HMS',
-        'status': 'current',
-        'awards': [award['@id']],
-        'uuid': '9780e6b5-507c-4287-b817-103e784587c8'
-    }
-    return testapp.post_json('/lab', item).json['@graph'][0]
-
-
-def test_track_and_file_facet_info_pf_attr_2_dcic_w_expt_lab(
-        testapp, proc_file_json, experiment_data, lab, dcic_lab):
-    """ case where experiment is from a lab and file generated by
-        dcic pipe - dcic is lab for file and encode is contributing lab
-        expectation is tfi.lab_name is dcic lab and tfi.experimental_lab is encode
-    """
-    experiment_data['lab'] = lab.get('uuid')
-    proc_file_json['lab'] = dcic_lab.get('uuid')
-    proc_file_json['contributing_labs'] = [lab.get('uuid')]
-    pf = testapp.post_json('/file_processed', proc_file_json).json['@graph'][0]
-    experiment_data['processed_files'] = [pf['@id']]
-    testapp.post_json('/experiment_hi_c', experiment_data, status=201)
-    pf2 = testapp.get(pf['@id']).json
-    assert pf2.get('track_and_facet_info').get('experimental_lab') == lab.get('display_title')
-    assert pf2.get('track_and_facet_info').get('lab_name') == dcic_lab.get('display_title')
-
-
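The lab-attribution cases that follow reduce to one rule: tfi.lab_name always reflects the file's own lab, tfi.experimental_lab comes from the linked experiment or replicate set, and contributing_labs never participate in the selection. A sketch of that rule (an illustration, not the actual calculated property):

def attribute_labs(file_lab, experimental_lab=None):
    # contributing_labs are intentionally ignored; with no linked experiment
    # or replicate set, only the file's own lab is reported.
    info = {'lab_name': file_lab}
    if experimental_lab is not None:
        info['experimental_lab'] = experimental_lab
    return info
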
-def test_track_and_file_facet_info_pf_attr_2_lab_w_diff_expt_lab(
-        testapp, proc_file_json, experiment_data, lab, another_lab):
-    """ case where experiment is from a lab and the file is submitted by a
-        different lab - encode is experimental lab and another lab is file lab
-    """
-    experiment_data['lab'] = lab.get('uuid')
-    proc_file_json['lab'] = another_lab.get('uuid')
-    pf = testapp.post_json('/file_processed', proc_file_json).json['@graph'][0]
-    experiment_data['processed_files'] = [pf['@id']]
-    testapp.post_json('/experiment_hi_c', experiment_data, status=201)
-    pf2 = testapp.get(pf['@id']).json
-    assert pf2.get('track_and_facet_info').get('experimental_lab') == lab.get('display_title')
-    assert pf2.get('track_and_facet_info').get('lab_name') == another_lab.get('display_title')
-
-
-def test_track_and_file_facet_info_pf_of_repset_attr_2_lab_w_diff_expt_lab(
-        testapp, proc_file_json, rep_set_data, lab, another_lab):
-    """ case where file is linked to replicate set and is from a lab and file is submitted
-        by a different lab - encode is experimental lab and another lab is file lab
-    """
-    rep_set_data['lab'] = lab.get('uuid')
-    proc_file_json['lab'] = another_lab.get('uuid')
-    pf = testapp.post_json('/file_processed', proc_file_json).json['@graph'][0]
-    rep_set_data['processed_files'] = [pf['@id']]
-    testapp.post_json('/experiment_set_replicate', rep_set_data, status=201)
-    pf2 = testapp.get(pf['@id']).json
-    assert pf2.get('track_and_facet_info').get('experimental_lab') == lab.get('display_title')
-    assert pf2.get('track_and_facet_info').get('lab_name') == another_lab.get('display_title')
-
-
-def test_track_and_file_facet_info_pf_of_repset_attr_2_dcic_w_diff_expt_lab(
-        testapp, proc_file_json, rep_set_data, lab, dcic_lab, another_lab):
-    """ case where file is linked to replicate set and is generated by a dcic pipe -
-        encode is experimental lab and dcic-lab is file lab also added another lab
-        to contributing labs to make sure that doesn't interfere with selection
-    """
-    rep_set_data['lab'] = lab.get('uuid')
-    proc_file_json['lab'] = dcic_lab.get('uuid')
-    proc_file_json['contributing_labs'] = [lab.get('uuid'), another_lab.get('uuid')]
-    pf = testapp.post_json('/file_processed', proc_file_json).json['@graph'][0]
-    rep_set_data['processed_files'] = [pf['@id']]
-    testapp.post_json('/experiment_set_replicate', rep_set_data, status=201)
-    pf2 = testapp.get(pf['@id']).json
-    assert pf2.get('track_and_facet_info').get('experimental_lab') == lab.get('display_title')
-    assert pf2.get('track_and_facet_info').get('lab_name') == dcic_lab.get('display_title')
-
-
-def test_track_and_file_facet_info_pf_of_expt_from_same_lab_as_expt(
-        testapp, proc_file_json, experiment_data, lab, another_lab
-):
-    """ case where file is linked to expt and is generated by same lab as experimental lab -
-        add contributing labs to make sure that doesn't interfere with selection
-    """
-    proc_file_json['lab'] = another_lab['@id']
-    proc_file_json['contributing_labs'] = [lab['@id']]
-    pf = testapp.post_json('/file_processed', proc_file_json).json['@graph'][0]
-    experiment_data['lab'] = another_lab['@id']
-    experiment_data['processed_files'] = [pf.get('@id')]
-    testapp.post_json('/experiment_hi_c', experiment_data, status=201)
-    pfres = testapp.get(pf['@id']).json
-    assert pfres.get('track_and_facet_info').get('experimental_lab') == pfres.get('track_and_facet_info').get('lab_name') == another_lab.get('display_title')
-
-
-def test_track_and_file_facet_info_pf_dcic(
-        testapp, proc_file_json, dcic_lab
-):
-    """ case where file is not yet linked to an experiment or replicate set -
-        generated by dcic
-    """
-    proc_file_json['lab'] = dcic_lab['@id']
-    del proc_file_json['contributing_labs']
-    pf = testapp.post_json('/file_processed', proc_file_json).json['@graph'][0]
-    tf_info = pf.get('track_and_facet_info')
-    assert len(tf_info) == 1
-    assert tf_info.get('lab_name') == dcic_lab.get('display_title')
-
-
-def test_track_and_file_facet_info_file_link_to_expt_opf_attribution(
-        testapp, proc_file_json, experiment_data, lab, another_lab):
-    """ add file of other processed files for an experiment from a different lab
-        and confirm experimental_lab is assigned correctly
-    """
-    pfile = testapp.post_json('/file_processed', proc_file_json, status=201).json['@graph'][0]
-    experiment_data['lab'] = another_lab['@id']
-    experiment_data['other_processed_files'] = [{'title': 'some other files', 'type': 'supplementary', 'files': [pfile['@id']]}]
-    testapp.post_json('/experiment_hi_c', experiment_data, status=201)
-    res = testapp.get(pfile['@id']).json
-    tf_info = res.get('track_and_facet_info')
-    assert tf_info.get('lab_name') == lab.get('display_title')
-    assert tf_info.get('experimental_lab') == another_lab.get('display_title')
-
-
-def test_pairs_file_qc_tsv_link(testapp, pairs_file_json):
-    res = testapp.post_json('/file_processed', pairs_file_json).json['@graph'][0]
-    assert res['pairsqc_table'].endswith('%s.plot_table.out' % res['accession'])
-
-
-def test_notes_to_tsv_field(testapp, test_file_tsv_notes_field):
-    tv = {'integer_read_ids': ['integer_read_ids'], 'integer_read_ids,low_quality': ['integer_read_ids', 'low_quality'], '': []}
-    for result, test_value in tv.items():
-        test_file_tsv_notes_field['notes_to_tsv'] = test_value
-        pfile = testapp.post_json('/file_fastq', test_file_tsv_notes_field).json
-        assert pfile['@graph'][0]['tsv_notes'] == result
-
-
-@pytest.fixture
-def file_dbxrefs(testapp, fastq_json):
-    item = fastq_json.copy()
-    item['dbxrefs'] = ['ENA:SRR10002120']
-    res = testapp.post_json('/file_fastq', item)
-    return res.json['@graph'][0]
-
-
-def test_external_references(file_dbxrefs):
-    assert file_dbxrefs['external_references'][0]['uri'] == 'https://www.ebi.ac.uk/ena/browser/view/SRR10002120'
diff --git a/src/encoded/tests/test_file_drs.py b/src/encoded/tests/test_file_drs.py
deleted file mode 100644
index 9ce8412651..0000000000
--- a/src/encoded/tests/test_file_drs.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import pytest
-
-
-pytestmark = [pytest.mark.setone, pytest.mark.working]
-
-
-DRS_PREFIX = f'/ga4gh/drs/v1/objects'
-
-
-@pytest.fixture
-def mcool_file_json(award, experiment, lab, file_formats):
-    """ Duplicating fixture since these live in another file that is not shared """
-    item = {
-        'award': award['@id'],
-        'lab': lab['@id'],
-        'file_format': file_formats.get('mcool').get('uuid'),
-        'md5sum': '00000000000000000000000000000000',
-        'content_md5sum': '00000000000000000000000000000000',
-        'filename': 'my.cool.mcool',
-        'status': 'uploaded',
-    }
-    return item
-
-
-@pytest.fixture
-def file(testapp, award, experiment, lab, file_formats):
-    """ Duplicating fixture since these live in another file that is not shared """
-    item = {
-        'award': award['@id'],
-        'lab': lab['@id'],
-        'file_format': file_formats.get('fastq').get('uuid'),
-        'md5sum': '00000000000000000000000000000000',
-        'content_md5sum': '00000000000000000000000000000000',
-        'filename': 'my.fastq.gz',
-        'status': 'uploaded',
-    }
-    res = testapp.post_json('/file_fastq', item)
-    return res.json['@graph'][0]
-
-
-def validate_drs_conversion(drs_obj, meta, uri=None):
-    """ Validates drs object structure against the metadata in the db """
-    assert drs_obj['id'] == meta['@id']
-    assert drs_obj['created_time'] == meta['date_created']
-    assert drs_obj['drs_id'] == meta['accession']
-    assert drs_obj['self_uri'] == f'drs://localhost:80{meta["@id"]}@@drs' if not uri else uri
-    assert drs_obj['version'] == meta['md5sum']
-    assert drs_obj['name'] == meta['filename']
-    assert drs_obj['aliases'] == [meta['uuid']]
-
-
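validate_drs_conversion above pins down the DRS fields these tests depend on. For a posted file, the @@drs response checked here would look roughly like the following (all values illustrative placeholders, not real data):

drs_obj = {
    'id': '/files-processed/<accession>/',                       # @id of the file item
    'drs_id': '<accession>',
    'self_uri': 'drs://localhost:80/files-processed/<accession>/@@drs',
    'created_time': '<date_created>',
    'version': '<md5sum>',
    'name': '<filename>',
    'aliases': ['<uuid>'],
}
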
-def test_processed_file_drs_view(testapp, mcool_file_json):
-    """ Tests that processed mcool gives a valid DRS response """
-    meta = testapp.post_json('/file_processed', mcool_file_json).json['@graph'][0]
-    drs_meta = testapp.get(meta['@id'] + '@@drs').json
-    validate_drs_conversion(drs_meta, meta)
-    drs_meta = testapp.get(f'{DRS_PREFIX}/{meta["uuid"]}').json
-    validate_drs_conversion(drs_meta, meta, uri=f'{DRS_PREFIX}/{meta["uuid"]}')
-
-
-def test_fastq_file_drs_view(testapp, file):
-    """ Tests that a fastq file has valid DRS response """
-    drs_meta = testapp.get(file['@id'] + '@@drs').json
-    validate_drs_conversion(drs_meta, file)
-    drs_meta = testapp.get(f'{DRS_PREFIX}/{file["uuid"]}').json
-    validate_drs_conversion(drs_meta, file, uri=f'{DRS_PREFIX}/{file["uuid"]}')
-
-
-def test_fastq_file_drs_access(testapp, file):
-    """ Tests that access URLs are retrieved successfully """
-    drs_meta = testapp.get(file['@id'] + '@@drs').json
-    drs_object_uri = drs_meta['drs_id']
-    drs_object_download = testapp.get(f'/ga4gh/drs/v1/objects/{drs_object_uri}/access/').json
-    assert drs_object_download == {
-        'url': f'https://localhost:80/{drs_object_uri}/@@download'
-    }
diff --git a/src/encoded/tests/test_fixtures.py b/src/encoded/tests/test_fixtures.py
deleted file mode 100644
index cb130cd670..0000000000
--- a/src/encoded/tests/test_fixtures.py
+++ /dev/null
@@ -1,138 +0,0 @@
-import pytest
-import webtest
-
-from unittest import mock
-
-from ..tests import datafixtures
-
-
-pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
-
-
-@pytest.yield_fixture
-def minitestdata(app, conn):
-    tx = conn.begin_nested()
-
-    environ = {
-        'HTTP_ACCEPT': 'application/json',
-        'REMOTE_USER': 'TEST',
-    }
-    testapp = webtest.TestApp(app, environ)
-
-    item = {
-        'name': 'human',
-        'scientific_name': 'Homo sapiens',
-        'taxon_id': '9606',
-    }
-    testapp.post_json('/organism', item, status=201)
-
-    yield
-    tx.rollback()
-
-
-@pytest.yield_fixture
-def minitestdata2(app, conn):
-    tx = conn.begin_nested()
-
-    environ = {
-        'HTTP_ACCEPT': 'application/json',
-        'REMOTE_USER': 'TEST',
-    }
-    testapp = webtest.TestApp(app, environ)
-
-    item = {
-        'name': 'human',
-        'scientific_name': 'Homo sapiens',
-        'taxon_id': '9606',
-    }
-    testapp.post_json('/organism', item, status=201)
-
-    yield
-    tx.rollback()
-
-
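minitestdata and minitestdata2 are line-for-line identical: each opens a savepoint, posts one organism, and rolls back at teardown. If the two ever needed to stay in sync, the shared body could be factored out along these lines (a sketch under that assumption; this helper is not part of the deleted module):

import webtest

def post_human_organism(app, conn):
    # Open a nested transaction (savepoint); the caller rolls it back at teardown.
    tx = conn.begin_nested()
    environ = {'HTTP_ACCEPT': 'application/json', 'REMOTE_USER': 'TEST'}
    testapp = webtest.TestApp(app, environ)
    item = {'name': 'human', 'scientific_name': 'Homo sapiens', 'taxon_id': '9606'}
    testapp.post_json('/organism', item, status=201)
    return tx
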
- """ - res = testapp.get('/organism').maybe_follow() - items = res.json['@graph'] - assert len(items) == 1 - - # Trigger an error - item = {'foo': 'bar'} - res = testapp.post_json('/organism', item, status=422) - assert res.json['errors'] - - res = testapp.get('/organism').maybe_follow() - items = res.json['@graph'] - assert len(items) == 1 - - item = { - 'name': 'mouse', - 'scientific_name': 'Mus musculus', - 'taxon_id': '10090', - } - testapp.post_json('/organism', item, status=201) - - res = testapp.get('/organism').maybe_follow() - items = res.json['@graph'] - assert len(items) == 2 - - # Trigger an error - item = {'foo': 'bar'} - res = testapp.post_json('/organism', item, status=422) - assert res.json['errors'] - - res = testapp.get('/organism').maybe_follow() - items = res.json['@graph'] - assert len(items) == 2 - - -def test_fixtures2(minitestdata2, testapp): - # http://stackoverflow.com/questions/15775601/mutually-exclusive-fixtures - res = testapp.get('/organisms/') - items = res.json['@graph'] - assert len(items) == 1 - - -@pytest.mark.skip # not clear this has been working for some time -def test_order_complete(app, conn): - # TODO: This could use a doc string or comment. -kent & eric 29-Jun-2020 - print("original datafixtures.ORDER =", datafixtures.ORDER) - print("original len(datafixtures.ORDER) =", len(datafixtures.ORDER)) - assert "access_key" not in datafixtures.ORDER - order_for_testing = datafixtures.ORDER + ["access_key"] - with mock.patch.object(datafixtures, "ORDER", order_for_testing): - print("mocked datafixtures.ORDER =", datafixtures.ORDER) - print("len(mocked datafixtures.ORDER) =", len(datafixtures.ORDER)) - assert "access_key" in datafixtures.ORDER - ORDER = datafixtures.ORDER - environ = { - 'HTTP_ACCEPT': 'application/json', - 'REMOTE_USER': 'TEST', - } - testapp = webtest.TestApp(app, environ) - master_types = [] - profiles = testapp.get('/profiles/?frame=raw').json - for a_type in profiles: - if profiles[a_type].get('id') and profiles[a_type]['isAbstract'] is False: - schema_name = profiles[a_type]['id'].split('/')[-1][:-5] - master_types.append(schema_name) - print(ORDER) - print(master_types) - print(len(ORDER)) - print(len(master_types)) - - missing_types = [i for i in master_types if i not in ORDER] - extra_types = [i for i in ORDER if i not in master_types] - print(missing_types) - print(extra_types) - - assert missing_types == [] - assert extra_types == [] - print("restored datafixtures.ORDER =", datafixtures.ORDER) - print("restored len(datafixtures.ORDER) =", len(datafixtures.ORDER)) - assert "access_key" not in datafixtures.ORDER diff --git a/src/encoded/tests/test_fourfront_submission.py b/src/encoded/tests/test_fourfront_submission.py deleted file mode 100644 index fb71dd9916..0000000000 --- a/src/encoded/tests/test_fourfront_submission.py +++ /dev/null @@ -1,14 +0,0 @@ -import pytest - - -pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema] - -# test that the right fields are in place for metadata submission on FF. 
-# specifically, a valid submits_for lab and valid award -def test_user_with_submitter(testapp, submitter): - assert len(submitter['submits_for']) > 0 - lab = submitter['submits_for'][0]['@id'] - lab_res = testapp.get(lab, status=200) - assert len(lab_res.json['awards']) > 0 - award = lab_res.json['awards'][0]['@id'] - testapp.get(award, status=200) diff --git a/src/encoded/tests/test_generate_ontology.py b/src/encoded/tests/test_generate_ontology.py deleted file mode 100644 index e2f7f859f1..0000000000 --- a/src/encoded/tests/test_generate_ontology.py +++ /dev/null @@ -1,1375 +0,0 @@ -import json -import os -import pytest - -from collections import OrderedDict -from rdflib import URIRef -from unittest import mock -from ..commands import generate_ontology as go -from ..commands.owltools import Owler - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -def test_parse_args_defaults(): - args = [] - args = go.parse_args(args) - assert args.ontology == 'all' - assert args.env == 'data' - assert args.simple is False - assert args.full is False - assert args.pretty is False - assert args.phase is False - assert args.owlfile is None - assert args.keyfile == "{}/keypairs.json".format(os.path.expanduser('~')) - assert args.key is None - assert args.keyname == 'default' - - -@pytest.fixture -def connection(): - return { - "server": "https://data.4dnucleome.org/", - "key": "testkey", - "secret": "testsecret" - } - - -@pytest.fixture -def slim_terms(): - return [ - { - "uuid": "111119bc-8535-4448-903e-854af460a233", - "term_name": "ectoderm", - "term_id": "UBERON:0000924", - "is_slim_for": "developmental", - }, - { - "uuid": "111122bc-8535-4448-903e-854af460a233", - "preferred_name": "3D chromatin structure", - "term_name": "chromosome conformation identification objective", - "term_id": "OBI:0001917", - "is_slim_for": "assay" - } - ] - - -def test_connect2server(connection): - # parameters we pass in don't really matter - key = "{'server': 'https://data.4dnucleome.org/', 'key': 'testkey', 'secret': 'testsecret'}" - with mock.patch('encoded.commands.generate_ontology.get_authentication_with_server', return_value=connection): - retval = go.connect2server(None, key) - assert retval == connection - - -# see ontology schema for full schema -# now synonym_terms and definition_terms are fully embedded -all_ontology = [ - { - 'download_url': 'http://www.ebi.ac.uk/efo/efo_inferred.owl', - 'synonym_terms': [ - '/ontology-terms/111111bc-8535-4448-903e-854af460a233/', - '/ontology-terms/111112bc-8535-4448-903e-854af460a233/'], - '@id': '/ontologys/530006bc-8535-4448-903e-854af460b254/', - '@type': ['Ontology', 'Item'], - 'definition_terms': [ - '/ontology-terms/111115bc-8535-4448-903e-854af460a233/', - '/ontology-terms/111116bc-8535-4448-903e-854af460a233/'], - 'namespace_url': 'http://www.ebi.ac.uk/efo/', - 'ontology_prefix': 'EFO', - 'uuid': '530006bc-8535-4448-903e-854af460b254', - 'ontology_name': 'Experimental Factor Ontology' - }, - { - 'ontology_name': 'Uberon', - '@type': ['Ontology', 'Item'], - 'ontology_prefix': 'UBERON', - 'namespace_url': 'http://purl.obolibrary.org/obo/', - 'download_url': 'http://purl.obolibrary.org/obo/uberon/composite-metazoan.owl', - '@id': '/ontologys/530016bc-8535-4448-903e-854af460b254/', - 'definition_terms': ['/ontology-terms/111116bc-8535-4448-903e-854af460a233/'], - 'uuid': '530016bc-8535-4448-903e-854af460b254', - }, - { - 'ontology_name': 'Ontology for Biomedical Investigations', - '@type': ['Ontology', 'Item'], - 'ontology_prefix': 'OBI', - 'namespace_url': 
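test_parse_args_defaults above shows the command defaults to ~/keypairs.json with key name 'default', and connect2server resolves that to a dict like the connection fixture. The keyfile layout this implies would be roughly the following (values illustrative, and the exact schema is an assumption on my part):

keypairs = {
    'default': {
        'server': 'https://data.4dnucleome.org/',
        'key': 'testkey',
        'secret': 'testsecret',
    }
}
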
-# see ontology schema for full schema
-# now synonym_terms and definition_terms are fully embedded
-all_ontology = [
-    {
-        'download_url': 'http://www.ebi.ac.uk/efo/efo_inferred.owl',
-        'synonym_terms': [
-            '/ontology-terms/111111bc-8535-4448-903e-854af460a233/',
-            '/ontology-terms/111112bc-8535-4448-903e-854af460a233/'],
-        '@id': '/ontologys/530006bc-8535-4448-903e-854af460b254/',
-        '@type': ['Ontology', 'Item'],
-        'definition_terms': [
-            '/ontology-terms/111115bc-8535-4448-903e-854af460a233/',
-            '/ontology-terms/111116bc-8535-4448-903e-854af460a233/'],
-        'namespace_url': 'http://www.ebi.ac.uk/efo/',
-        'ontology_prefix': 'EFO',
-        'uuid': '530006bc-8535-4448-903e-854af460b254',
-        'ontology_name': 'Experimental Factor Ontology'
-    },
-    {
-        'ontology_name': 'Uberon',
-        '@type': ['Ontology', 'Item'],
-        'ontology_prefix': 'UBERON',
-        'namespace_url': 'http://purl.obolibrary.org/obo/',
-        'download_url': 'http://purl.obolibrary.org/obo/uberon/composite-metazoan.owl',
-        '@id': '/ontologys/530016bc-8535-4448-903e-854af460b254/',
-        'definition_terms': ['/ontology-terms/111116bc-8535-4448-903e-854af460a233/'],
-        'uuid': '530016bc-8535-4448-903e-854af460b254',
-    },
-    {
-        'ontology_name': 'Ontology for Biomedical Investigations',
-        '@type': ['Ontology', 'Item'],
-        'ontology_prefix': 'OBI',
-        'namespace_url': 'http://purl.obolibrary.org/obo/',
-        'download_url': 'http://purl.obolibrary.org/obo/obi.owl',
-        '@id': '/ontologys/530026bc-8535-4448-903e-854af460b254/',
-        'definition_terms': [
-            {
-                'term_name': 'definition',
-                '@type': ['OntologyTerm', 'Item'],
-                'term_id': 'IAO:0000115',
-                '@id': '/ontology-terms/111116bc-8535-4448-903e-854af460a233/',
-                'uuid': '111116bc-8535-4448-903e-854af460a233',
-                'term_url': 'http://purl.obolibrary.org/obo/IAO_0000115'
-            }
-        ],
-        'uuid': '530026bc-8535-4448-903e-854af460b254',
-        'synonym_terms': [
-            {
-                'term_name': 'alternative term',
-                '@type': ['OntologyTerm', 'Item'],
-                'term_id': 'IAO:0000118',
-                '@id': '/ontology-terms/111117bc-8535-4448-903e-854af460a233/',
-                'uuid': '111117bc-8535-4448-903e-854af460a233',
-                'term_url': 'http://purl.obolibrary.org/obo/IAO_0000118'
-            },
-            {
-                'term_name': 'alternative term',
-                '@type': ['OntologyTerm', 'Item'],
-                'term_id': 'IAO:0000118',
-                '@id': '/ontology-terms/111117bc-8535-4448-903e-854af460a233/',
-                'uuid': '111117bc-8535-4448-903e-854af460a233',
-                'term_url': 'http://purl.obolibrary.org/obo/IAO_0000118'
-            }
-        ]
-    }
-]
-
-
-def get_fdn_ontology_side_effect(*args, **kwargs):
-    for i, arg in enumerate(args):
-        print('ARG', i, ' = ', arg)
-    if args[0] is not None:
-        return all_ontology[0]
-    else:
-        return all_ontology
-
-
-def test_get_ontologies_all(connection):
-    prefixes = ['EFO', 'UBERON', 'OBI']
-    with mock.patch('encoded.commands.generate_ontology.search_metadata', return_value=all_ontology):
-        ont_list = 'all'
-        ontologies = go.get_ontologies(connection, ont_list)
-        assert len(ontologies) == 3
-        for ont in ontologies:
-            assert ont['ontology_prefix'] in prefixes
-
-
-def test_get_ontologies_one(connection):
-    prefix = 'EFO'
-    with mock.patch('encoded.commands.generate_ontology.get_metadata', side_effect=get_fdn_ontology_side_effect):
-        ont_list = 'EFO'
-        ontologies = go.get_ontologies(connection, ont_list)
-        assert len(ontologies) == 1
-        assert ontologies[0]['ontology_prefix'] == prefix
-
-
-def test_get_ontologies_not_in_db(connection):
-    all_ontology.append({'@type': ['Error', 'Item'], 'ontology_prefix': 'FAKE'})
-    with mock.patch('encoded.commands.generate_ontology.get_metadata',
-                    return_value={'@type': ['Error', 'Item'], 'ontology_prefix': 'FAKE'}):
-        ont_list = 'FAKE'
-        ontologies = go.get_ontologies(connection, ont_list)
-        assert not ontologies
-
-
-@pytest.fixture
-def slim_term_list():
-    # see ontology_term schema for full schema
-    return [{'term_id': 'a_term1', 'uuid': 'uuida1', 'is_slim_for': 'assay'},
-            {'term_id': 'a_term2', 'uuid': 'uuida2', 'is_slim_for': 'assay'},
-            {'term_id': 'd_term1', 'uuid': 'uuidd1', 'is_slim_for': 'developmental'}]
-
-
-@pytest.fixture
-def slim_terms_by_ont(slim_term_list):
-    return [
-        [slim_term_list[0],
-         slim_term_list[1]],
-        [slim_term_list[2]],
-        None,
-        None,
-        None
-    ]
-
-
-@pytest.fixture
-def term_w_closure():
-    return {'term_id': '1', 'uuid': 'uuid1',
-            'closure': ['id1', 'id2', 'a_term1']}
-
-
-@pytest.fixture
-def terms_w_closures(term_w_closure):
-    # term with 2 slims
-    term_w_two = term_w_closure.copy()
-    term_w_two['term_id'] = '4'
-    term_w_two['uuid'] = 'uuid2'
-    term_w_two['closure'] = term_w_closure['closure'].copy()
-    term_w_two['closure'].append('a_term2')
-    # term w closure but no slim terms
-    term_wo_slim = term_w_closure.copy()
-    term_wo_slim['term_id'] = '5'
-    term_wo_slim['uuid'] = 'uuid5'
-    term_wo_slim['closure'] = term_w_closure['closure'].copy()
-    term_wo_slim['closure'].pop()
-    # term with both 'closure' and 'closure_with_develops_from' both with the same slim
-    term_with_both = term_w_closure.copy()
-    term_with_both['term_id'] = '3'
-    term_with_both['uuid'] = 'uuid3'
-    term_with_both['closure_with_develops_from'] = ['d_term1']
-    print(term_with_both)
-    # term with 'closure_with_develops_from' slim term'
-    term_cwdf = term_with_both.copy()
-    term_cwdf['term_id'] = '2'
-    term_cwdf['uuid'] = 'uuid2'
-    del term_cwdf['closure']
-    # term with no closures
-    term_w_none = term_cwdf.copy()
-    term_w_none['term_id'] = '6'
-    term_w_none['uuid'] = 'uuid6'
-    del term_w_none['closure_with_develops_from']
-    return [term_w_closure, term_cwdf, term_with_both,
-            term_w_two, term_wo_slim, term_w_none]
-
-
-@pytest.fixture
-def terms():
-    return {
-        'a_term1': {
-            'term_id': 'a_term1',
-            'term_name': 'name1',
-            'all_parents': []
-        },
-        'id2': {
-            'term_id': 'id2',
-            'term_name': 'name2',
-            'parents': ['a_term1', 'ObsoleteClass'],
-            'all_parents': ['a_term1']
-        },
-        'id3': {
-            'term_id': 'id3',
-            'term_name': 'obsolete name',
-            'relationships': ['id2'],
-            'all_parents': ['id2']
-        },
-        'id4': {
-            'term_id': 'id4',
-            'term_name': 'Obsolete name',
-            'relationships': ['a_term1', 'id2'],
-            'all_parents': ['a_term11', 'id2']
-        },
-        'd_term1': {
-            'term_id': 'd_term1',
-            'term_name': '',
-            'all_parents': ['id4']
-        },
-        'id6': {
-            'term_id': 'id6',
-            'develops_from': ['id7'],
-            'parents': ['id2'],
-            'all_parents': []
-        },
-        'id7': {
-            'term_id': 'id7',
-            'parents': ['d_term1'],
-            'all_parents': ['id6']
-        },
-        'id8': {
-            'term_id': 'id8',
-            'develops_from': ['id7', 'id3'],
-            'all_parents': ['id7', 'id3']
-        },
-        'id9': {
-            'term_id': 'id9',
-            'has_part_inverse': ['id3'],
-            'develops_from': ['id3'],
-            'all_parents': ['id10']
-        }
-    }
-
-
-@pytest.fixture
-def syn_uris():
-    return ['http://www.ebi.ac.uk/efo/alternative_term',
-            'http://www.geneontology.org/formats/oboInOwl#hasExactSynonym',
-            'http://purl.obolibrary.org/obo/IAO_0000118']
-
-
-@pytest.fixture
-def syn_uris_as_URIRef(syn_uris):
-    return [go.convert2namespace(uri) for uri in syn_uris]
-
-
-def test_get_slim_terms(connection, slim_terms_by_ont):
-    present = ['developmental', 'assay']
-    absent = ['organ', 'system', 'cell']
-    test_slim_terms = slim_terms_by_ont
-    with mock.patch('encoded.commands.generate_ontology.search_metadata',
-                    side_effect=test_slim_terms):
-        terms = go.get_slim_terms(connection)
-        assert len(terms) == 3
-        for term in terms:
-            assert term['is_slim_for'] in present
-            assert term['is_slim_for'] not in absent
-
-
-def test_add_slim_to_term(terms_w_closures, slim_term_list):
-    slim_ids = ['a_term1', 'd_term1', 'a_term2']
-    for i, term in enumerate(terms_w_closures):
-        test_term = go.add_slim_to_term(term, slim_term_list)
-        assert test_term['term_id'] == str(i + 1)
-        if i < 2:
-            assert len(test_term['slim_terms']) == 1
-            assert test_term['slim_terms'][0] == slim_ids[i]
-        elif i <= 3:
-            assert len(test_term['slim_terms']) == 2
-            for t in test_term['slim_terms']:
-                assert t in slim_ids
-        elif i > 3:
-            assert 'slim_terms' not in test_term
-
-
-def test_add_slim_terms(terms, slim_term_list):
-    terms = go.add_slim_terms(terms, slim_term_list)
-    print(terms)
-    for tid, term in terms.items():
-        if tid == 'id6':
-            assert len(term['slim_terms']) == 2
-            assert 'd_term1' in term['slim_terms']
-            assert 'a_term1' in term['slim_terms']
-        elif tid == 'id9':
-            assert 'slim_terms' not in term
-        else:
-            assert len(term['slim_terms']) == 1
-            if tid in ['a_term1', 'id2', 'id3', 'id4']:
-                assert term['slim_terms'][0] == 'a_term1'
-            elif tid in ['d_term1', 'id7', 'id8']:
-                assert term['slim_terms'][0] == 'd_term1'
-
-
-def test_remove_obsoletes_and_unnamed_obsoletes(terms):
-    db_terms = []
-    terms['id10'] = {'term_id': 'id10', 'term_name': 'new_term that is deprecated'}
-    ids = ['a_term1', 'id2', 'id3', 'id4', 'd_term1', 'id6', 'id7', 'id8', 'id9', 'id10']
-    deprecated = 'id10'
-    for i in ids:
-        assert i in terms
-    terms = go.remove_obsoletes_and_unnamed(terms, deprecated, db_terms)
-    remaining = ids.pop(0)
-    assert remaining in terms
-    for i in ids:
-        assert i not in terms
-
-
-def check_if_URIRef(uri):
-    return isinstance(uri, URIRef)
-
-
-def test_convert2namespace(syn_uris):
-    for uri in syn_uris:
-        ns = go.convert2namespace(uri)
-        assert check_if_URIRef(ns)
-        assert str(ns) == uri
-
-
-def test_get_syndef_terms_as_uri(syn_uris):
-    asrdf = [True, False]
-    for rdf in asrdf:
-        uris = go.get_syndef_terms_as_uri(all_ontology[2], 'synonym_terms', rdf)
-        if rdf:
-            for uri in uris:
-                assert check_if_URIRef(uri)
-                assert str(uri) in syn_uris
-        else:
-            assert str(uri) in syn_uris
-
-
-def test_get_synonym_term_uris_no_ontology():
-    with mock.patch('encoded.commands.generate_ontology.get_syndef_terms_as_uri',
-                    return_value=[]):
-        synterms = go.get_synonym_term_uris('ontologys/FAKE')
-        assert not synterms
-
-
-def test_get_definition_term_uris_no_ontology():
-    with mock.patch('encoded.commands.generate_ontology.get_syndef_terms_as_uri',
-                    return_value=[]):
-        synterms = go.get_definition_term_uris('ontologys/FAKE')
-        assert not synterms
-
-
-def test_get_synonym_term_uris(syn_uris, syn_uris_as_URIRef):
-    asrdf = [True, False]
-    with mock.patch('encoded.commands.generate_ontology.get_syndef_terms_as_uri',
-                    return_value=syn_uris_as_URIRef):
-        for rdf in asrdf:
-            uris = go.get_synonym_term_uris('ontid', rdf)
-            if rdf:
-                for uri in uris:
-                    assert check_if_URIRef(uri)
-                    assert str(uri) in syn_uris
-            else:
-                assert str(uri) in syn_uris
-
-
-def test_get_definition_term_uris(syn_uris, syn_uris_as_URIRef):
-    asrdf = [True, False]
-    with mock.patch('encoded.commands.generate_ontology.get_syndef_terms_as_uri',
-                    return_value=syn_uris_as_URIRef):
-        for rdf in asrdf:
-            uris = go.get_synonym_term_uris('ontid', rdf)
-            if rdf:
-                for uri in uris:
-                    assert check_if_URIRef(uri)
-                    assert str(uri) in syn_uris
-            else:
-                assert str(uri) in syn_uris
-
-
-@pytest.fixture
-def owler():
-    with mock.patch.object(go, 'Owler') as mocked:
-        yield mocked
-
-
-@pytest.fixture
-def returned_synonyms():
-    return [
-        [], [],
-        ['testsyn1'], ['testsyn1'],
-        ['testsyn1', 'testsyn2'], ['testsyn1', 'testsyn2']
-    ]
-
-
-def test_get_synonyms_and_definitions(owler, returned_synonyms):
-    checks = ['testsyn1', 'testsyn2']
-    with mock.patch('encoded.commands.generate_ontology.getObjectLiteralsOfType',
                    side_effect=returned_synonyms):
-        class_ = 'test_class'
-        synonym_terms = ['1']
-        definition_terms = ['1']
-        for i in range(int(len(returned_synonyms) / 2)):
-            synonyms = go.get_synonyms(class_, owler, synonym_terms)
-            definitions = go.get_definitions(class_, owler, definition_terms)
-            assert synonyms == definitions
-            if i == 0:
-                assert not synonyms
-            else:
-                assert len(synonyms) == i
-                for syn in synonyms:
-                    assert syn in checks
-
-
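iterative_parents and get_all_ancestors, exercised next, both amount to a transitive walk over a parent field; get_all_ancestors additionally includes the starting term in its closure. A self-contained sketch of such a traversal (my construction for illustration, not the generate_ontology code):

def transitive_ancestors(term_id, terms, field='all_parents'):
    # Breadth-first walk over the given parent field, collecting each ancestor once.
    seen = set()
    queue = list(terms.get(term_id, {}).get(field, []))
    while queue:
        current = queue.pop(0)
        if current in seen or current not in terms:
            continue
        seen.add(current)
        queue.extend(terms[current].get(field, []))
    return seen
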
-def test_iterative_parents(terms):
-    for tid, term in terms.items():
-        parents = []
-        oks = []
-        if 'all_parents' in term:
-            parents = go.iterative_parents(term['all_parents'], terms, 'all_parents')
-        if tid in ['a_term1', 'id6', 'id9']:
-            assert not parents
-        if tid == 'id2':
-            oks = ['a_term1']
-            assert len(parents) == 1
-        if tid in ['id3', 'id4']:
-            oks = ['a_term1', 'id2']
-            assert len(parents) == 2
-        if tid == 'd_term1':
-            oks = ['a_term1', 'id2', 'id4']
-            assert len(parents) == 3
-        if tid == 'id7':
-            oks = ['id6']
-            assert len(parents) == 1
-        if tid == 'id8':
-            oks = ['id6', 'id7', 'a_term1', 'id2', 'id3']
-            assert len(parents) == 5
-        if oks:
-            assert [_id in oks for _id in parents]
-
-
-def test_get_all_ancestors(terms):
-    for tid, term in terms.items():
-        term['development'] = term['all_parents'].copy()  # adding development to all terms
-    for tid, term in terms.items():
-        term = go.get_all_ancestors(term, terms, 'all_parents')
-        term = go.get_all_ancestors(term, terms, 'development')
-        # check they're the same - no need to check both anymore
-        assert term['closure'] == term['closure_with_develops_from']
-        closure = term['closure']
-        okids = []
-        assert tid in closure  # checks that the term id is included
-        if tid in ['a_term1', 'id6', 'id9']:
-            assert len(closure) == 1
-        if tid in ['id2', 'id7']:
-            assert len(closure) == 2
-            if tid == 'id2':
-                okids = ['a_term1']
-            else:
-                okids = ['id6']
-        if tid in ['id3', 'id4']:
-            assert len(closure) == 3
-            okids = ['a_term1', 'id2']
-        if tid == 'd_term1':
-            assert len(closure) == 4
-            okids = ['a_term1', 'id2', 'id4']
-        if tid == 'id8':
-            assert len(closure) == 6
-            okids = ['id6', 'id7', 'a_term1', 'id2', 'id3']
-        if okids:
-            assert [_id in okids for _id in closure]
-
-
-def test_combine_all_parents_w_no_parents():
-    term = {'term_id': 'id1'}
-    term = go._combine_all_parents(term)
-    assert not term['all_parents']  # both should be empty lists
-    assert not term['development']
-
-
-def test_combine_all_parents_w_empty_parents():
-    term = {'term_id': 'id1', 'parents': [], 'relationships': [],
-            'develops_from': [], 'has_part_inverse': []}
-    term = go._combine_all_parents(term)
-    assert not term['all_parents']  # both should be empty lists
-    assert not term['development']
-
-
-def test_combine_all_parents_w_one_parent():
-    term = {'term_id': 'id1', 'parents': ['id2'], 'relationships': [],
-            'develops_from': [], 'has_part_inverse': []}
-    term = go._combine_all_parents(term)
-    assert len(term['all_parents']) == 1
-    assert term['all_parents'][0] == 'id2'
-    assert term['development'] == term['all_parents']
-
-
-def test_combine_all_parents_w_two_parents():
-    term = {'term_id': 'id1', 'parents': ['id2', 'id3'], 'relationships': [],
-            'develops_from': [], 'has_part_inverse': []}
-    term = go._combine_all_parents(term)
-    assert len(term['all_parents']) == 2
-    assert 'id2' in term['all_parents']
-    assert 'id3' in term['all_parents']
-    assert sorted(term['development']) == sorted(term['all_parents'])
-
-
-def test_combine_all_parents_w_two_same_parents():
-    term = {'term_id': 'id1', 'parents': ['id2', 'id2'], 'relationships': [],
-            'develops_from': [], 'has_part_inverse': []}
-    term = go._combine_all_parents(term)
-    assert len(term['all_parents']) == 1
-    assert term['all_parents'][0] == 'id2'
-    assert term['development'] == term['all_parents']
-
-
-def test_combine_all_parents_w_parent_and_relationship_diff():
-    term = {'term_id': 'id1', 'parents': ['id2'], 'relationships': ['id3'],
-            'develops_from': [], 'has_part_inverse': []}
-    term = go._combine_all_parents(term)
-    assert len(term['all_parents']) == 2
-    assert 'id2' in term['all_parents']
-    assert 'id3' in term['all_parents']
-    assert sorted(term['development']) == sorted(term['all_parents'])
-
-
-def test_combine_all_parents_w_parent_and_relationship_same():
-    term = {'term_id': 'id1', 'parents': ['id2'], 'relationships': ['id2'],
-            'develops_from': [], 'has_part_inverse': []}
-    term = go._combine_all_parents(term)
-    assert len(term['all_parents']) == 1
-    assert term['all_parents'][0] == 'id2'
-    assert term['development'] == term['all_parents']
-
-
-def test_combine_all_parents_w_parent_and_develops_from_diff():
-    term = {'term_id': 'id1', 'parents': ['id2'], 'relationships': [],
-            'develops_from': ['id3'], 'has_part_inverse': []}
-    term = go._combine_all_parents(term)
-    assert len(term['all_parents']) == 1
-    assert len(term['development']) == 2
-    assert term['all_parents'][0] == 'id2'
-    assert 'id2' in term['development']
-    assert 'id3' in term['development']
-
-
-def test_combine_all_parents_w_parent_and_develops_from_same():
-    term = {'term_id': 'id1', 'parents': ['id2'], 'relationships': [],
-            'develops_from': ['id2'], 'has_part_inverse': []}
-    term = go._combine_all_parents(term)
-    assert len(term['all_parents']) == 1
-    assert term['all_parents'][0] == 'id2'
-    assert term['development'] == term['all_parents']
-
-
-def test_combine_all_parents_w_only_develops_from():
-    term = {'term_id': 'id1', 'parents': [], 'relationships': [],
-            'develops_from': ['id2'], 'has_part_inverse': []}
-    term = go._combine_all_parents(term)
-    assert not term['all_parents']
-    assert len(term['development']) == 1
-    assert term['development'][0] == 'id2'
-
-
-def test_combine_all_parents_w_has_part_inverse_only():
-    term = {'term_id': 'id1', 'parents': [], 'relationships': [],
-            'develops_from': [], 'has_part_inverse': ['id2']}
-    term = go._combine_all_parents(term)
-    assert not term['all_parents']  # both should be empty lists
-    assert not term['development']
-
-
-def test_combine_all_parents_w_has_part_inverse_to_exclude():
-    term = {'term_id': 'id1', 'parents': [], 'relationships': [],
-            'develops_from': ['id2'], 'has_part_inverse': ['id2']}
-    term = go._combine_all_parents(term)
-    assert not term['all_parents']  # both should be empty lists
-    assert not term['development']
-
-
-def test_combine_all_parents_w_has_part_inverse_to_exclude_plus_others():
-    term = {'term_id': 'id1', 'parents': ['id2'], 'relationships': [],
-            'develops_from': ['id3', 'id4', 'id5'], 'has_part_inverse': ['id4', 'id5', 'id6']}
-    term = go._combine_all_parents(term)
-    assert len(term['all_parents']) == 1
-    assert len(term['development']) == 2
-    assert term['all_parents'][0] == 'id2'
-    assert 'id2' in term['development']
-    assert 'id3' in term['development']
-
-
-def test_has_human_empty():
-    ll = []
-    assert not go._has_human(ll)
-
-
-def test_has_human_no_human():
-    ll = ['http://purl.obolibrary.org/obo/BFO_0000051']
-    assert not go._has_human(ll)
-
-
-def test_has_human_human():
-    ll = ['http://purl.obolibrary.org/obo/BFO_0000051', 'http://purl.obolibrary.org/obo/NCBITaxon_9606']
-    assert go._has_human(ll)
-
-
-def test_has_human_uriref_human():
-    uri = 'http://purl.obolibrary.org/obo/NCBITaxon_9606'
-    uri = go.convert2URIRef(uri)
-    ll = [uri]
-    assert go._has_human(ll)
-
-
-def test_get_termid_from_uri_no_uri():
-    uri = ''
-    assert not go.get_termid_from_uri(uri)
-
-
-def test_get_termid_from_uri_valid_uri():
-    uri = 'http://www.ebi.ac.uk/efo/EFO_0002784'
-    tid = go.get_termid_from_uri(uri)
-    assert tid == 'EFO:0002784'
-
-
-def test_get_termid_from_uri_funky_uri1():
-    uri = 'http://www.ebi.ac.uk/efo/EFO_UFO_0002784'
-    tid = go.get_termid_from_uri(uri)
-    assert tid == 'EFO:UFO:0002784'
-
-
-def test_get_termid_from_uri_funky_uri2():
-    uri = 'http://www.ebi.ac.uk/efo/EFO0002784'
-    tid = go.get_termid_from_uri(uri)
-    assert tid == 'EFO0002784'
-
-
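The four get_termid_from_uri cases above are consistent with taking the final path segment of the URI and replacing underscores with colons. A sketch that reproduces exactly the asserted outputs (the real implementation may differ in details):

def termid_from_uri(uri):
    # '' stays falsy; 'EFO_0002784' -> 'EFO:0002784';
    # 'EFO_UFO_0002784' -> 'EFO:UFO:0002784'; 'EFO0002784' is unchanged.
    if not uri:
        return uri
    return uri.rsplit('/', 1)[-1].replace('_', ':')
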
-@pytest.fixture
-def uberon_owler():
-    return Owler('src/encoded/tests/data/documents/test_uberon.owl')
-
-
-@pytest.fixture
-def uberon_owler2():
-    return Owler('src/encoded/tests/data/documents/test_uberon2.owl')
-
-
-@pytest.fixture
-def uberon_owler3():
-    return Owler('src/encoded/tests/data/documents/test_uberon3.owl')
-
-
-@pytest.fixture
-def uberon_owler4():
-    return Owler('src/encoded/tests/data/documents/test_uberon4.owl')
-
-
-@pytest.fixture
-def ll_class():
-    return go.convert2URIRef('http://purl.obolibrary.org/obo/UBERON_0000101')
-
-
-def test_get_term_name_from_rdf_no_name(uberon_owler):
-    name = go.get_term_name_from_rdf('pickle', uberon_owler)
-    assert not name
-
-
-def test_get_term_name_from_rdf_has_name(uberon_owler, ll_class):
-    name = go.get_term_name_from_rdf(ll_class, uberon_owler)
-    assert name == 'lobe of lung'
-
-
-def test_get_term_name_from_rdf_no_term(uberon_owler):
-    class_ = go.convert2URIRef('http://purl.obolibrary.org/obo/UBERON_0000001')
-    name = go.get_term_name_from_rdf(class_, uberon_owler)
-    assert not name
-
-
-def test_create_term_dict(ll_class, uberon_owler):
-    with mock.patch('encoded.commands.generate_ontology.get_term_name_from_rdf',
-                    return_value='lung lobe'):
-        term = go.create_term_dict(ll_class, 'termid', uberon_owler, 'ontid')
-        assert term['term_name'] == 'lung lobe'
-        assert term['term_id'] == 'termid'
-        assert 'ontid' in term['source_ontologies']
-        assert term['namespace'] == 'http://purl.obolibrary.org/obo'
-        assert term['term_url'] == 'http://purl.obolibrary.org/obo/UBERON_0000101'
-
-
-def test_add_term_and_info(uberon_owler2):
-    testid = 'UBERON:0001772'
-    relid = 'UBERON:0010304'
-    for c in uberon_owler2.allclasses:
-        if go.isBlankNode(c):
-            test_class = c
-    parent = go.convert2URIRef('http://purl.obolibrary.org/obo/UBERON_0001772')
-    terms = go._add_term_and_info(test_class, parent, 'test_rel', uberon_owler2, {})
-    assert testid in terms
-    term = terms[testid]
-    assert term['term_id'] == testid
-    assert relid in term['test_rel']
-
-
-def test_process_intersection_of(uberon_owler3):
-    terms = {}
-    for c in uberon_owler3.allclasses:
-        for i in uberon_owler3.rdfGraph.objects(c, go.IntersectionOf):
-            terms = go.process_intersection_of(c, i, uberon_owler3, terms)
-    assert len(terms) == 1
-    term = list(terms.values())[0]
-    assert len(term['relationships']) == 1
-    assert term['relationships'][0] == 'UBERON:1'
-    assert len(term['develops_from']) == 1
-    assert term['develops_from'][0] == 'UBERON:2'
-
-
-def test_process_blank_node(uberon_owler3):
-    terms = {}
-    for c in uberon_owler3.allclasses:
-        terms = go.process_blank_node(c, uberon_owler3, terms)
-    assert len(terms) == 1
-    assert 'UBERON:0001772' in terms
-
-
-def test_find_and_add_parent_of(uberon_owler4):
-    tid = 'CL:0002553'
-    terms = {tid: {'term_id': tid}}
-    relids = ['UBERON:0002048', 'OBI:0000456', 'CL:0000058', 'CL:0000133']
-    relation = None
-    seen = False
-    for c in uberon_owler4.allclasses:
-        for _, p in enumerate(uberon_owler4.get_classDirectSupers(c, excludeBnodes=False)):
-            if go.isBlankNode(p):
-                has_part = False
-                if not seen:
-                    has_part = True
-                    seen = True
-                terms = go._find_and_add_parent_of(p, c, uberon_owler4, terms, has_part, relation)
-    assert len(terms) == 2
-    print(terms)
-    for termid, term in terms.items():
-        if termid == tid:
-            assert len(term['relationships']) == 3
-            for t in term['relationships']:
-                assert t in relids
-        else:
-            assert termid in relids
-            assert len(term['has_part_inverse']) == 1
-            assert term['has_part_inverse'][0] == tid
-
-
-def test_process_parents(uberon_owler4):
-    tids = ['CL:0002553', 'CL:0000058']
-    relids = ['OBI:0000456', 'UBERON:0002048']
-    terms = {tids[0]: {'term_id': tids[0]}}
-    for c in uberon_owler4.allclasses:
-        terms = go.process_parents(c, uberon_owler4, terms)
-    print(terms)
-    assert len(terms) == 2
-    term1 = terms[tids[0]]
-    term2 = terms[tids[1]]
-    assert term1['develops_from'][0] == 'CL:0000133'
-    assert term1['parents'][0] == 'UBERON:0010313'
-    assert len(term1['relationships']) == 2
-    for r in relids:
-        assert r in term1['relationships']
-    assert term2['has_part_inverse'][0] == tids[0]
-
-
-@pytest.fixture
-def terms_w_stuff():
-    return {
-        'term1': {
-            'term_id': 't1',
-            'term_name': 'term1',
-            'relationships': ['rel1', 'rel2'],
-            'all_parents': ['p'],
-            'development': 'd',
-            'has_part_inverse': [],
-            'develops_from': '',
-            'part_of': ['p1'],
-            'closure': [],
-            'closure_with_develops_from': None
-        },
-        'term2': {
-            'term_id': 't1',
-            'term_name': 'term1'
-        },
-        'term3': {},
-        'term4': None
-    }
-
-
-def test_cleanup_non_fields(terms_w_stuff):
-    to_delete = ['relationships', 'all_parents', 'development',
-                 'has_part_inverse', 'develops_from', 'part_of',
-                 'closure', 'closure_with_develops_from']
-    to_keep = ['term_id', 'term_name']
-    for d in to_delete + to_keep:
-        assert d in terms_w_stuff['term1']
-    terms = go._cleanup_non_fields(terms_w_stuff)
-    assert len(terms) == 2
-    assert terms['term1'] == terms['term2']
-    for d in to_delete:
-        assert d not in terms['term1']
-    for k in to_keep:
-        assert k in terms['term1']
-
-
-@pytest.fixture
-def mock_get_synonyms():
-    syn_lists = [[], ['syn1'], ['syn1', 'syn2']]
-    with mock.patch('encoded.commands.generate_ontology.get_synonyms', side_effect=syn_lists) as mocked:
-        yield mocked
-
-
-@pytest.fixture
-def mock_get_definitions():
-    def_lists = [[], ['def1'], ['def1', 'def2']]
-    with mock.patch('encoded.commands.generate_ontology.get_synonyms', side_effect=def_lists) as mocked:
-        yield mocked
-
-
-@pytest.fixture
-def simple_terms():
-    terms = {'t1': {'term_id': 't1', 'term_url': 'term1'},
-             't2': {'term_id': 't2', 'term_url': 'term2'},
-             't3': {'term_id': 't3', 'term_url': 'term3'}}
-    return OrderedDict(sorted(terms.items(), key=lambda t: t[0]))
-
-
-def test_add_additional_term_info(simple_terms):
-    syn_lists = [[], ['syn1'], ['syn1', 'syn2']]
-    def_lists = [[], ['def1'], ['def1', 'def2']]
-    # terms = {'t1': {'term_id': 't1', 'term_url': 'term1'},
-    #          't2': {'term_id': 't2', 'term_url': 'term2'},
-    #          't3': {'term_id': 't3', 'term_url': 'term3'}}
-    # terms = OrderedDict(sorted(terms.items(), key=lambda t: t[0]))
-    with mock.patch('encoded.commands.generate_ontology.convert2URIRef', return_value='blah'):
-        with mock.patch('encoded.commands.generate_ontology.get_synonyms', side_effect=syn_lists):
-            with mock.patch('encoded.commands.generate_ontology.get_definitions', side_effect=def_lists):
-                result = go.add_additional_term_info(simple_terms, 'data', 'synterms', 'defterms')
-                for tid, term in result.items():
-                    if tid == 't3':
-                        assert 'UNK' in term['definitions']
-                        assert 'def1' in term['definitions']['UNK']
-                        assert 'def2' in term['definitions']['UNK']
-                        assert len(term['synonyms']) == 2
-                        assert 'syn1' in term['synonyms']
-                        assert 'syn2' in term['synonyms']
-                    elif tid == 't2':
-                        assert 'UNK' in term['definitions']
-                        assert 'def1' in term['definitions']['UNK']
-                        assert len(term['synonyms']) == 1
-                        assert term['synonyms'][0] == 'syn1'
-                    else:
-                        assert 'synonyms' not in term
-                        assert 'definition' not in term
-
-
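The two write_outfile tests that follow fix its observable behavior: with pretty=True the file is one indented JSON document readable via a single json.load, otherwise it is compact JSON parseable line by line. A minimal sketch consistent with both tests (the real function in generate_ontology may differ):

import json

def write_outfile_sketch(terms, filename, pretty=False):
    # terms: list of term dicts; compact output keeps one JSON document per line.
    with open(filename, 'w') as outfile:
        if pretty:
            json.dump(terms, outfile, indent=4)
        else:
            outfile.write(json.dumps(terms) + '\n')
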
go.write_outfile(list(simple_terms.values()), filename, pretty=True)
-    with open(filename, 'r') as infile:
-        result = json.load(infile)
-    print(result)
-    for r in result:
-        assert r in simple_terms.values()
-    os.remove(filename)
-
-
-def test_write_outfile_notpretty(simple_terms):
-    print(simple_terms)
-    filename = 'tmp_test_file'
-    go.write_outfile(list(simple_terms.values()), filename)
-    with open(filename, 'r') as infile:
-        for line in infile:
-            result = json.loads(line)
-            for v in simple_terms.values():
-                assert v in result
-    os.remove(filename)
-
-
-@pytest.fixture
-def ontology_list():
-    return [
-        {'uuid': '1', 'ontology_name': 'ont1', 'ontology_prefix': 'TO'},
-        {'uuid': '2', 'ontology_name': 'ont2', 'ontology_prefix': 'NN'}
-    ]
-
-
-@pytest.fixture
-def matches(ontology_list):
-    return [
-        {'term_id': 'TO:t1', 'a': 1, 'b': 2, 'c': 3, 'source_ontologies': [ontology_list[0].get('uuid')]},
-        {'term_id': 'TO:t1', 'a': 1, 'b': 2, 'c': 3, 'source_ontologies': [ontology_list[0].get('uuid')]}
-    ]
-
-
-def test_terms_match_identical(matches):
-    assert go._terms_match(matches[0], matches[1])
-
-
-def test_terms_match_w_parents(matches):
-    t1 = matches[0]
-    t2 = matches[1]
-    p1 = ['OBI:01', 'EFO:01']
-    p2 = [{'@id': '/ontology-terms/OBI:01/', 'display_title': 'blah'},
-          {'@id': '/ontology-terms/EFO:01/', 'display_title': 'hah'}]
-    t1['parents'] = p1
-    t2['parents'] = p2
-    assert go._terms_match(t1, t2)
-
-
-def test_terms_match_unmatched_parents_1(matches):
-    t1 = matches[0]
-    t2 = matches[1]
-    p1 = ['OBI:01', 'EFO:01']
-    p2 = [{'@id': '/ontology-terms/OBI:01/', 'display_title': 'blah'}]
-    t1['parents'] = p1
-    t2['parents'] = p2
-    assert not go._terms_match(t1, t2)
-
-
-def test_terms_match_unmatched_parents_2(matches):
-    t1 = matches[0]
-    t2 = matches[1]
-    p1 = ['OBI:01', 'EFO:01']
-    p2 = [{'@id': '/ontology-terms/OBI:01/', 'display_title': 'blah'},
-          {'@id': '/ontology-terms/EFO:02/', 'display_title': 'hah'}]
-    t1['parents'] = p1
-    t2['parents'] = p2
-    assert not go._terms_match(t1, t2)
-
-
-def test_terms_match_w_ontology(matches):
-    t1 = matches[0]
-    t2 = matches[1]
-    o1 = '530016bc-8535-4448-903e-854af460b254'
-    o2 = {'@id': '/ontologys/530016bc-8535-4448-903e-854af460b254/', 'display_title': 'blah'}
-    t1['source_ontologies'] = [o1]
-    t2['source_ontologies'] = [o2]
-    assert go._terms_match(t1, t2)
-
-
-@pytest.fixture
-def ont_terms(matches, ontology_list):
-    t2 = matches[1]
-    t2['term_id'] = 'TO:t2'
-    t2['parents'] = ['OBI:01', 'EFO:01']
-    return {
-        'TO:t1': matches[0],
-        'TO:t2': t2,
-        'NN:t3': {'term_id': 'NN:t3', 'x': 7, 'y': 8, 'z': 9, 'source_ontologies': [ontology_list[1]]}
-    }
-
-
-@pytest.fixture
-def db_terms(ont_terms):
-    db_terms = ont_terms.copy()
-    db_terms['TO:t1']['uuid'] = '1234'
-    db_terms['TO:t2']['uuid'] = '5678'
-    del db_terms['TO:t2']['parents']
-    del db_terms['NN:t3']
-    for v in db_terms.values():
-        v.update({'status': 'released'})
-    return db_terms
-
-
-def test_id_post_and_patch_filter(ont_terms, db_terms, ontology_list):
-    id2chk = 'NN:t3'
-    result = go.id_post_and_patch(ont_terms, db_terms, ontology_list)
-    assert len(result[0]) == 1
-    assert id2chk == result[0][0].get('term_id')
-    assert id2chk in result[1]
-
-
-def test_id_post_and_patch_no_filter(ont_terms, db_terms, ontology_list):
-    tids = ['TO:t1', 'TO:t2', 'NN:t3']
-    result = go.id_post_and_patch(ont_terms, db_terms, ontology_list, False)
-    assert len(result[0]) == 3
-    for t in result[0]:
-        assert t.get('term_id') in tids
-    for t in result[1]:
-        assert t in tids
-
-
-def
test_id_post_and_patch_id_obs(ont_terms, db_terms, ontology_list): - ''' term in db is no longer in ontology - tests that status of term will be set to obsolete''' - db_terms['TO:t4'] = { - 'term_id': 'TO:t4', - 'source_ontologies': [{'uuid': '1', 'ontology_name': 'ont1', 'ontology_prefix': 'TO'}], - 'uuid': '7890', - 'status': 'released'} - result = go.id_post_and_patch(ont_terms, db_terms, ontology_list) - result_terms = result[0] - assert len(result_terms) == 2 - assert '7890' in [tid.get('uuid') for tid in result_terms] - for t in result_terms: - if t.get('uuid') == '7890': - assert t.get('status') == 'obsolete' - - -def test_id_post_and_patch_id_obs_simple(ont_terms, db_terms, ontology_list): - db_terms['TO:t4'] = { - 'term_id': 'TO:t4', - 'source_ontologies': [{'uuid': '1', 'ontology_name': 'ont1', 'ontology_prefix': 'TO'}], - 'uuid': '7890', - 'status': 'released'} - result = go.id_post_and_patch(ont_terms, db_terms, ontology_list, ontarg='1', simple=True) - result_terms = result[0] - assert len(result_terms) == 2 - assert '7890' in [tid.get('uuid') for tid in result_terms] - for t in result_terms: - if t.get('uuid') == '7890': - assert t.get('status') == 'obsolete' - - -def test_id_post_and_patch_donot_obs(ont_terms, db_terms, ontology_list): - db_terms['t4'] = {'term_id': 't4', 'source_ontologies': {'uuid': '1', 'ontology_name': 'ont1'}, 'uuid': '7890'} - result = go.id_post_and_patch(ont_terms, db_terms, ontology_list, True, False) - assert 't4' not in [t.get('term_id') for t in result[0]] - - -def test_order_terms_by_phasing_no_terms(): - assert not go.order_terms_by_phasing([], []) - - -def test_order_terms_by_phasing_no_new_terms(db_terms): - term_list = db_terms.values() - res = go.order_terms_by_phasing([], term_list) - for r in res: - assert r in term_list - - -def test_order_terms_by_phasing_only_new_terms(db_terms): - id_list = db_terms.keys() - term_list = db_terms.values() - res = go.order_terms_by_phasing(id_list, term_list) - assert len(res) == 4 - assert all([v in r for v in ['term_id', 'source_ontologies', 'uuid'] for r in res[:2]]) - assert all([v in r for v in ['a', 'b', 'c', 'status', 'uuid'] for r in res[2:]]) - - -def test_order_terms_by_phasing_both_new_and_existing(db_terms): - term_list = db_terms.values() - new_id = list(term_list)[0].get('term_id') - res = go.order_terms_by_phasing([new_id], term_list) - assert len(res) == 3 - assert all([v in res[0] for v in ['term_id', 'source_ontologies', 'uuid']]) - assert all([v in r for v in ['a', 'b', 'c', 'status', 'uuid'] for r in res[1:]]) - - -def valid_uuid(uid): - validchars = '0123456789abcdef' - uid = uid.replace('-', '') - if len(uid) != 32: - return False - for c in uid: - if c not in validchars: - return False - return True - - -@pytest.fixture -def embedded_dbterm(): - return { - "synonyms": [ - "renal pelvis uroepithelium", - "renal pelvis transitional epithelium", - "pelvis of ureter uroepithelium", - "renal pelvis urothelium", - "kidney pelvis uroepithelium", - "uroepithelium of pelvis of ureter", - "urothelium of pelvis of ureter", - "uroepithelium of kidney pelvis", - "transitional epithelium of kidney pelvis", - "transitional epithelium of renal pelvis", - "urothelium of kidney pelvis", - "uroepithelium of renal pelvis", - "urothelium of renal pelvis", - "kidney pelvis transitional epithelium", - "pelvis of ureter urothelium" - ], - "preferred_name": "kidney pelvis urothelium", - "references": [], - "external_references": [], - "status": "released", - "term_name": "kidney pelvis urothelium", - 
"submitted_by": { - "principals_allowed": { - "edit": [ - "group.admin", - "userid.986b362f-4eb6-4a9c-8173-3ab267307e3a" - ], - "view": [ - "group.admin", - "group.read-only-admin", - "remoteuser.EMBED", - "remoteuser.INDEXER", - "userid.986b362f-4eb6-4a9c-8173-3ab267307e3a" - ] - }, - "@id": "/users/986b362f-4eb6-4a9c-8173-3ab267307e3a/", - "@type": [ - "User", - "Item" - ], - "uuid": "986b362f-4eb6-4a9c-8173-3ab267307e3a", - "display_title": "4dn DCIC" - }, - "display_title": "kidney pelvis urothelium", - "schema_version": "1", - "@type": [ - "OntologyTerm", - "Item" - ], - "parents": [ - { - "principals_allowed": { - "edit": [ - "group.admin" - ], - "view": [ - "system.Everyone" - ] - }, - "@id": "/ontology-terms/UBERON:0001254/", - "@type": [ - "OntologyTerm", - "Item" - ], - "uuid": "38dbff69-aac7-46a4-837e-7340c2c5bcd5", - "display_title": "urothelium of ureter" - }, - { - "principals_allowed": { - "edit": [ - "group.admin" - ], - "view": [ - "system.Everyone" - ] - }, - "@id": "/ontology-terms/UBERON:0004819/", - "@type": [ - "OntologyTerm", - "Item" - ], - "uuid": "57ac2905-0533-43c9-988b-9add8c225a78", - "display_title": "kidney epithelium" - } - ], - "date_created": "2017-05-11T16:00:51.747446+00:00", - "term_id": "UBERON:0004788", - "source_ontology": { - "uuid": "530016bc-8535-4448-903e-854af460b254", - "display_title": "Uberon", - "principals_allowed": { - "edit": [ - "group.admin" - ], - "view": [ - "system.Everyone" - ] - }, - "@id": "/ontologys/530016bc-8535-4448-903e-854af460b254/", - "@type": [ - "Ontology", - "Item" - ], - "ontology_name": "Uberon" - }, - "uuid": "e5e1690a-1a80-4e50-a3cf-58f2f269abd8", - "term_url": "http://purl.obolibrary.org/obo/UBERON_0004788", - "last_modified": { - "date_modified": "2018-07-11T05:05:30.826642+00:00", - "modified_by": { - "principals_allowed": { - "edit": [ - "group.admin", - "userid.986b362f-4eb6-4a9c-8173-3ab267307e3a" - ], - "view": [ - "group.admin", - "group.read-only-admin", - "remoteuser.EMBED", - "remoteuser.INDEXER", - "userid.986b362f-4eb6-4a9c-8173-3ab267307e3a" - ] - }, - "@id": "/users/986b362f-4eb6-4a9c-8173-3ab267307e3a/", - "@type": [ - "User", - "Item" - ], - "uuid": "986b362f-4eb6-4a9c-8173-3ab267307e3a", - "display_title": "4dn DCIC" - } - }, - "principals_allowed": { - "edit": [ - "group.admin" - ], - "view": [ - "system.Everyone" - ] - }, - "@id": "/ontology-terms/UBERON:0004788/", - "slim_terms": [ - { - "principals_allowed": { - "edit": [ - "group.admin" - ], - "view": [ - "system.Everyone" - ] - }, - "term_name": "endoderm", - "display_title": "endoderm", - "is_slim_for": "developmental", - "@id": "/ontology-terms/UBERON:0000925/", - "@type": [ - "OntologyTerm", - "Item" - ], - "uuid": "111121bc-8535-4448-903e-854af460a233" - }, - { - "principals_allowed": { - "edit": [ - "group.admin" - ], - "view": [ - "system.Everyone" - ] - }, - "term_name": "kidney", - "display_title": "kidney", - "is_slim_for": "organ", - "@id": "/ontology-terms/UBERON:0002113/", - "@type": [ - "OntologyTerm", - "Item" - ], - "uuid": "111167bc-8535-4448-903e-854af460a233" - }, - { - "principals_allowed": { - "edit": [ - "group.admin" - ], - "view": [ - "system.Everyone" - ] - }, - "term_name": "ureter", - "display_title": "ureter", - "is_slim_for": "organ", - "@id": "/ontology-terms/UBERON:0000056/", - "@type": [ - "OntologyTerm", - "Item" - ], - "uuid": "111148bc-8535-4448-903e-854af460a233" - }, - { - "principals_allowed": { - "edit": [ - "group.admin" - ], - "view": [ - "system.Everyone" - ] - }, - "term_name": "renal system", - 
"display_title": "renal system", - "is_slim_for": "system", - "@id": "/ontology-terms/UBERON:0001008/", - "@type": [ - "OntologyTerm", - "Item" - ], - "uuid": "111130bc-8535-4448-903e-854af460a233" - }, - { - "principals_allowed": { - "edit": [ - "group.admin" - ], - "view": [ - "system.Everyone" - ] - }, - "term_name": "mesoderm", - "display_title": "mesoderm", - "is_slim_for": "developmental", - "@id": "/ontology-terms/UBERON:0000926/", - "@type": [ - "OntologyTerm", - "Item" - ], - "uuid": "111120bc-8535-4448-903e-854af460a233" - } - ], - "namespace": "http://purl.obolibrary.org/obo", - "definition": "the epithelial lining of the luminal space of the kidney pelvis" - } - - -def test_get_raw_form(embedded_dbterm): - raw_term = go.get_raw_form(embedded_dbterm) - print(raw_term) - - -def test_update_definition_double(): - prefix = 'EFO' - tdef = 'here is EFO definition (EFO)' - dbdef = 'here is outdated definition (EFO, OBI) and another def (SO)' - newdef = go.update_definition(tdef, dbdef, prefix) - assert tdef in newdef - assert 'here is outdated definition (EFO, OBI)' not in newdef - - -def test_update_definition_single(): - prefix = 'EFO' - tdef = 'here (ABL) is EFO definition (EFO)' - dbdef = 'here (ABL) is outdated definition (EFO) and another (random abbrev) def (OBI, SO)' - newdef = go.update_definition(tdef, dbdef, prefix) - assert tdef in newdef - assert newdef == 'here (ABL) is EFO definition (EFO) and another (random abbrev) def (OBI, SO)' diff --git a/src/encoded/tests/test_graph.py b/src/encoded/tests/test_graph.py deleted file mode 100644 index 5567b92018..0000000000 --- a/src/encoded/tests/test_graph.py +++ /dev/null @@ -1,18 +0,0 @@ -import pytest - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -def test_graph_dot(testapp): - res = testapp.get('/profiles/graph.dot', status=200) - assert res.content_type == 'text/vnd.graphviz' - assert res.text - - -@pytest.mark.broken # Doesn't work on GitHub Actions -@pytest.mark.skip -def test_graph_svg(testapp): - res = testapp.get('/profiles/graph.svg', status=200) - assert res.content_type == 'image/svg+xml' - assert res.text diff --git a/src/encoded/tests/test_higlass.py b/src/encoded/tests/test_higlass.py deleted file mode 100644 index 703c1b0b5f..0000000000 --- a/src/encoded/tests/test_higlass.py +++ /dev/null @@ -1,2275 +0,0 @@ -import pytest - -from .test_file import mcool_file_json, bedGraph_file_json, bigwig_file_json, bigbed_file_json, bed_beddb_file_json, beddb_file_json, chromsizes_file_json - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - -# Test Higlass display endpoints. - - -@pytest.fixture -def higlass_mcool_viewconf(testapp, award, lab): - """ Creates a fixture for an mcool Higlass view config. - - Args: - testapp(obj): This object can make RESTful API calls to the test server. - - Returns: - Dictionary representing the JSON response after posting the view config. 
- """ - viewconf = { - 'award': award['@id'], - 'lab': lab['@id'], - "title": - "Test MCOOL Display", - "description": "An MCOOL file track plus annotations for gene GRCm38 (tileset 'IUcqX4GzTNWJIzE2-b_sZg') and chromosome 'JXbq7f-GTeq3FJy_ilIomQ'.", - "uuid": "00000000-1111-0000-1111-000000000002", - "name": "higlass-mcool-test-view", - "genome_assembly": "GRCm38", - "viewconfig": { - "editable": True, - "zoomFixed": False, - "exportViewUrl": "/api/v1/viewconfs", - "zoomLocks": { - "locksByViewUid": {}, - "locksDict": {} - }, - "locationLocks": { - "locksByViewUid": {}, - "locksDict": {} - }, - "trackSourceServers": ["https://higlass.4dnucleome.org/api/v1"], - "views": [{ - "uid": "view-4dn-mcool-0", - "layout": { - "w": 12, - "h": 12, - "x": 0, - "y": 0 - }, - "initialXDomain": [102391829.2052902, 2938891536.27695], - "initialYDomain": [129711724.73566854, 1810982460.1999617], - "autocompleteSource": "/api/v1/suggest/?d=IUcqX4GzTNWJIzE2-b_sZg&", - "genomePositionSearchBox": { - "autocompleteServer": - "https://higlass.4dnucleome.org/api/v1", - "autocompleteId": "IUcqX4GzTNWJIzE2-b_sZg", - "chromInfoServer": "https://higlass.4dnucleome.org/api/v1", - "chromInfoId": "GRCm38", - "visible": True - }, - "tracks": { - "top": [{ - "server": "https://higlass.4dnucleome.org/api/v1", - "tilesetUid": "IUcqX4GzTNWJIzE2-b_sZg", - "type": "horizontal-gene-annotations", - "options": { - "labelColor": "black", - "labelPosition": "hidden", - "plusStrandColor": "black", - "minusStrandColor": "black", - "trackBorderWidth": 0, - "trackBorderColor": "black", - "name": "Gene Annotations (GRCm38)", - "showMousePosition": False, - "mousePositionColor": "#999999" - }, - "height": 55, - "uid": "top-annotation-track" - }, { - "server": "https://higlass.4dnucleome.org/api/v1", - "tilesetUid": "JXbq7f-GTeq3FJy_ilIomQ", - "type": "horizontal-chromosome-labels", - "options": { - "showMousePosition": False, - "mousePositionColor": "#999999" - }, - "height": 30, - "uid": "top-chromosome-track" - }], - "left": [{ - "server": "https://higlass.4dnucleome.org/api/v1", - "tilesetUid": "IUcqX4GzTNWJIzE2-b_sZg", - "uid": "left-annotation-track", - "type": "vertical-gene-annotations", - "options": { - "labelColor": "black", - "labelPosition": "hidden", - "plusStrandColor": "black", - "minusStrandColor": "black", - "trackBorderWidth": 0, - "trackBorderColor": "black", - "name": "Gene Annotations (GRCm38)", - "showMousePosition": False, - "mousePositionColor": "#999999" - }, - "width": 55 - }, { - "server": "https://higlass.4dnucleome.org/api/v1", - "tilesetUid": "JXbq7f-GTeq3FJy_ilIomQ", - "uid": "left-chromosome-track", - "type": "vertical-chromosome-labels", - "options": { - "showMousePosition": False, - "mousePositionColor": "#999999" - }, - "width": 20 - }], - "center": [{ - "uid": "center-mcool-track", - "type": "combined", - "height": 250, - "contents": [{ - "server": "https://higlass.4dnucleome.org/api/v1", - "tilesetUid": "LTiacew8TjCOaP9gpDZwZw", - "type": "heatmap", - "position": "center", - "uid": "GjuZed1ySGW1IzZZqFB9BA", - "options": { - "backgroundColor": "#eeeeee", - "labelPosition": "topLeft", - "colorRange": [ - "white", "rgba(245,166,35,1.0)", - "rgba(208,2,27,1.0)", "black" - ], - "maxZoom": None, - "colorbarPosition": "topRight", - "trackBorderWidth": 0, - "trackBorderColor": "black", - "heatmapValueScaling": "log", - "showMousePosition": False, - "mousePositionColor": "#999999", - "showTooltip": False, - "name": "4DNFI1TBYKV3.mcool", - "scaleStartPercent": "0.00000", - "scaleEndPercent": "1.00000" - } - }], 
- "position": "center", - "options": {} - }], - "right": [], - "bottom": [], - "whole": [], - "gallery": [] - } - }], - "valueScaleLocks": { - "locksByViewUid": {}, - "locksDict": {} - } - } - } - return testapp.post_json('/higlass-view-configs/', viewconf).json - - -@pytest.fixture -def higlass_blank_viewconf(testapp, lab, award): - """ Creates a fixture for a blank Higlass view config (lacks any files or genome assembly). - - Args: - testapp(obj): This object can make RESTful API calls to the test server. - - Returns: - Dictionary representing the JSON response after posting the blank view config. - """ - viewconf = { - 'award': award['@id'], - 'lab': lab['@id'], - "title": "Empty Higlass Viewconfig", - "description": "No files in viewconf, ready to clone.", - "uuid": "00000000-1111-0000-1111-000000000000", - "name": "empty-higlass-viewconf", - "genome_assembly": "GRCm38", - "viewconfig": { - "editable": - True, - "zoomFixed": - False, - "trackSourceServers": ["//higlass.4dnucleome.org/api/v1"], - "exportViewUrl": - "/api/v1/viewconfs", - "views": [{ - "uid": - "aa", - "initialXDomain": [-167962308.59835115, 3260659599.528857], - "chromInfoPath": - "//s3.amazonaws.com/pkerp/data/hg19/chromSizes.tsv", - "tracks": { - "top": [], - "left": [], - "center": [{ - "contents": [] - }], - "right": [], - "bottom": [], - "whole": [], - "gallery": [] - }, - "layout": { - "w": 12, - "h": 12, - "x": 0, - "y": 0 - }, - "initialYDomain": [549528857.4793874, 2550471142.5206127] - }], - "zoomLocks": { - "locksByViewUid": {}, - "locksDict": {} - }, - "locationLocks": { - "locksByViewUid": {}, - "locksDict": {} - }, - "valueScaleLocks": { - "locksByViewUid": {}, - "locksDict": {} - } - } - } - return testapp.post_json('/higlass-view-configs/', viewconf).json - - -def assert_true(bool_to_test, comment=""): - """ Raises AssertionError if bool is not true. - Args: - bool(boolean): Value to be asserted. - comment(str, optional): String to help explain the error. - - Returns: - Nothing - - Raises: - AssertionError: if bool does not evaluate to True. - """ - if not bool_to_test: - raise AssertionError(comment) - - -def test_higlass_noop(testapp, higlass_mcool_viewconf): - """ Test the python endpoint exists. - Given a viewconf and no experiments, the viewconf should remain unchanged. - - Args: - testapp(obj): This object can make RESTful API calls to the test server. - higlass_mcool_viewconf(obj): Higlass view configuration for an mcool file. - - Returns: - Nothing - - Raises: - AssertionError if the test fails. - """ - - # Get the Higlass Viewconf that will be edited. - higlass_conf_uuid = "00000000-1111-0000-1111-000000000002" - response = testapp.get( - "/higlass-view-configs/{higlass_conf_uuid}/?format=json".format( - higlass_conf_uuid=higlass_conf_uuid)) - higlass_json = response.json - - # Try to add nothing to the viewconf. - response = testapp.post_json("/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': - higlass_json["viewconfig"], - 'genome_assembly': - higlass_json["genome_assembly"], - 'files': [], - }) - new_higlass_json = response.json["new_viewconfig"] - - # The new viewconf should be a subset of the old one. 
assert_true(
-        len(higlass_json["viewconfig"]["views"]) == len(
-            new_higlass_json["views"]))
-    assert_true(len(new_higlass_json["views"]) == 1)
-    for index in range(len(new_higlass_json["views"])):
-        new_higlass = new_higlass_json["views"][index]
-        old_higlass = higlass_json["viewconfig"]["views"][index]
-        for key in new_higlass:
-            assert_true(old_higlass[key] == new_higlass[key])
-
-
-def test_add_mcool(testapp, higlass_blank_viewconf, mcool_file_json):
-    """ Don't pass in an existing higlass viewconf, but do add a file.
-    Expect a new higlass view containing the file.
-
-    Args:
-        testapp(obj): This object can make RESTful API calls to the test server.
-        higlass_blank_viewconf(obj): Empty Higlass view configuration with no file or genome assembly.
-        mcool_file_json(dict): Fixture refers to an mcool file.
-
-    Returns:
-        Nothing
-
-    Raises:
-        AssertionError if the test fails.
-    """
-
-    genome_assembly = "GRCm38"
-
-    # Post an mcool file and retrieve its uuid. Add a higlass_uid.
-    mcool_file_json['higlass_uid'] = "LTiacew8TjCOaP9gpDZwZw"
-    mcool_file_json['genome_assembly'] = genome_assembly
-    mcool_file = testapp.post_json('/file_processed',
-                                   mcool_file_json).json['@graph'][0]
-
-    # Try to create a view, adding a file but no base view.
-    response = testapp.post_json(
-        "/add_files_to_higlass_viewconf/",
-        {'files': ["{uuid}".format(uuid=mcool_file['uuid'])]})
-
-    new_higlass_view_json = response.json["new_viewconfig"]
-
-    assert_true(response.json["new_genome_assembly"] == genome_assembly)
-
-    # There should be 1 view.
-    assert_true(len(new_higlass_view_json["views"]) == 1)
-
-    view = new_higlass_view_json["views"][0]
-
-    assert_true("layout" in view)
-    assert_true("uid" in view)
-    assert_true("tracks" in view)
-    assert_true("center" in view["tracks"])
-    assert_true(len(view["tracks"]["center"]) == 1)
-
-    center_track = view["tracks"]["center"][0]
-    assert_true(center_track["type"] == "combined")
-    assert_true("contents" in center_track)
-
-    # The contents should have the mcool's heatmap
-    contents = center_track["contents"]
-    assert_true(len(contents) == 1)
-
-    # The central contents should hold the mcool file's heatmap.
-    track = contents[0]
-    if "tilesetUid" in track and track["tilesetUid"] == mcool_file_json['higlass_uid']:
-        assert_true(track["type"] == "heatmap")
-
-    # Check the initial domain to make sure the view is centered.
-    assert_true(
-        abs(view["initialXDomain"][0] + 681380342) < 100,
-        "initialXDomain left side is wrong. Should be around -681380342, instead got "
-        + str(view["initialXDomain"][0]))
-    assert_true(
-        abs(view["initialXDomain"][1] - 3406901712) < 100,
-        "initialXDomain right side is wrong. Should be around 3406901712, instead got "
-        + str(view["initialXDomain"][1]))
-    assert_true(
-        abs(view["initialYDomain"][0] + 681380342) < 100,
-        "initialYDomain top side is wrong. Should be around -681380342, instead got "
-        + str(view["initialYDomain"][0]))
-    assert_true(
-        abs(view["initialYDomain"][1] - 3406901712) < 100,
-        "initialYDomain bottom side is wrong. Should be around 3406901712, instead got "
-        + str(view["initialYDomain"][1]))
-
-
-def test_add_bedGraph_higlass(testapp, higlass_mcool_viewconf,
-                              bedGraph_file_json):
-    """ Given a viewconf with an mcool file, the viewconf should add a bedGraph on top.
-
-    Args:
-        testapp(obj): This object can make RESTful API calls to the test server.
-        higlass_mcool_viewconf(obj): Higlass view configuration for an mcool file.
-        bedGraph_file_json(dict): Fixture refers to a bedgraph file.
-
-    Returns:
-        Nothing
-
-    Raises:
-        AssertionError if the test fails.
-    """
-
-    # Get a bedGraph file to add.
-    bedGraph_file_json['higlass_uid'] = "Y08H_toDQ-OxidYJAzFPXA"
-    bedGraph_file_json['md5sum'] = '00000000000000000000000000000001'
-    bedGraph_file_json['genome_assembly'] = "GRCm38"
-    bg_file = testapp.post_json('/file_processed',
-                                bedGraph_file_json).json['@graph'][0]
-
-    # Get the Higlass Viewconf that will be edited.
-    higlass_conf_uuid = "00000000-1111-0000-1111-000000000002"
-    response = testapp.get(
-        "/higlass-view-configs/{higlass_conf_uuid}/?format=json".format(
-            higlass_conf_uuid=higlass_conf_uuid))
-    higlass_json = response.json
-
-    # Try to add the bedGraph to the existing viewconf.
-    response = testapp.post_json(
-        "/add_files_to_higlass_viewconf/", {
-            'higlass_viewconfig': higlass_json["viewconfig"],
-            'genome_assembly': higlass_json["genome_assembly"],
-            'files': ["{uuid}".format(uuid=bg_file['uuid'])]
-        })
-
-    # Get the new json.
-    new_higlass_json = response.json["new_viewconfig"]
-
-    # Make sure the bedGraph has been added above the mcool file.
-    assert_true(len(new_higlass_json["views"]) == 1)
-
-    tracks = new_higlass_json["views"][0]["tracks"]
-    old_tracks = higlass_json["viewconfig"]["views"][0]["tracks"]
-
-    # There should still be exactly one central heatmap.
-    assert_true(len(tracks["center"][0]["contents"]) == 1)
-    assert_true(tracks["center"][0]["contents"][0]["type"] == "heatmap")
-
-    # Only one new top track should have appeared.
-    assert_true(len(tracks["left"]) == len(old_tracks["left"]))
-    assert_true(len(tracks["top"]) == len(old_tracks["top"]) + 1)
-
-    # The new top track should not be very tall, since there is a 2D file in the center.
-    bedGraph_track = [
-        t for t in tracks["top"] if "-divergent-bar" in t["type"]
-    ][0]
-    assert_true(
-        bedGraph_track["height"] < 100,
-        "1D file is too big: height should be less than 100, got {actual} instead.".format(
-            actual=bedGraph_track["height"]))
-
-
-def test_add_bedGraph_to_bedGraph(testapp, higlass_blank_viewconf,
-                                  bedGraph_file_json):
-    """ Given a blank viewconf, adding a bedGraph should create a top track;
-    adding a second bedGraph should stack another top track with it.
-
-    Args:
-        testapp(obj): This object can make RESTful API calls to the test server.
-        higlass_blank_viewconf(obj): Empty Higlass view configuration with no file or genome assembly.
-        bedGraph_file_json(dict): Fixture refers to a bedgraph file.
-
-    Returns:
-        Nothing
-
-    Raises:
-        AssertionError if the test fails.
-    """
-
-    # Add the bedGraph file with a higlass uid and a genome assembly.
-    bedGraph_file_json['higlass_uid'] = "Y08H_toDQ-OxidYJAzFPXA"
-    bedGraph_file_json['genome_assembly'] = "GRCm38"
-    bg_file = testapp.post_json('/file_processed',
-                                bedGraph_file_json).json['@graph'][0]
-
-    # Get the blank Higlass viewconf that will be edited.
-    higlass_conf_uuid = "00000000-1111-0000-1111-000000000000"
-    response = testapp.get(
-        "/higlass-view-configs/{higlass_conf_uuid}/?format=json".format(
-            higlass_conf_uuid=higlass_conf_uuid))
-    higlass_json = response.json
-
-    # Add a bedGraph file.
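# The POST below recurs throughout this file with the same payload shape.
# A hedged convenience wrapper might look like this; only the endpoint
# path and payload keys come from the tests, the function name and
# signature are invented:
def add_files_to_viewconf(testapp, viewconfig, files, genome_assembly=None):
    payload = {'higlass_viewconfig': viewconfig, 'files': files}
    if genome_assembly is not None:
        payload['genome_assembly'] = genome_assembly
    return testapp.post_json("/add_files_to_higlass_viewconf/", payload).json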
response = testapp.post_json(
-        "/add_files_to_higlass_viewconf/", {
-            'higlass_viewconfig': higlass_json["viewconfig"],
-            'genome_assembly': higlass_json["genome_assembly"],
-            'files': ["{uuid}".format(uuid=bg_file['uuid'])]
-        })
-
-    new_higlass_json = response.json["new_viewconfig"]
-    assert_true(response.json["success"] == True)
-
-    assert_true(len(new_higlass_json["views"]) == 1)
-    assert_true(len(new_higlass_json["views"][0]["tracks"]["top"]) == 1)
-
-    assert_true(
-        isinstance(new_higlass_json["views"][0]["tracks"]["top"][0]["height"],
-                   int), "Not an integer")
-
-    # The new top track should be tall, since there are no other tracks. But not too tall.
-    assert_true(
-        new_higlass_json["views"][0]["tracks"]["top"][0]["height"] >= 100
-        and new_higlass_json["views"][0]["tracks"]["top"][0]["height"] < 600,
-        "1D file is wrong size: height should be at least 100 and less than 600, got {actual} instead.".format(
-            actual=new_higlass_json["views"][0]["tracks"]["top"][0]["height"]))
-
-    # Make sure there is a label.
-    assert_true(new_higlass_json["views"][0]["tracks"]["top"][0]["options"]
-                ["labelPosition"] != "hidden")
-
-    # Add another bedGraph file. Make sure the bedGraphs are stacked atop each other.
-    response = testapp.post_json(
-        "/add_files_to_higlass_viewconf/", {
-            'higlass_viewconfig': new_higlass_json,
-            'files': ["{uuid}".format(uuid=bg_file['uuid'])]
-        })
-
-    new_higlass_json = response.json["new_viewconfig"]
-    assert_true(response.json["success"] == True)
-
-    assert_true(len(new_higlass_json["views"]) == 1)
-    assert_true(len(new_higlass_json["views"][0]["tracks"]["top"]) == 2)
-
-    # The new top tracks should be very tall, since there are no other tracks.
-    bedGraph_tracks = [
-        t for t in new_higlass_json["views"][0]["tracks"]["top"]
-        if "-divergent-bar" in t["type"]
-    ]
-    for t in bedGraph_tracks:
-        assert_true(
-            t["height"] >= 100,
-            "1D file is too small: height should be at least 100, got {actual} instead.".format(
-                actual=t["height"]))
-
-
-def test_add_mcool_to_mcool(testapp, higlass_mcool_viewconf, mcool_file_json):
-    """ Given a viewconf with an mcool file, the viewconf should add another mcool on the side.
-
-    Args:
-        testapp(obj): This object can make RESTful API calls to the test server.
-        higlass_mcool_viewconf(obj): Higlass view configuration for an mcool file.
-        mcool_file_json(dict): Fixture refers to an mcool file.
-
-    Returns:
-        Nothing
-
-    Raises:
-        AssertionError if the test fails.
-    """
-
-    # Post an mcool file and retrieve its uuid. Add a higlass_uid.
-    mcool_file_json['higlass_uid'] = "LTiacew8TjCOaP9gpDZwZw"
-    mcool_file_json['genome_assembly'] = "GRCm38"
-    mcool_file = testapp.post_json('/file_processed',
-                                   mcool_file_json).json['@graph'][0]
-
-    # Also add an mcool file with a different genome assembly.
-    mcool_file_json['higlass_uid'] = "LTiacew8TjCOaP9gpDZwZw"
-    mcool_file_json['genome_assembly'] = "GRCh38"
-    mcool_file_json['md5sum'] = '00000000000000000000000000000001'
-    mcool_file_with_different_genome_assembly = testapp.post_json(
-        '/file_processed', mcool_file_json).json['@graph'][0]
-
-    # Get the json for a viewconfig with an mcool file.
-    higlass_conf_uuid = "00000000-1111-0000-1111-000000000002"
-    response = testapp.get(
-        "/higlass-view-configs/{higlass_conf_uuid}/?format=json".format(
-            higlass_conf_uuid=higlass_conf_uuid))
-    higlass_json = response.json
-
-    # Try to add the mcool with a different genome assembly; it should fail.
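# The next request is expected to be rejected. Judging only from the
# assertions that follow, the failure response looks roughly like the
# sketch below; everything beyond the "success" flag and the quoted
# error fragments is an assumption:
assumed_failure_response = {
    "success": False,
    "errors": "All files are not in the same genome assembly: <file uuid>",
}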
response = testapp.post_json(
-        "/add_files_to_higlass_viewconf/", {
-            'higlass_viewconfig': higlass_json["viewconfig"],
-            'genome_assembly': higlass_json["genome_assembly"],
-            'files': [
-                "{uuid}".format(
-                    uuid=mcool_file_with_different_genome_assembly['uuid'])
-            ]
-        })
-
-    assert_true(response.json["success"] == False)
-    assert_true("All files are not" in response.json["errors"])
-    assert_true(mcool_file_with_different_genome_assembly['uuid'] in
-                response.json["errors"])
-
-    # Try to add an mcool with the same genome assembly.
-    response = testapp.post_json(
-        "/add_files_to_higlass_viewconf/", {
-            'higlass_viewconfig': higlass_json["viewconfig"],
-            'genome_assembly': higlass_json["genome_assembly"],
-            'files': ["{uuid}".format(uuid=mcool_file['uuid'])]
-        })
-
-    assert_true(response.json["success"] == True)
-
-    # Make sure the mcool displays are next to each other.
-    new_higlass_json = response.json["new_viewconfig"]
-
-    assert_true(len(new_higlass_json["views"]) == 2)
-
-    layout0 = new_higlass_json["views"][0]["layout"]
-    assert_true(layout0["x"] == 0)
-    assert_true(layout0["y"] == 0)
-    assert_true(layout0["w"] == 6)
-    assert_true(layout0["h"] == 12)
-
-    layout1 = new_higlass_json["views"][1]["layout"]
-    assert_true(layout1["x"] == 6)
-    assert_true(layout1["y"] == 0)
-    assert_true(layout1["w"] == 6)
-    assert_true(layout1["h"] == 12)
-
-    # mcools have locked views
-    for lock_name in ("locationLocks", "zoomLocks"):
-        locks = new_higlass_json[lock_name]
-
-        view0_uid = new_higlass_json["views"][0]["uid"]
-        view1_uid = new_higlass_json["views"][1]["uid"]
-
-        # The same lock applies to both views
-        assert_true(view0_uid in locks["locksByViewUid"])
-        assert_true(view1_uid in locks["locksByViewUid"])
-        assert_true(locks["locksByViewUid"][view0_uid] ==
-                    locks["locksByViewUid"][view1_uid])
-
-        lockUuid = locks["locksByViewUid"][view0_uid]
-        # The locks have non-None values
-        for view_uid in (view0_uid, view1_uid):
-            for index, lock_value in enumerate(
-                    locks["locksDict"][lockUuid][view_uid]):
-                assert_true(
-                    lock_value is not None,
-                    "{lock_name} for view {view_uid} should not be None: {actual}".format(
-                        lock_name=lock_name,
-                        view_uid=view_uid,
-                        actual=locks["locksDict"][lockUuid][view_uid]))
-
-        # Locks should have the same values for both views
-        assert_true(
-            len(locks["locksDict"][lockUuid][view0_uid]) == len(
-                locks["locksDict"][lockUuid][view1_uid]))
-
-        for index, lock_value in enumerate(
-                locks["locksDict"][lockUuid][view0_uid]):
-            assert_true(
-                lock_value == locks["locksDict"][lockUuid][view1_uid][index],
-                "{lock_name} values do not match for index {index}. Expected {lock_value}, Actual {actual}".format(
-                    lock_name=lock_name,
-                    lock_value=lock_value,
-                    index=index,
-                    actual=locks["locksDict"][lockUuid][view1_uid][index]))
-
-
-def test_correct_duplicate_tracks(testapp, higlass_mcool_viewconf,
-                                  mcool_file_json):
-    """When creating new views, make sure the correct number of 2D tracks are copied over.
-    """
-    # Post an mcool file and retrieve its uuid. Add a higlass_uid.
-    mcool_file_json['higlass_uid'] = "LTiacew8TjCOaP9gpDZwZw"
-    mcool_file_json['genome_assembly'] = "GRCm38"
-    mcool_file = testapp.post_json('/file_processed',
-                                   mcool_file_json).json['@graph'][0]
-
-    # Get the json for a viewconfig with an mcool file.
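# For reference, the lock assertions above imply this two-level
# bookkeeping: both view uids point at one shared lock uuid, and the
# lock stores one value list per view. The values here are made up;
# only the locksByViewUid/locksDict layout is exercised by the checks:
example_locks = {
    "locksByViewUid": {"view-a": "lock-1", "view-b": "lock-1"},
    "locksDict": {
        "lock-1": {
            "view-a": [1500000000.0, 1500000000.0, 10.0],
            "view-b": [1500000000.0, 1500000000.0, 10.0],
        }
    },
}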
higlass_conf_uuid = "00000000-1111-0000-1111-000000000002"
-    response = testapp.get(
-        "/higlass-view-configs/{higlass_conf_uuid}/?format=json".format(
-            higlass_conf_uuid=higlass_conf_uuid))
-    higlass_json = response.json
-
-    # Try to add an mcool with the same genome assembly.
-    response = testapp.post_json(
-        "/add_files_to_higlass_viewconf/", {
-            'higlass_viewconfig': higlass_json["viewconfig"],
-            'genome_assembly': higlass_json["genome_assembly"],
-            'files': ["{uuid}".format(uuid=mcool_file['uuid'])]
-        })
-
-    assert_true(response.json["success"] == True)
-
-    # Make sure the mcool displays are next to each other.
-    new_higlass_json = response.json["new_viewconfig"]
-
-    assert_true(len(new_higlass_json["views"]) == 2)
-
-    # Both views should have the same types of tracks.
-    view0 = new_higlass_json["views"][0]
-    view1 = new_higlass_json["views"][1]
-
-    for side in ("top", "left"):
-        assert_true(
-            len(view0["tracks"][side]) == 2,
-            "{side} does not have 2 tracks".format(side=side))
-
-        assert_true(
-            len(view0["tracks"][side]) == len(view1["tracks"][side]),
-            "{side} number of tracks do not match: {zero} versus {one}".format(
-                side=side,
-                zero=len(view0["tracks"][side]),
-                one=len(view1["tracks"][side])))
-        for i in range(len(view0["tracks"][side])):
-            assert_true(
-                view0["tracks"][side][i]["uid"] == view1["tracks"][side][i]["uid"],
-                "{side} track {index} does not match: {zero} versus {one}".format(
-                    side=side,
-                    index=i,
-                    zero=view0["tracks"][side][i]["uid"],
-                    one=view1["tracks"][side][i]["uid"]))
-
-
-def assert_expected_viewconf_dimensions(viewconf, expected_dimensions):
-    """ Given a viewconf and a list of expected dimensions, assert that each
-    view's layout has the expected position and size.
-
-    Args:
-        viewconf(obj): A nested dictionary containing a Higlass viewconfig.
-        expected_dimensions(list): A list of dictionaries, in the order of Higlass Viewconfig views.
-            Each dictionary should have the "x", "y", "w", "h" keys to represent the position and size of each view.
-
-    Returns:
-        Nothing
-
-    Raises:
-        AssertionError if a dimension is the wrong size.
-    """
-
-    # The correct number of views exist.
-    assert_true(
-        len(viewconf["views"]) == len(expected_dimensions),
-        "Expected {num_expected} views, but there were {num_actual} views instead.".format(
-            num_expected=len(expected_dimensions),
-            num_actual=len(viewconf["views"])))
-
-    for index, expected_layout in enumerate(expected_dimensions):
-        layout = viewconf["views"][index]["layout"]
-
-        # Make sure each dimension matches.
-        for dimension in ("x", "y", "w", "h"):
-            assert_true(
-                layout[dimension] == expected_layout[dimension],
-                "While looking at {num_expected} expected dimensions, index: {index}, dimension: {dim} mismatched.".format(
-                    num_expected=len(expected_dimensions),
-                    index=index,
-                    dim=dimension))
-
-
-def test_add_multiple_mcool_one_at_a_time(testapp, higlass_mcool_viewconf,
-                                          mcool_file_json):
-    """ Make sure you can add multiple mcool displays together, up to six.
-    Eventually we'll see a 3 x 2 grid.
-
-    Args:
-        testapp(obj): This object can make RESTful API calls to the test server.
-        higlass_mcool_viewconf(obj): Higlass view configuration for an mcool file.
-        mcool_file_json(dict): Fixture refers to an mcool file.
-
-    Returns:
-        Nothing
-
-    Raises:
-        AssertionError if the test fails.
-    """
-
-    # Post an mcool file and retrieve its uuid. Add a higlass_uid.
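# The expected_dimensions tuples asserted below follow a simple packing
# rule on HiGlass's 12-unit grid. This sketch is inferred from the
# asserted layouts, not taken from the server code: 1-2 views fill a
# full-height row, 3-4 views form a 2x2 grid of 6x6 panels, and 5-6
# views a 3x2 grid of 4x6 panels.
def expected_grid(num_views):
    cols = 1 if num_views == 1 else (2 if num_views <= 4 else 3)
    width = 12 // cols
    height = 12 if num_views <= 2 else 6
    return [{"x": (i % cols) * width, "y": (i // cols) * height,
             "w": width, "h": height} for i in range(num_views)]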
- mcool_file_json['higlass_uid'] = "LTiacew8TjCOaP9gpDZwZw" - mcool_file_json['genome_assembly'] = "GRCm38" - mcool_file = testapp.post_json('/file_processed', - mcool_file_json).json['@graph'][0] - - # Get the json for a viewconfig with a mcool file. - higlass_conf_uuid = "00000000-1111-0000-1111-000000000002" - response = testapp.get( - "/higlass-view-configs/{higlass_conf_uuid}/?format=json".format( - higlass_conf_uuid=higlass_conf_uuid)) - higlass_json = response.json - - # Add another mcool file. - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': higlass_json["viewconfig"], - 'genome_assembly': higlass_json["genome_assembly"], - 'files': ["{uuid}".format(uuid=mcool_file['uuid'])] - }) - - new_higlass_json = response.json["new_viewconfig"] - assert_true(response.json["success"] == True) - - # Add the third mcool file. It should be to the right of the first. - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': new_higlass_json, - 'genome_assembly': higlass_json["genome_assembly"], - 'files': ["{uuid}".format(uuid=mcool_file['uuid'])] - }) - - new_higlass_json = response.json["new_viewconfig"] - assert_true(response.json["success"] == True) - assert_true(len(new_higlass_json["views"]) == 3) - expected_dimensions = ({ - "x": 0, - "y": 0, - "w": 6, - "h": 6 - }, { - "x": 6, - "y": 0, - "w": 6, - "h": 6 - }, { - "x": 0, - "y": 6, - "w": 6, - "h": 6 - }, ) - assert_expected_viewconf_dimensions(new_higlass_json, expected_dimensions) - - # Add the fourth mcool file. It should be underneath the second. - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': new_higlass_json, - 'genome_assembly': higlass_json["genome_assembly"], - 'files': ["{uuid}".format(uuid=mcool_file['uuid'])] - }) - - new_higlass_json = response.json["new_viewconfig"] - assert_true(response.json["success"] == True) - assert_true(len(new_higlass_json["views"]) == 4) - expected_dimensions = ({ - "x": 0, - "y": 0, - "w": 6, - "h": 6 - }, { - "x": 6, - "y": 0, - "w": 6, - "h": 6 - }, { - "x": 0, - "y": 6, - "w": 6, - "h": 6 - }, { - "x": 6, - "y": 6, - "w": 6, - "h": 6 - }, ) - assert_expected_viewconf_dimensions(new_higlass_json, expected_dimensions) - - # Add the fifth mcool file. It should be to the right of the fourth. - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': new_higlass_json, - 'genome_assembly': higlass_json["genome_assembly"], - 'files': ["{uuid}".format(uuid=mcool_file['uuid'])] - }) - - new_higlass_json = response.json["new_viewconfig"] - assert_true(response.json["success"] == True) - assert_true(len(new_higlass_json["views"]) == 5) - expected_dimensions = ({ - "x": 0, - "y": 0, - "w": 4, - "h": 6 - }, { - "x": 4, - "y": 0, - "w": 4, - "h": 6 - }, { - "x": 8, - "y": 0, - "w": 4, - "h": 6 - }, { - "x": 0, - "y": 6, - "w": 4, - "h": 6 - }, { - "x": 4, - "y": 6, - "w": 4, - "h": 6 - }, ) - assert_expected_viewconf_dimensions(new_higlass_json, expected_dimensions) - - # Add the sixth mcool file. It should be underneath the fifth. 
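# A seventh 2D file is refused later in this test. Based only on the
# asserted error message, the server-side guard amounts to something
# like the sketch below (the actual implementation may differ):
MAX_VIEWS = 6

def views_would_overflow(current_view_count, new_2d_file_count):
    return current_view_count + new_2d_file_count > MAX_VIEWS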
- response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': new_higlass_json, - 'genome_assembly': higlass_json["genome_assembly"], - 'files': ["{uuid}".format(uuid=mcool_file['uuid'])] - }) - - new_higlass_json = response.json["new_viewconfig"] - assert_true(response.json["success"] == True) - assert_true(len(new_higlass_json["views"]) == 6) - expected_dimensions = ({ - "x": 0, - "y": 0, - "w": 4, - "h": 6 - }, { - "x": 4, - "y": 0, - "w": 4, - "h": 6 - }, { - "x": 8, - "y": 0, - "w": 4, - "h": 6 - }, { - "x": 0, - "y": 6, - "w": 4, - "h": 6 - }, { - "x": 4, - "y": 6, - "w": 4, - "h": 6 - }, { - "x": 8, - "y": 6, - "w": 4, - "h": 6 - }, ) - assert_expected_viewconf_dimensions(new_higlass_json, expected_dimensions) - - # Try to add a seventh mcool file. It should fail because there are six already. - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': new_higlass_json, - 'files': ["{uuid}".format(uuid=mcool_file['uuid'])] - }) - assert_true(response.json["success"] == False) - assert_true("You cannot have more than 6 views in a single display." in - response.json["errors"]) - - -def test_add_multiple_mcool_at_once(testapp, higlass_mcool_viewconf, - mcool_file_json): - """ Make sure you can add multiple mcool displays together, up to six. - Eventually we'll see a 3 x 2 grid. - - Args: - testapp(obj): This object can make RESTful API calls to the test server. - higlass_mcool_viewconf(obj): Higlass view configuration for an mcool file. - mcool_file_json(dict): Fixture refers to an mcool file. - - Returns: - Nothing - - Raises: - AssertionError if the test fails. - """ - - # Post an mcool file and retrieve its uuid. Add a higlass_uid. - mcool_file_json['higlass_uid'] = "LTiacew8TjCOaP9gpDZwZw" - mcool_file_json['genome_assembly'] = "GRCm38" - mcool_file = testapp.post_json('/file_processed', - mcool_file_json).json['@graph'][0] - - # Get the json for a viewconfig with a mcool file. - higlass_conf_uuid = "00000000-1111-0000-1111-000000000002" - response = testapp.get( - "/higlass-view-configs/{higlass_conf_uuid}/?format=json".format( - higlass_conf_uuid=higlass_conf_uuid)) - higlass_json = response.json - - # Try to add 2 mcool files. Confirm there are now 3 mcool files. - response = testapp.post_json("/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': - higlass_json["viewconfig"], - 'genome_assembly': - higlass_json["genome_assembly"], - 'files': [mcool_file['uuid'], mcool_file['uuid']] - }) - - new_higlass_json = response.json["new_viewconfig"] - assert_true(response.json["success"] == True) - assert_true(len(new_higlass_json["views"]) == 3) - - # Try to add 4 files. This should fail because you tried to have more than 6 views. - response = testapp.post_json("/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': - new_higlass_json, - 'genome_assembly': - higlass_json["genome_assembly"], - 'files': [ - mcool_file['uuid'], mcool_file['uuid'], mcool_file['uuid'], - mcool_file['uuid'] - ] - }) - assert_true(response.json["success"] == False) - assert_true("You cannot have more than 6 views in a single display." in - response.json["errors"]) - - -def test_add_bedGraph_to_multiple_mcool( - testapp, mcool_file_json, higlass_mcool_viewconf, bedGraph_file_json): - """ With at least 2 mcool displays, try to add a bedGraph. - The bedGraph should be atop the mcool displays. - - Args: - testapp(obj): This object can make RESTful API calls to the test server. - mcool_file_json(dict): Fixture refers to an mcool file. 
- higlass_mcool_viewconf(obj): Higlass view configuration for an mcool file. - bedGraph_file_json(dict): Fixture refers to a bedgraph file. - - Returns: - Nothing - - Raises: - AssertionError if the test fails. - """ - - # Post an mcool file and retrieve its uuid. Add a higlass_uid. - mcool_file_json['higlass_uid'] = "LTiacew8TjCOaP9gpDZwZw" - mcool_file_json['genome_assembly'] = "GRCm38" - mcool_file = testapp.post_json('/file_processed', - mcool_file_json).json['@graph'][0] - - # Add the bedGraph file with a higlass uid and a genome asssembly. - bedGraph_file_json['higlass_uid'] = "Y08H_toDQ-OxidYJAzFPXA" - bedGraph_file_json['genome_assembly'] = "GRCm38" - bedGraph_file_json['md5sum'] = '00000000000000000000000000000001' - bg_file = testapp.post_json('/file_processed', - bedGraph_file_json).json['@graph'][0] - - # Get the json for a viewconfig with a mcool file. - higlass_conf_uuid = "00000000-1111-0000-1111-000000000002" - response = testapp.get( - "/higlass-view-configs/{higlass_conf_uuid}/?format=json".format( - higlass_conf_uuid=higlass_conf_uuid)) - higlass_json = response.json - - # Add another mcool file. - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': higlass_json["viewconfig"], - 'genome_assembly': higlass_json["genome_assembly"], - 'files': ["{uuid}".format(uuid=mcool_file['uuid'])] - }) - - new_higlass_json = response.json["new_viewconfig"] - assert_true(response.json["success"] == True) - assert_true(len(new_higlass_json["views"]) == 2) - - old_top_track_count = {} - for index, view in enumerate(new_higlass_json["views"]): - old_top_track_count[index] = len(view["tracks"]["top"]) - - # Add a bedGraph file. - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': new_higlass_json, - 'genome_assembly': higlass_json["genome_assembly"], - 'files': ["{uuid}".format(uuid=bg_file['uuid'])] - }) - - # The bedGraph file should be above the mcool displays. - new_higlass_json = response.json["new_viewconfig"] - assert_true(response.json["success"] == True) - assert_true(len(new_higlass_json["views"]) == 2) - - top_track_count = {} - for index, view in enumerate(new_higlass_json["views"]): - top_track_count[index] = len(view["tracks"]["top"]) - - # It should be on top of every view, and it did not create a new view. - assert_true(len(top_track_count.keys()) == len(old_top_track_count.keys())) - for index in range(len(top_track_count.keys())): - assert_true(top_track_count[index] - old_top_track_count[index] == 1) - - -def test_add_new_mcool_file(testapp, mcool_file_json, higlass_mcool_viewconf, - bedGraph_file_json): - """ Create one view with a mcool and bedgraph file. Add another mcool file. - The bedGraph should be atop the mcool displays. - - Args: - testapp(obj): This object can make RESTful API calls to the test server. - mcool_file_json(dict): Fixture refers to an mcool file. - higlass_mcool_viewconf(obj): Higlass view configuration for an mcool file. - bedGraph_file_json(dict): Fixture refers to a bedgraph file. - - Returns: - Nothing - - Raises: - AssertionError if the test fails. - """ - - # Post an mcool file and retrieve its uuid. Add a higlass_uid. - mcool_file_json['higlass_uid'] = "LTiacew8TjCOaP9gpDZwZw" - mcool_file_json['genome_assembly'] = "GRCm38" - mcool_file = testapp.post_json('/file_processed', - mcool_file_json).json['@graph'][0] - - # Add the bedGraph file with a higlass uid and a genome asssembly. 
- bedGraph_file_json['higlass_uid'] = "Y08H_toDQ-OxidYJAzFPXA" - bedGraph_file_json['genome_assembly'] = "GRCm38" - bedGraph_file_json['md5sum'] = '00000000000000000000000000000001' - bg_file = testapp.post_json('/file_processed', - bedGraph_file_json).json['@graph'][0] - - # Get the json for a viewconfig with a mcool file. - higlass_conf_uuid = "00000000-1111-0000-1111-000000000002" - response = testapp.get( - "/higlass-view-configs/{higlass_conf_uuid}/?format=json".format( - higlass_conf_uuid=higlass_conf_uuid)) - higlass_json = response.json - - old_top_track_count = len( - higlass_json["viewconfig"]["views"][0]["tracks"]["top"]) - - # Add a bedGraph file. - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': higlass_json["viewconfig"], - 'genome_assembly': higlass_json["genome_assembly"], - 'files': ["{uuid}".format(uuid=bg_file['uuid'])] - }) - - new_higlass_json = response.json["new_viewconfig"] - assert_true(response.json["success"] == True, "911") - assert_true(len(new_higlass_json["views"]) == 1, "912") - assert_true( - len(new_higlass_json["views"][0]["tracks"]["top"]) == - old_top_track_count + 1, "913") - assert_true( - len(new_higlass_json["views"][0]["tracks"]["center"][0]["contents"]) == - 1, "914") - - # Add another mcool file. - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': new_higlass_json, - 'genome_assembly': higlass_json["genome_assembly"], - 'files': ["{uuid}".format(uuid=mcool_file['uuid'])] - }) - - # The bedGraph file should be above both views. - new_higlass_json = response.json["new_viewconfig"] - assert_true(response.json["success"] == True, "925") - assert_true(len(new_higlass_json["views"]) == 2, "926") - - top_track_count = {} - for index, view in enumerate(new_higlass_json["views"]): - top_track_count[index] = len(view["tracks"]["top"]) - - # It should be on top of every view, and it did not create a new view. - for index in range(len(top_track_count.keys())): - assert_true( - top_track_count[index] == 3, - "Expected 3 tracks on top for view {view}, found {actual} instead.". - format(view=index, actual=top_track_count[index])) - - -def test_bogus_fileuuid(testapp, higlass_mcool_viewconf): - """ Function should fail gracefully if there is no file with the given uuid. - - Args: - testapp(obj): This object can make RESTful API calls to the test server. - higlass_mcool_viewconf(obj): Higlass view configuration for an mcool file. - - Returns: - Nothing - - Raises: - AssertionError if the test fails. - """ - - # Get the json for a viewconfig with a mcool file. - higlass_conf_uuid = "00000000-1111-0000-1111-000000000002" - response = testapp.get( - "/higlass-view-configs/{higlass_conf_uuid}/?format=json".format( - higlass_conf_uuid=higlass_conf_uuid)) - higlass_json = response.json - - # Add a nonexistent file. - response = testapp.post_json("/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': - higlass_json["viewconfig"], - 'genome_assembly': - higlass_json["genome_assembly"], - 'files': ["Bogus"] - }) - - # Expect failure. - assert_true(response.json["success"] == False) - assert_true("does not exist" in response.json["errors"]) - - -def test_add_files_by_accession(testapp, mcool_file_json, - higlass_blank_viewconf, bedGraph_file_json): - """ Add files by the accession instead of the uuid. - - Args: - testapp(obj): This object can make RESTful API calls to the test server. - mcool_file_json(dict): Fixture refers to an mcool file. 
higlass_blank_viewconf(obj): Empty Higlass view configuration with no file or genome assembly.
-        bedGraph_file_json(dict): Fixture refers to a bedgraph file.
-
-    Returns:
-        Nothing
-
-    Raises:
-        AssertionError if the test fails.
-    """
-    # Add an mcool file. Add a higlass_uid.
-    mcool_file_json['higlass_uid'] = "LTiacew8TjCOaP9gpDZwZw"
-    mcool_file_json['genome_assembly'] = "GRCm38"
-    mcool_file = testapp.post_json('/file_processed',
-                                   mcool_file_json).json['@graph'][0]
-    assert_true(mcool_file["accession"])
-
-    # Add a bg file.
-    bedGraph_file_json['higlass_uid'] = "Y08H_toDQ-OxidYJAzFPXA"
-    bedGraph_file_json['genome_assembly'] = "GRCm38"
-    bedGraph_file_json['md5sum'] = '00000000000000000000000000000001'
-    bg_file = testapp.post_json('/file_processed',
-                                bedGraph_file_json).json['@graph'][0]
-    assert_true(bg_file["accession"])
-
-    # Get the Higlass Viewconf that will be edited.
-    higlass_conf_uuid = "00000000-1111-0000-1111-000000000000"
-    response = testapp.get(
-        "/higlass-view-configs/{higlass_conf_uuid}/?format=json".format(
-            higlass_conf_uuid=higlass_conf_uuid))
-    higlass_json = response.json
-
-    # Try to add the files to the viewconf by passing in the accession.
-    response = testapp.post_json("/add_files_to_higlass_viewconf/", {
-        'higlass_viewconfig': higlass_json["viewconfig"],
-        'genome_assembly': higlass_json["genome_assembly"],
-        'files': [mcool_file["accession"], bg_file["accession"]]
-    })
-
-    assert_true(response.json["success"] == True)
-
-    # Get the new json.
-    new_higlass_json = response.json["new_viewconfig"]
-
-    # There should be 1 view.
-    assert_true(len(new_higlass_json["views"]) == 1)
-
-    old_tracks = higlass_json["viewconfig"]["views"][0]["tracks"]
-    tracks = new_higlass_json["views"][0]["tracks"]
-
-    # 1 central track should be in the new view.
-    assert_true(len(tracks["center"][0]["contents"]) == 1)
-    assert_true(tracks["center"][0]["contents"][0]["type"] == "heatmap")
-
-    # 1 more track should be on top.
-    assert_true(len(tracks["top"]) == len(old_tracks["top"]) + 1)
-
-
-def test_add_bedGraph_to_mcool(testapp, higlass_mcool_viewconf,
-                               bedGraph_file_json):
-    """ Given a viewconf with an mcool file, a bedGraph with a mismatched
-    genome assembly should be rejected, while one with a matching assembly
-    should be added on top.
-
-    Args:
-        testapp(obj): This object can make RESTful API calls to the test server.
-        higlass_mcool_viewconf(obj): Higlass view configuration for an mcool file.
-        bedGraph_file_json(dict): Fixture refers to a bedgraph file.
-
-    Returns:
-        Nothing
-
-    Raises:
-        AssertionError if the test fails.
-    """
-
-    # Add the bedGraph file with a higlass uid and a genome assembly.
-    bedGraph_file_json['higlass_uid'] = "Y08H_toDQ-OxidYJAzFPXA"
-    bedGraph_file_json['genome_assembly'] = "GRCm38"
-    bg_file = testapp.post_json('/file_processed',
-                                bedGraph_file_json).json['@graph'][0]
-
-    # Add the bedGraph file with a different genome assembly.
-    bedGraph_file_json['higlass_uid'] = "Y08H_toDQ-OxidYJAzFPXA"
-    bedGraph_file_json['genome_assembly'] = "GRCh38"
-    bedGraph_file_json['md5sum'] = '00000000000000000000000000000001'
-    bg_file_with_different_genome_assembly = testapp.post_json(
-        '/file_processed', bedGraph_file_json).json['@graph'][0]
-
-    # Get the json for a viewconfig with an mcool file.
-    higlass_conf_uuid = "00000000-1111-0000-1111-000000000002"
-    response = testapp.get(
-        "/higlass-view-configs/{higlass_conf_uuid}/?format=json".format(
-            higlass_conf_uuid=higlass_conf_uuid))
-    higlass_json = response.json
-
-    # Try to add the bedGraph with a different genome assembly; it should fail.
- response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': - higlass_json["viewconfig"], - 'genome_assembly': - higlass_json["genome_assembly"], - 'files': [ - "{uuid}".format( - uuid=bg_file_with_different_genome_assembly['uuid']) - ] - }) - - assert_true(response.json["success"] == False) - assert_true("genome assembly" in response.json["errors"]) - assert_true(bg_file_with_different_genome_assembly['uuid'] in - response.json["errors"]) - - # Try to add an mcool with the same genome assembly. - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': higlass_json["viewconfig"], - 'genome_assembly': higlass_json["genome_assembly"], - 'files': ["{uuid}".format(uuid=bg_file['uuid'])] - }) - - assert_true(response.json["success"] == True) - - # Make sure the mcool displays are next to each other. - new_higlass_json = response.json["new_viewconfig"] - - assert_true(len(new_higlass_json["views"]) == 1) - - -def test_add_bigwig_higlass(testapp, higlass_mcool_viewconf, bigwig_file_json): - """ Given a viewconf with an mcool file, the viewconf should add a bigwig on top. - - Args: - testapp(obj): This object can make RESTful API calls to the test server. - higlass_mcool_viewconf(obj): Higlass view configuration for an mcool file. - bigwig_file_json(dict): Fixture refers to a bigwig file. - - Returns: - Nothing - - Raises: - AssertionError if the test fails. - """ - - # Get a bigwig file to add. - bigwig_file_json['higlass_uid'] = "Y08H_toDQ-OxidYJAzFPXA" - bigwig_file_json['md5sum'] = '00000000000000000000000000000001' - bigwig_file_json['genome_assembly'] = "GRCm38" - bigwig_file = testapp.post_json('/file_processed', - bigwig_file_json).json['@graph'][0] - - # Get the Higlass Viewconf that will be edited. - higlass_conf_uuid = "00000000-1111-0000-1111-000000000002" - response = testapp.get( - "/higlass-view-configs/{higlass_conf_uuid}/?format=json".format( - higlass_conf_uuid=higlass_conf_uuid)) - higlass_json = response.json - - # Try to add the bedGraph to the existing viewconf. - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': higlass_json["viewconfig"], - 'genome_assembly': higlass_json["genome_assembly"], - 'files': ["{uuid}".format(uuid=bigwig_file['uuid'])] - }) - - # Get the new json. - new_higlass_json = response.json["new_viewconfig"] - - # Make sure the bedGraph has been added above the mcool file. - assert_true(len(new_higlass_json["views"]) == 1) - - tracks = new_higlass_json["views"][0]["tracks"] - old_tracks = higlass_json["viewconfig"]["views"][0]["tracks"] - - # Assert_true(there is still 1 central view) - assert_true(len(tracks["center"][0]["contents"]) == 1) - assert_true(tracks["center"][0]["contents"][0]["type"] == "heatmap") - - # Only one new top track should have appeared. - assert_true(len(tracks["left"]) == len(old_tracks["left"])) - assert_true(len(tracks["top"]) == len(old_tracks["top"]) + 1) - - -def test_add_bigbed_higlass(testapp, higlass_mcool_viewconf, bigbed_file_json): - """ Given a viewconf with an mcool file, the viewconf should add a bigbed on top. - - Args: - testapp(obj): This object can make RESTful API calls to the test server. - higlass_mcool_viewconf(obj): Higlass view configuration for an mcool file. - bigbed_file_json(dict): Fixture refers to a bigbed file. - - Returns: - Nothing - - Raises: - AssertionError if the test fails. - """ - - # Get a bigbed file to add. 
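# The 1D-file tests in this file all repeat the same before/after track
# bookkeeping. A hedged helper version of that pattern (illustrative,
# not part of the deleted suite):
def assert_one_new_top_track(old_tracks, new_tracks):
    # a 1D file should add exactly one top track...
    assert len(new_tracks["top"]) == len(old_tracks["top"]) + 1
    # ...and leave the left tracks and the central heatmap untouched
    assert len(new_tracks["left"]) == len(old_tracks["left"])
    assert len(new_tracks["center"][0]["contents"]) == 1
    assert new_tracks["center"][0]["contents"][0]["type"] == "heatmap"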
- bigbed_file_json['higlass_uid'] = "FTv3kHMmSlm0YTmtdOYAPA" - bigbed_file_json['md5sum'] = '00000000000000000000000000000001' - bigbed_file_json['genome_assembly'] = "GRCm38" - bigbed_file = testapp.post_json('/file_processed', - bigbed_file_json).json['@graph'][0] - - # Get the Higlass Viewconf that will be edited. - higlass_conf_uuid = "00000000-1111-0000-1111-000000000002" - response = testapp.get( - "/higlass-view-configs/{higlass_conf_uuid}/?format=json".format( - higlass_conf_uuid=higlass_conf_uuid)) - higlass_json = response.json - - # Try to add the bigbed to the existing viewconf. - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': higlass_json["viewconfig"], - 'genome_assembly': higlass_json["genome_assembly"], - 'files': ["{uuid}".format(uuid=bigbed_file['uuid'])] - }) - - # Get the new json. - new_higlass_json = response.json["new_viewconfig"] - - # Make sure the bigbed has been added above the mcool file. - assert_true(len(new_higlass_json["views"]) == 1) - - tracks = new_higlass_json["views"][0]["tracks"] - old_tracks = higlass_json["viewconfig"]["views"][0]["tracks"] - - # There should still be 1 central track. - assert_true(len(tracks["center"][0]["contents"]) == 1) - assert_true(tracks["center"][0]["contents"][0]["type"] == "heatmap") - - # Make sure the view has an initialXDomain and initialYDomain. - assert_true(len(new_higlass_json["views"][0]["initialXDomain"]) == 2) - assert_true(new_higlass_json["views"][0]["initialXDomain"][0] != None) - assert_true(new_higlass_json["views"][0]["initialXDomain"][1] != None) - - assert_true(len(new_higlass_json["views"][0]["initialYDomain"]) == 2) - assert_true(new_higlass_json["views"][0]["initialYDomain"][0] != None) - assert_true(new_higlass_json["views"][0]["initialYDomain"][1] != None) - - # Only one new top track should have appeared. - assert_true(len(tracks["left"]) == len(old_tracks["left"])) - assert_true(len(tracks["top"]) == len(old_tracks["top"]) + 1) - - # Get the top track and check the format. - found_annotation_track = False - found_chromosome_track = False - found_data_track = False - - for track in tracks["top"]: - if not found_annotation_track and "annotation-track" in track["uid"]: - found_annotation_track = True - if not found_chromosome_track and "chromosome-track" in track["uid"]: - found_chromosome_track = True - if not found_data_track and "tilesetUid" in track and track["tilesetUid"] == bigbed_file_json['higlass_uid']: - found_data_track = True - - assert_true(track["type"] == "horizontal-vector-heatmap") - - assert_true(track["height"] < 20) - - assert_true("options" in track) - options = track["options"] - - assert_true("colorRange" in options) - assert_true(len(options["colorRange"]) == 256) - - assert_true("labelPosition" in options) - assert_true(options["labelPosition"] == "topLeft") - - assert_true(found_annotation_track == True) - assert_true(found_chromosome_track == True) - assert_true(found_data_track == True) - - -def test_add_bed_with_beddb(testapp, higlass_mcool_viewconf, - bed_beddb_file_json): - """ Add a bed file (with a beddb used as a supporting file) to the HiGlass display. - - Args: - testapp(obj): This object can make RESTful API calls to the test server. - higlass_mcool_viewconf(obj): Higlass view configuration for an mcool file. - bed_beddb_file_json(dict): Fixture refers to a bed file with a supporting beddb file. - - Returns: - Nothing - - Raises: - AssertionError if the test fails. - """ - - # Get a file to add.
- bed_beddb_file_json['higlass_uid'] = "Y08H_toDQ-OxidYJAzFPXA" - bed_beddb_file_json['md5sum'] = '00000000000000000000000000000001' - bed_beddb_file_json['genome_assembly'] = "GRCm38" - bed_file = testapp.post_json('/file_processed', - bed_beddb_file_json).json['@graph'][0] - - # Get the Higlass Viewconf that will be edited. - higlass_conf_uuid = "00000000-1111-0000-1111-000000000002" - response = testapp.get( - "/higlass-view-configs/{higlass_conf_uuid}/?format=json".format( - higlass_conf_uuid=higlass_conf_uuid)) - higlass_json = response.json - - # Try to add the file to the existing viewconf. - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': higlass_json["viewconfig"], - 'genome_assembly': higlass_json["genome_assembly"], - 'files': ["{uuid}".format(uuid=bed_file['uuid'])] - }) - - # Get the new json. - new_higlass_json = response.json["new_viewconfig"] - - # There should be 1 view - assert_true(response.json["errors"] == '') - assert_true(response.json["success"]) - - assert_true(len(new_higlass_json["views"]) == 1) - - # The view should have a new top track - tracks = new_higlass_json["views"][0]["tracks"] - old_tracks = higlass_json["viewconfig"]["views"][0]["tracks"] - - assert_true(len(tracks["left"]) == len(old_tracks["left"])) - assert_true(len(tracks["top"]) == len(old_tracks["top"]) + 1) - - # Central track is unchanged - assert_true(len(tracks["center"][0]["contents"]) == 1) - assert_true(tracks["center"][0]["contents"][0]["type"] == "heatmap") - - # The top track should be a bed-like track - found_data_track = False - for track in tracks["top"]: - if "tilesetUid" in track and track["tilesetUid"] == bed_beddb_file_json['higlass_uid']: - found_data_track = True - assert_true(track["type"] == "bedlike") - - assert_true(found_data_track == True) - - -def test_add_beddb(testapp, higlass_mcool_viewconf, beddb_file_json): - """ Add a beddb file to the HiGlass display. - - Args: - testapp(obj): This object can make RESTful API calls to the test server. - higlass_mcool_viewconf(obj): Higlass view configuration for an mcool file. - beddb_file_json(dict): Fixture refers to a beddb file. - - Returns: - Nothing - - Raises: - AssertionError if the test fails. - """ - - # Add the beddb file. - genome_assembly = "GRCm38" - beddb_file_json['higlass_uid'] = "Y08H_toDQ-OxidYJAzFPXA" - beddb_file_json['md5sum'] = '00000000000000000000000000000001' - beddb_file_json['genome_assembly'] = genome_assembly - bed_file = testapp.post_json('/file_processed', - beddb_file_json).json['@graph'][0] - - # Get the Higlass Viewconf that will be edited. - higlass_conf_uuid = "00000000-1111-0000-1111-000000000002" - response = testapp.get( - "/higlass-view-configs/{higlass_conf_uuid}/?format=json".format( - higlass_conf_uuid=higlass_conf_uuid)) - higlass_json = response.json - - # Try to add the file to the existing viewconf. - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': higlass_json["viewconfig"], - 'genome_assembly': higlass_json["genome_assembly"], - 'files': ["{uuid}".format(uuid=bed_file['uuid'])] - }) - - # Get the new json.
- new_higlass_json = response.json["new_viewconfig"] - - # There should be 1 view - assert_true(response.json["errors"] == '') - assert_true(response.json["success"]) - - assert_true(len(new_higlass_json["views"]) == 1) - - # The view should have a new top track and a new left track - tracks = new_higlass_json["views"][0]["tracks"] - old_tracks = higlass_json["viewconfig"]["views"][0]["tracks"] - - assert_true(len(tracks["left"]) == len(old_tracks["left"]) + 1) - assert_true(len(tracks["top"]) == len(old_tracks["top"]) + 1) - - # Central track is unchanged - assert_true(len(tracks["center"][0]["contents"]) == 1) - - # The top track should contain a gene annotation track in the first spot - track = tracks["top"][0] - assert_true(track["tilesetUid"] == beddb_file_json['higlass_uid']) - assert_true(track["height"] == 55) - assert_true(track["options"]["geneAnnotationHeight"] == 12) - assert_true(track["type"] == "horizontal-gene-annotations") - - # The left track should contain a gene annotation track in the first spot - left_track = tracks["left"][0] - assert_true(left_track["tilesetUid"] == beddb_file_json['higlass_uid']) - assert_true(left_track["width"] == 55) - assert_true(left_track["options"]["geneAnnotationHeight"] == 12) - assert_true(left_track["type"] == "vertical-gene-annotations") - - # uids should be different - assert_true(left_track["uid"] != track["uid"]) - - # The searchbar needs to be updated, too - main_view = new_higlass_json["views"][0] - assert_true("genomePositionSearchBox" in main_view) - assert_true("chromInfoId" in main_view["genomePositionSearchBox"]) - assert_true( - main_view["genomePositionSearchBox"]["chromInfoId"] == genome_assembly) - assert_true("autocompleteId" in main_view["genomePositionSearchBox"]) - assert_true(main_view["genomePositionSearchBox"]["autocompleteId"] == - beddb_file_json['higlass_uid']) - assert_true("visible" in main_view["genomePositionSearchBox"]) - assert_true(main_view["genomePositionSearchBox"]["visible"] == True) - - assert_true("autocompleteSource" in main_view) - assert_true( - beddb_file_json['higlass_uid'] in main_view["autocompleteSource"]) - - -def test_add_chromsizes(testapp, higlass_blank_viewconf, chromsizes_file_json): - """ Add a chromsizes file; only a top chromosome-labels track should be added to the view. - - Args: - testapp(obj): This object can make RESTful API calls to the test server. - higlass_blank_viewconf(obj): Empty Higlass view configuration with no file or genome assembly. - chromsizes_file_json(dict): Fixture refers to a chromsizes file. - - Returns: - Nothing - - Raises: - AssertionError if the test fails. - """ - # Get a file to add. - genome_assembly = "GRCm38" - chromsizes_file_json['higlass_uid'] = "Y08H_toDQ-OxidYJAzFPXA" - chromsizes_file_json['md5sum'] = '00000000000000000000000000000001' - chromsizes_file_json['genome_assembly'] = genome_assembly - chrom_file = testapp.post_json('/file_reference', - chromsizes_file_json).json['@graph'][0] - - # Get the Higlass Viewconf that will be edited. - higlass_conf_uuid = "00000000-1111-0000-1111-000000000000" - response = testapp.get( - "/higlass-view-configs/{higlass_conf_uuid}/?format=json".format( - higlass_conf_uuid=higlass_conf_uuid)) - higlass_json = response.json - - # Try to add the file to the existing viewconf. - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'genome_assembly': higlass_json["genome_assembly"], - 'files': ["{uuid}".format(uuid=chrom_file['uuid'])] - }) - - # Get the new json.
- new_higlass_json = response.json["new_viewconfig"] - - # There should be 1 view - assert_true(response.json["errors"] == '') - assert_true(response.json["success"]) - - assert_true(len(new_higlass_json["views"]) == 1) - - # The view should have a new top track - tracks = new_higlass_json["views"][0]["tracks"] - old_tracks = higlass_json["viewconfig"]["views"][0]["tracks"] - - assert_true(len(tracks["top"]) == len(old_tracks["top"]) + 1) - - # There is no left track or central content, so don't add the track to the left. - assert_true(len(tracks["left"]) == len(old_tracks["left"])) - - # There are no other 2D views, so we should not have a center track. - assert_true(len(tracks["center"][0]["contents"]) == 0) - - # The top track should have chromosome labels - found_top_data_track = False - for track in tracks["top"]: - if "tilesetUid" in track and track["tilesetUid"] == chromsizes_file_json['higlass_uid']: - found_top_data_track = True - assert_true(track["type"] == "horizontal-chromosome-labels") - - assert_true(found_top_data_track == True) - - -def test_add_2d_chromsizes(testapp, higlass_blank_viewconf, - chromsizes_file_json, mcool_file_json): - """ Add a chromsizes file and add top, left, and center tracks to the view. - - Args: - testapp(obj): This object can make RESTful API calls to the test server. - higlass_blank_viewconf(obj): Empty Higlass view configuration with no file or genome assembly. - chromsizes_file_json(dict): Fixture refers to a chromsizes file. - mcool_file_json(dict): Fixture refers to an mcool file. - - Returns: - Nothing - - Raises: - AssertionError if the test fails. - """ - # Get a file to add. - genome_assembly = "GRCm38" - chromsizes_file_json['higlass_uid'] = "Y08H_toDQ-OxidYJAzFPXA" - chromsizes_file_json['md5sum'] = '00000000000000000000000000000001' - chromsizes_file_json['genome_assembly'] = genome_assembly - chrom_file = testapp.post_json('/file_reference', - chromsizes_file_json).json['@graph'][0] - - # Get the Higlass Viewconf that will be edited. - higlass_conf_uuid = "00000000-1111-0000-1111-000000000000" - response = testapp.get( - "/higlass-view-configs/{higlass_conf_uuid}/?format=json".format( - higlass_conf_uuid=higlass_conf_uuid)) - higlass_json = response.json - - # Post an mcool file and retrieve its uuid. Add a higlass_uid. - mcool_file_json['higlass_uid'] = "LTiacew8TjCOaP9gpDZwZw" - mcool_file_json['genome_assembly'] = "GRCm38" - mcool_file = testapp.post_json('/file_processed', - mcool_file_json).json['@graph'][0] - - # Try to add the file to the existing viewconf. - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'genome_assembly': - higlass_json["genome_assembly"], - 'files': [ - "{uuid}".format(uuid=chrom_file['uuid']), - "{uuid}".format(uuid=mcool_file['uuid']), - ] - }) - - # Get the new json. - new_higlass_json = response.json["new_viewconfig"] - - # There should be 1 view - assert_true(response.json["errors"] == '') - assert_true(response.json["success"]) - - assert_true(len(new_higlass_json["views"]) == 1) - - # The view should have a new top track and a new left track - tracks = new_higlass_json["views"][0]["tracks"] - old_tracks = higlass_json["viewconfig"]["views"][0]["tracks"] - - assert_true(len(tracks["left"]) == len(old_tracks["left"]) + 1) - assert_true(len(tracks["top"]) == len(old_tracks["top"]) + 1) - - # The top and left tracks should have chromosome labels as the last track.
- for side in ("top", "left"): - assert_true("tilesetUid" in tracks[side][-1]) - assert_true(tracks[side][-1]["tilesetUid"] == chromsizes_file_json[ - 'higlass_uid']) - - type_by_side = { - "top": "horizontal-chromosome-labels", - "left": "vertical-chromosome-labels", - } - assert_true(tracks[side][-1]["type"] == type_by_side[side]) - - # The view should also have a new center track with 2 views inside - assert_true(len(tracks["center"][0]["contents"]) == 2) - - # The central contents should have a chromosome grid and the mcool file. - found_central_data_track = False - - for track in tracks["center"][0]["contents"]: - found_central_data_track = True - if "tilesetUid" in track and track["tilesetUid"] == chromsizes_file_json['higlass_uid']: - assert_true(track["type"] == "2d-chromosome-grid") - if "tilesetUid" in track and track["tilesetUid"] == mcool_file_json['higlass_uid']: - assert_true(track["type"] == "heatmap") - assert_true(found_central_data_track == True) - - # Add another 2D view to this existing view - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': new_higlass_json, - 'genome_assembly': higlass_json["genome_assembly"], - 'files': [ - "{uuid}".format(uuid=mcool_file['uuid']), - ] - }) - - two_view_higlass_json = response.json["new_viewconfig"] - assert_true(response.json["errors"] == '') - assert_true(response.json["success"]) - - # Assert there are 2 views - assert_true(len(two_view_higlass_json["views"]) == 2) - - # There is only 1 grid in each view and it's the last item (draw order is important) - for view in two_view_higlass_json["views"]: - center_track_contents = view["tracks"]["center"][0]["contents"] - assert_true(center_track_contents[0]["type"] != "2d-chromosome-grid") - assert_true(center_track_contents[1]["type"] == "2d-chromosome-grid") - - -def test_remove_1d(testapp, higlass_mcool_viewconf, chromsizes_file_json, - bigwig_file_json, mcool_file_json): - genome_assembly = "GRCm38" - - # Save the mcool file and add a higlass_uid. - mcool_file_json['higlass_uid'] = "LTiacew8TjCOaP9gpDZwZw" - mcool_file_json['genome_assembly'] = genome_assembly - mcool_file = testapp.post_json('/file_processed', - mcool_file_json).json['@graph'][0] - - # Add the chromsizes file. - chromsizes_file_json['higlass_uid'] = "Y08H_toDQ-OxidYJAzFPXA" - chromsizes_file_json['md5sum'] = '00000000000000000000000000000001' - chromsizes_file_json['genome_assembly'] = genome_assembly - chrom_file = testapp.post_json('/file_reference', - chromsizes_file_json).json['@graph'][0] - - # Get a bedGraph file to add. - bigwig_file_json['higlass_uid'] = "Y08H_toDQ-OxidYJAzFPXA" - bigwig_file_json['md5sum'] = '00000000000000000000000000000001' - bigwig_file_json['genome_assembly'] = genome_assembly - bigwig_file = testapp.post_json('/file_processed', - bigwig_file_json).json['@graph'][0] - - # Post the chromsizes file. - higlass_conf_uuid = "00000000-1111-0000-1111-000000000002" - response = testapp.get( - "/higlass-view-configs/{higlass_conf_uuid}/?format=json".format( - higlass_conf_uuid=higlass_conf_uuid)) - higlass_json = response.json - - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': higlass_json["viewconfig"], - 'genome_assembly': genome_assembly, - 'files': [ - "{uuid}".format(uuid=chrom_file['uuid']), - ] - }) - - # Check the left and top sides to make sure there are tracks. 
- full_higlass_json = response.json["new_viewconfig"] - assert_true(response.json["errors"] == '') - assert_true(response.json["success"]) - assert_true(len(full_higlass_json["views"]) == 1) - - # The chromsizes should have been added to the left and top sides. - assert_true( - len(higlass_json["viewconfig"]["views"][0]["tracks"]["left"]) + - 1 == len(full_higlass_json["views"][0]["tracks"]["left"]), - "left side mismatch") - - assert_true( - len(higlass_json["viewconfig"]["views"][0]["tracks"]["top"]) + - 1 == len(full_higlass_json["views"][0]["tracks"]["top"]), - "top side mismatch") - - assert_true(full_higlass_json["views"][0]["tracks"]["top"][0]["type"] == - "horizontal-chromosome-labels") - - # Add a height to the chromosome labels. - for t in full_higlass_json["views"][0]["tracks"]["top"]: - if t["type"] == "horizontal-chromosome-labels": - t['height'] = 50 - - # Remove the mcool from the central contents. - full_higlass_json["views"][0]["tracks"]["center"] = [] - - # Add another 1D file, telling the endpoint to remove unneeded tracks. - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': full_higlass_json, - 'genome_assembly': genome_assembly, - 'files': [ - "{uuid}".format(uuid=bigwig_file['uuid']), - ], - 'remove_unneeded_tracks': True, - }) - - all_1d_higlass_json = response.json["new_viewconfig"] - - # Make sure there are no left tracks. - assert_true( - len(all_1d_higlass_json["views"][0]["tracks"]["left"]) == 0, - "Left tracks found") - - top_chromsizes_tracks = [ - t for t in all_1d_higlass_json["views"][0]["tracks"]["top"] - if t["type"] == "horizontal-chromosome-labels" - ] - assert_true(top_chromsizes_tracks[0]["height"] == 50, - "Top chromsize track lost its height") - - # Add a 2D file, telling the endpoint to remove unneeded tracks. - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': all_1d_higlass_json, - 'genome_assembly': genome_assembly, - 'files': [ - "{uuid}".format(uuid=mcool_file['uuid']), - ], - 'remove_unneeded_tracks': True, - }) - - # Make sure the left chromsize tracks have been added.
- restored_2d_track_higlass_json = response.json["new_viewconfig"] - - found_left_chromsizes = any([ - t for t in restored_2d_track_higlass_json["views"][0]["tracks"]["left"] - if t["type"] == "vertical-chromosome-labels" - ]) - assert_true(found_left_chromsizes, "Could not find left chromsizes track") - - # Make sure the top tracks have horizontal types and the left side has vertical types - types_to_find = { - "horizontal-gene-annotations": 0, - "horizontal-chromosome-labels": 0, - "vertical-gene-annotations": 0, - "vertical-chromosome-labels": 0, - } - - top_track_uids = [] - for track in restored_2d_track_higlass_json["views"][0]["tracks"]["top"]: - if "uid" in track: - top_track_uids.append(track["uid"]) - if track["type"] in types_to_find: - types_to_find[track["type"]] += 1 - - assert_true(types_to_find["horizontal-gene-annotations"] > 0) - assert_true(types_to_find["horizontal-chromosome-labels"] > 0) - - vertical_tracks_found = 0 - for track in restored_2d_track_higlass_json["views"][0]["tracks"]["left"]: - vertical_tracks_found += 1 - if "uid" in track: - assert_true( - track["uid"] not in top_track_uids, - "Top track uid reused for left track: {uid}".format( - uid=track["uid"])) - if track["type"] in types_to_find: - types_to_find[track["type"]] += 1 - - assert_true(types_to_find["vertical-gene-annotations"] > 0) - assert_true(types_to_find["vertical-chromosome-labels"] > 0) - assert_true(vertical_tracks_found > 0) - - # The chromsizes had a height when it was horizontal. Make sure it has a width when vertical. - top_chromsizes_tracks = [ - t for t in restored_2d_track_higlass_json["views"][0]["tracks"]["top"] - if t["type"] == "horizontal-chromosome-labels" - ] - - assert_true(top_chromsizes_tracks[0]["height"] == 50, - "Top chromsize track lost its height") - - left_chromsizes_tracks = [ - t for t in restored_2d_track_higlass_json["views"][0]["tracks"]["left"] - if t["type"] == "vertical-chromosome-labels" - ] - assert_true(left_chromsizes_tracks[0]["width"] == 50, - "Left chromsize track has no width") - assert_true("height" not in left_chromsizes_tracks[0], - "Left chromsize track still has a height") - - -def test_2d_chromsize_always_last_track(testapp, higlass_blank_viewconf, - mcool_file_json, chromsizes_file_json): - """ 2d-chromosome-grid tracks need to be the last center track, or other tracks will occlude them during rendering. - - Args: - testapp(obj): This object can make RESTful API calls to the test server. - higlass_blank_viewconf(obj): Empty Higlass view configuration. - mcool_file_json(dict): Fixture refers to an mcool file. - chromsizes_file_json(dict): Fixture refers to a chromsizes file. - - Returns: - Nothing - - Raises: - AssertionError if the test fails. - """ - - genome_assembly = "GRCm38" - - # Post an mcool file and retrieve its uuid. Add a higlass_uid. - mcool_file_json['higlass_uid'] = "LTiacew8TjCOaP9gpDZwZw" - mcool_file_json['genome_assembly'] = genome_assembly - mcool_file = testapp.post_json('/file_processed', - mcool_file_json).json['@graph'][0] - - # Add the chromsizes file. - chromsizes_file_json['higlass_uid'] = "Y08H_toDQ-OxidYJAzFPXA" - chromsizes_file_json['md5sum'] = '00000000000000000000000000000001' - chromsizes_file_json['genome_assembly'] = genome_assembly - chrom_file = testapp.post_json('/file_reference', - chromsizes_file_json).json['@graph'][0] - - # Get the Higlass Viewconf that will be edited.
- higlass_conf_uuid = "00000000-1111-0000-1111-000000000000" - response = testapp.get( - "/higlass-view-configs/{higlass_conf_uuid}/?format=json".format( - higlass_conf_uuid=higlass_conf_uuid)) - higlass_json = response.json - - # Add an mcool file, then add the chromsize file. - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'genome_assembly': higlass_json["genome_assembly"], - 'files': [ - "{uuid}".format(uuid=mcool_file['uuid']), - ] - }) - - step1a_viewconfig = response.json["new_viewconfig"] - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': step1a_viewconfig, - 'genome_assembly': higlass_json["genome_assembly"], - 'files': [ - "{uuid}".format(uuid=chrom_file['uuid']), - ] - }) - - # Center track should have a mcool heatmat and a chromsize grid, in that order. - assert_true(response.json["errors"] == '') - assert_true(response.json["success"]) - - mcool_first_viewconf = response.json["new_viewconfig"] - assert_true(len(mcool_first_viewconf["views"]) == 1) - assert_true(len(mcool_first_viewconf["views"][0]['tracks']['center']) == 1) - assert_true( - len(mcool_first_viewconf["views"][0]['tracks']['center'][0][ - "contents"]) == 2) - assert_true(mcool_first_viewconf["views"][0]['tracks']['center'][0][ - "contents"][0]["type"] == "heatmap") - assert_true(mcool_first_viewconf["views"][0]['tracks']['center'][0][ - "contents"][1]["type"] == "2d-chromosome-grid") - - # Add another mcool file. - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': mcool_first_viewconf, - 'genome_assembly': higlass_json["genome_assembly"], - 'files': [ - "{uuid}".format(uuid=mcool_file['uuid']), - ] - }) - - assert_true(response.json["errors"] == '') - assert_true(response.json["success"]) - - mcool_two_viewconf = response.json["new_viewconfig"] - - # There should be two views. - assert_true(len(mcool_two_viewconf["views"]) == 2) - - # The second view's center track should have a mcool heatmat and a chromsize grid, in that order. - assert_true(len(mcool_two_viewconf["views"][1]['tracks']['center']) == 1) - assert_true( - len(mcool_two_viewconf["views"][1]['tracks']['center'][0]["contents"]) - == 2) - assert_true(mcool_two_viewconf["views"][1]['tracks']['center'][0][ - "contents"][0]["type"] == "heatmap") - assert_true(mcool_two_viewconf["views"][1]['tracks']['center'][0][ - "contents"][1]["type"] == "2d-chromosome-grid") - - -def test_custom_display_height(testapp, higlass_blank_viewconf, - bedGraph_file_json): - """ Add an mcool file to a higlass display, given a default height. - Make sure the view has an expected height. - """ - - # Get a bedGraph file to add. - bedGraph_file_json['higlass_uid'] = "Y08H_toDQ-OxidYJAzFPXA" - bedGraph_file_json['md5sum'] = '00000000000000000000000000000001' - bedGraph_file_json['genome_assembly'] = "GRCm38" - bg_file = testapp.post_json('/file_processed', - bedGraph_file_json).json['@graph'][0] - - # Get the Higlass Viewconf that will be edited. - higlass_conf_uuid = "00000000-1111-0000-1111-000000000000" - response = testapp.get( - "/higlass-view-configs/{higlass_conf_uuid}/?format=json".format( - higlass_conf_uuid=higlass_conf_uuid)) - higlass_json = response.json - - # Try to add the bedGraph to the viewconf. 
- response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': higlass_json["viewconfig"], - 'genome_assembly': higlass_json["genome_assembly"], - 'files': ["{uuid}".format(uuid=bg_file['uuid'])] - }) - - assert_true(response.json["errors"] == '') - assert_true(response.json["success"]) - - # Get the new json. - default_height_viewconf_json = response.json["new_viewconfig"] - - assert_true(len(default_height_viewconf_json["views"]) == 1) - tracks = default_height_viewconf_json["views"][0]["tracks"] - - # Make sure the default height for a 1d track is correct. - bedGraph_track = [ - t for t in tracks["top"] if "-divergent-bar" in t["type"] - ][0] - assert_true(( - bedGraph_track["height"] - 125 > -5 - and bedGraph_track["height"] - 125 < 5 - ), "1D track height should be around 125, got {actual} instead.". - format( - actual=bedGraph_track["height"], )) - - # Create a new display with a large height. The 1D track should still be capped (we don't want it to be too big). - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': higlass_json["viewconfig"], - 'genome_assembly': higlass_json["genome_assembly"], - 'files': ["{uuid}".format(uuid=bg_file['uuid'])], - 'height': 600, - }) - - assert_true(response.json["errors"] == '') - assert_true(response.json["success"]) - - # Get the new json. - big_height_viewconf_json = response.json["new_viewconfig"] - tracks = big_height_viewconf_json["views"][0]["tracks"] - bedGraph_track = [ - t for t in tracks["top"] if "-divergent-bar" in t["type"] - ][0] - assert_true(( - bedGraph_track["height"] - 125 > -5 - and bedGraph_track["height"] - 125 < 5 - ), "1D track height should be around 125, got {actual} instead.". - format( - actual=bedGraph_track["height"], )) - - # Create a default height display with multiple 1D tracks. The heights of each track should be smaller. - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': - higlass_json["viewconfig"], - 'genome_assembly': - higlass_json["genome_assembly"], - 'files': [ - "{uuid}".format(uuid=bg_file['uuid']), - "{uuid}".format(uuid=bg_file['uuid']), - "{uuid}".format(uuid=bg_file['uuid']), - ], - 'height': - 300, - }) - - assert_true(response.json["errors"] == '') - assert_true(response.json["success"]) - - # Get the new json. - default_height_viewconf_json2 = response.json["new_viewconfig"] - assert_true(len(default_height_viewconf_json2["views"]) == 1) - tracks = default_height_viewconf_json2["views"][0]["tracks"] - assert_true(len(tracks["top"]) == 3) - - for track in [t for t in tracks["top"] if "-divergent-bar" in t["type"]]: - assert_true(( - track["height"] - 83 > -5 and track["height"] - 83 < 5 - ), "1D track height should be around 83, got {actual} instead.". - format( - actual=track["height"], )) - - # Create a large height display with multiple 1D tracks. The heights of each track should be the normal size since the display is large enough. - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': - higlass_json["viewconfig"], - 'genome_assembly': - higlass_json["genome_assembly"], - 'files': [ - "{uuid}".format(uuid=bg_file['uuid']), - "{uuid}".format(uuid=bg_file['uuid']), - "{uuid}".format(uuid=bg_file['uuid']), - ], - 'height': - 600, - }) - - assert_true(response.json["errors"] == '') - assert_true(response.json["success"]) - - # Get the new json.
- big_height_viewconf_json2 = response.json["new_viewconfig"] - assert_true(len(big_height_viewconf_json2["views"]) == 1) - tracks = big_height_viewconf_json2["views"][0]["tracks"] - assert_true(len(tracks["top"]) == 3) - - for track in [t for t in tracks["top"] if "-divergent-bar" in t["type"]]: - assert_true(( - track["height"] - 125 > -5 and track["height"] - 125 < 5 - ), "1D track height should be around 125, got {actual} instead.". - format( - actual=track["height"], )) - - -def test_add_higlass_defaults(testapp, higlass_blank_viewconf, mcool_file_json): - """ Add an mcool file to a higlass display, with user-defined higlass options. - Make sure the view has the expected options. - """ - - genome_assembly = "GRCm38" - # Get an mcool file to add. - mcool_file_json['higlass_uid'] = "LTiacew8TjCOaP9gpDZwZw" - mcool_file_json['genome_assembly'] = genome_assembly - mcool_file_json['higlass_defaults'] = {'heatmapValueScaling': 'linear'} - mcool_file = testapp.post_json('/file_processed', - mcool_file_json).json['@graph'][0] - - # Get the Higlass Viewconf that will be edited. - higlass_conf_uuid = "00000000-1111-0000-1111-000000000000" - response = testapp.get( - "/higlass-view-configs/{higlass_conf_uuid}/?format=json".format( - higlass_conf_uuid=higlass_conf_uuid)) - higlass_json = response.json - - # Try to add the mcool to the viewconf. - response = testapp.post_json( - "/add_files_to_higlass_viewconf/", { - 'higlass_viewconfig': higlass_json["viewconfig"], - 'genome_assembly': higlass_json["genome_assembly"], - 'files': ["{uuid}".format(uuid=mcool_file['uuid'])] - }) - - assert_true(response.json["errors"] == '') - assert_true(response.json["success"]) - - # Get the new json. - new_higlass_options_viewconf_json = response.json["new_viewconfig"] - - assert_true(len(new_higlass_options_viewconf_json["views"]) == 1) - view = new_higlass_options_viewconf_json["views"][0] - - assert_true("center" in view["tracks"]) - assert_true(len(view["tracks"]["center"]) == 1) - - center_track = view["tracks"]["center"][0] - assert_true(center_track["type"] == "combined") - assert_true("contents" in center_track) - - # The contents should have the mcool's heatmap - contents = center_track["contents"] - assert_true(len(contents) == 1) - - # The central contents should have the mcool file and the user-defined higlass option should be there - if "tilesetUid" in contents[0] and contents[0]["tilesetUid"] == mcool_file_json['higlass_uid']: - assert_true(contents[0]["type"] == "heatmap") - assert_true(contents[0]["options"]["heatmapValueScaling"] == 'linear') diff --git a/src/encoded/tests/test_indexing.py b/src/encoded/tests/test_indexing.py deleted file mode 100644 index 0685ff2a57..0000000000 --- a/src/encoded/tests/test_indexing.py +++ /dev/null @@ -1,488 +0,0 @@ -""" Test full indexing setup - -The fixtures in this module set up a full system with postgresql and -elasticsearch running as subprocesses.
-""" - -import datetime -import json -import os -import pkg_resources -import pytest -import re -import time -import transaction -import uuid - -from elasticsearch.exceptions import NotFoundError -from snovault import DBSESSION, TYPES -from snovault.storage import Base -from snovault.elasticsearch import create_mapping, ELASTIC_SEARCH -from snovault.elasticsearch.create_mapping import ( - type_mapping, - create_mapping_by_type, - build_index_record, - compare_against_existing_mapping -) -from snovault.elasticsearch.interfaces import INDEXER_QUEUE -from snovault.elasticsearch.indexer_utils import get_namespaced_index, compute_invalidation_scope -from sqlalchemy import func -from timeit import default_timer as timer -from unittest import mock -from zope.sqlalchemy import mark_changed -from .. import main -from ..util import delay_rerun -from ..verifier import verify_item - - -pytestmark = [pytest.mark.working, pytest.mark.indexing, pytest.mark.workbook] - - -# These 4 versions are known to be compatible, older versions should not be -# used, odds are 15 can be used as well - Will Jan 7 2023 -POSTGRES_COMPATIBLE_MAJOR_VERSIONS = ['11', '12', '13', '14'] - - -def test_postgres_version(session): - - (version_info,) = session.query(func.version()).one() - print("version_info=", version_info) - assert isinstance(version_info, str) - assert re.match("PostgreSQL (%s)([.][0-9]+)? " % '|'.join(POSTGRES_COMPATIBLE_MAJOR_VERSIONS), version_info) - - -# subset of collections to run test on -TEST_COLLECTIONS = ['testing_post_put_patch', 'file_processed'] - - -@pytest.yield_fixture(scope='session') -def app(es_app_settings, request): - # for now, don't run with mpindexer. Add `True` to params above to do so - # if request.param: - # es_app_settings['mpindexer'] = True - app = main({}, **es_app_settings) - - yield app - - DBSession = app.registry[DBSESSION] - # Dispose connections so postgres can tear down. 
- DBSession.bind.pool.dispose() - - -# explicitly specify now (invalidation scope tests don't need this) -@pytest.yield_fixture -def setup_and_teardown(es_app): - """ - Run create mapping and purge queue before tests and clear out the - DB tables after the test - """ - # BEFORE THE TEST - run create mapping for test types and clear queues - create_mapping.run(es_app, collections=TEST_COLLECTIONS, skip_indexing=True, purge_queue=True) - - yield  # run the test - - # AFTER THE TEST - session = es_app.registry[DBSESSION] - connection = session.connection().connect() - connection.execute('TRUNCATE {} RESTART IDENTITY;'.format( - ','.join(table.name - for table in reversed(Base.metadata.sorted_tables)))) - session.flush() - mark_changed(session()) - transaction.commit() - - -@pytest.mark.slow -@pytest.mark.flaky(rerun_filter=delay_rerun, max_runs=2) -def test_indexing_simple(setup_and_teardown, es_app, es_testapp, indexer_testapp): - es = es_app.registry['elasticsearch'] - namespaced_ppp = get_namespaced_index(es_app, 'testing_post_put_patch') - doc_count = es.count(index=namespaced_ppp).get('count') - assert doc_count == 0 - # First post a single item so that subsequent indexing is incremental - es_testapp.post_json('/testing-post-put-patch/', {'required': ''}) - res = indexer_testapp.post_json('/index', {'record': True}) - assert res.json['indexing_count'] == 1 - res = es_testapp.post_json('/testing-post-put-patch/', {'required': ''}) - uuid = res.json['@graph'][0]['uuid'] - res = indexer_testapp.post_json('/index', {'record': True}) - assert res.json['indexing_count'] == 1 - time.sleep(3) - # check es directly - doc_count = es.count(index=namespaced_ppp).get('count') - assert doc_count == 2 - res = es_testapp.get('/search/?type=TestingPostPutPatch') - uuids = [indv_res['uuid'] for indv_res in res.json['@graph']] - count = 0 - while uuid not in uuids and count < 20: - time.sleep(1) - res = es_testapp.get('/search/?type=TestingPostPutPatch') - uuids = [indv_res['uuid'] for indv_res in res.json['@graph']] - count += 1 - assert res.json['total'] >= 2 - assert uuid in uuids - namespaced_indexing = get_namespaced_index(es_app, 'indexing') - indexing_doc = es.get(index=namespaced_indexing, id='latest_indexing') - indexing_source = indexing_doc['_source'] - assert 'indexing_count' in indexing_source - assert 'indexing_finished' in indexing_source - assert 'indexing_content' in indexing_source - assert indexing_source['indexing_status'] == 'finished' - assert indexing_source['indexing_count'] > 0 - testing_ppp_mappings = es.indices.get_mapping(index=namespaced_ppp)[namespaced_ppp] - assert 'mappings' in testing_ppp_mappings - testing_ppp_settings = es.indices.get_settings(index=namespaced_ppp)[namespaced_ppp] - assert 'settings' in testing_ppp_settings - # ensure we only have 1 shard for tests - assert testing_ppp_settings['settings']['index']['number_of_shards'] == '1' - - -@pytest.mark.flaky(rerun_filter=delay_rerun, max_runs=2) -def test_create_mapping_on_indexing(setup_and_teardown, es_app, es_testapp, registry, elasticsearch): - """ - Test overall create_mapping functionality using app. - Do this by checking es directly before and after running mapping. - Delete an index directly, run again to see if it recovers.
- """ - es = registry[ELASTIC_SEARCH] - item_types = TEST_COLLECTIONS - # check that mappings and settings are in index - for item_type in item_types: - item_mapping = type_mapping(registry[TYPES], item_type) - try: - namespaced_index = get_namespaced_index(es_app, item_type) - item_index = es.indices.get(index=namespaced_index) - except Exception: - assert False - found_index_mapping_emb = item_index[namespaced_index]['mappings']['properties']['embedded'] - found_index_settings = item_index[namespaced_index]['settings'] - assert found_index_mapping_emb - assert found_index_settings - # compare the manually created mapping to the one in ES - full_mapping = create_mapping_by_type(item_type, registry) - item_record = build_index_record(full_mapping, item_type) - # below is True if the found mapping matches manual one - assert compare_against_existing_mapping(es, namespaced_index, item_type, item_record, True) - - -@pytest.mark.broken # Doesn't work on GitHub Actions -@pytest.mark.skip -@pytest.mark.flaky(rerun_filter=delay_rerun, max_runs=2) -def test_file_processed_detailed(setup_and_teardown, es_app, es_testapp, indexer_testapp, award, lab, file_formats): - # post file_processed - item = { - 'award': award['uuid'], - 'lab': lab['uuid'], - 'file_format': file_formats.get('pairs').get('@id'), - 'filename': 'test.pairs.gz', - 'status': 'uploading' - } - fp_res = es_testapp.post_json('/file_processed', item) - test_fp_uuid = fp_res.json['@graph'][0]['uuid'] - res = es_testapp.post_json('/file_processed', item) - indexer_testapp.post_json('/index', {'record': True}) - - # Todo, input a list of accessions / uuids: - verify_item(test_fp_uuid, indexer_testapp, es_testapp, app.registry) - # While we're here, test that _update of the file properly - # queues the file with given relationship - indexer_queue = app.registry[INDEXER_QUEUE] - rel_file = { - 'award': award['uuid'], - 'lab': lab['uuid'], - 'file_format': file_formats.get('pairs').get('@id') - } - rel_res = es_testapp.post_json('/file_processed', rel_file) - rel_uuid = rel_res.json['@graph'][0]['uuid'] - # now update the original file with the relationship - # ensure rel_file is properly queued - related_files = [{'relationship_type': 'derived from', 'file': rel_uuid}] - es_testapp.patch_json('/' + test_fp_uuid, {'related_files': related_files}, status=200) - time.sleep(2) - # may need to make multiple calls to indexer_queue.receive_messages - received = [] - received_batch = None - while received_batch is None or len(received_batch) > 0: - received_batch = indexer_queue.receive_messages() - received.extend(received_batch) - to_replace = [] - to_delete = [] - found_fp_sid = None - found_rel_sid = None - # keep track of the PATCH of the original file and the associated PATCH - # of the related file. 
Compare uuids - for msg in received: - json_body = json.loads(msg.get('Body', {})) - if json_body['uuid'] == test_fp_uuid and json_body['method'] == 'PATCH': - found_fp_sid = json_body['sid'] - to_delete.append(msg) - elif json_body['uuid'] == rel_uuid and json_body['method'] == 'PATCH': - assert json_body['info'] == "queued from %s _update" % test_fp_uuid - found_rel_sid = json_body['sid'] - to_delete.append(msg) - else: - to_replace.append(msg) - indexer_queue.delete_messages(to_delete) - indexer_queue.replace_messages(to_replace, vis_timeout=0) - assert found_fp_sid is not None and found_rel_sid is not None - assert found_rel_sid > found_fp_sid  # sid of related file is greater - - -@pytest.mark.flaky(rerun_filter=delay_rerun, max_runs=2) -def test_real_validation_error(setup_and_teardown, es_app, indexer_testapp, es_testapp, lab, award, file_formats): - """ - Create an item (file-processed) with a validation error and index, - to ensure that validation errors work - """ - indexer_queue = es_app.registry[INDEXER_QUEUE] - es = es_app.registry[ELASTIC_SEARCH] - fp_body = { - 'schema_version': '3', - 'uuid': str(uuid.uuid4()), - 'file_format': file_formats.get('mcool').get('uuid'), - 'lab': lab['uuid'], - 'award': award['uuid'], - 'accession': '4DNFIBBBBBBB', - 'higlass_uid': 1  # validation error -- higlass_uid should be string - } - res = es_testapp.post_json('/files-processed/?validate=false&upgrade=False', - fp_body, status=201).json - fp_id = res['@graph'][0]['@id'] - val_err_view = es_testapp.get(fp_id + '@@validation-errors', status=200).json - assert val_err_view['@id'] == fp_id - assert val_err_view['validation_errors'] == [] - - # call to /index will throw MissingIndexItemException multiple times, - # since associated file_format, lab, and award are not indexed. - # That's okay; if we don't detect that it succeeded, keep trying until it does - indexer_testapp.post_json('/index', {'record': True}) - to_queue = { - 'uuid': fp_id, - 'strict': True, - 'timestamp': datetime.datetime.utcnow().isoformat() - } - counts = 0 - es_res = None - while not es_res and counts < 15: - time.sleep(2) - try: - namespaced_fp = get_namespaced_index(es_app, 'file_processed') - es_res = es.get(index=namespaced_fp, - id=res['@graph'][0]['uuid']) - except NotFoundError: - indexer_queue.send_messages([to_queue], target_queue='primary') - indexer_testapp.post_json('/index', {'record': True}) - counts += 1 - assert es_res - assert len(es_res['_source'].get('validation_errors', [])) == 1 - # check that validation-errors view works - val_err_view = es_testapp.get(fp_id + '@@validation-errors', status=200).json - assert val_err_view['@id'] == fp_id - assert val_err_view['validation_errors'] == es_res['_source']['validation_errors'] - - -# @pytest.mark.performance -@pytest.mark.skip(reason="need to update perf-testing inserts") -def test_load_and_index_perf_data(setup_and_teardown, es_testapp, indexer_testapp): - ''' - ~~ CURRENTLY NOT WORKING ~~ - - PERFORMANCE TESTING - Loads all the perf-testing data and then indexes it - Prints time for both - - this test is to ensure the performance testing data that is run - nightly through the mastertest_deployment process in the torb repo - still loads and indexes correctly; it takes roughly 25 minutes to run.
- Note: run with bin/test -s -m performance to see the prints from the test - ''' - - insert_dir = pkg_resources.resource_filename('encoded', 'tests/data/perf-testing/') - inserts = [f for f in os.listdir(insert_dir) if os.path.isfile(os.path.join(insert_dir, f))] - json_inserts = {} - - # pluck a few uuids for testing - test_types = ['biosample', 'user', 'lab', 'experiment_set_replicate'] - test_inserts = [] - for insert in inserts: - type_name = insert.split('.')[0] - json_inserts[type_name] = json.loads(open(insert_dir + insert).read()) - # pluck a few uuids for testing - if type_name in test_types: - test_inserts.append({'type_name': type_name, 'data': json_inserts[type_name][0]}) - - # load 'em up - start = timer() - with mock.patch('snovault.loadxl.get_app') as mocked_app: - mocked_app.return_value = es_testapp.app - data = {'store': json_inserts} - res = es_testapp.post_json('/load_data', data) - assert res.json['status'] == 'success' - stop_insert = timer() - print("PERFORMANCE: Time to load data is %s" % (stop_insert - start)) - index_res = indexer_testapp.post_json('/index', {'record': True}) - assert index_res.json['indexing_status'] == 'finished' - stop_index = timer() - print("PERFORMANCE: Time to index is %s" % (stop_index - start)) - - # check a couple random inserts - for item in test_inserts: - start = timer() - assert es_testapp.get("/" + item['data']['uuid'] + "?frame=raw").json['uuid'] - stop = timer() - frame_time = stop - start - - start = timer() - assert es_testapp.get("/" + item['data']['uuid']).follow().json['uuid'] - stop = timer() - embed_time = stop - start - - print("PERFORMANCE: Time to query item %s - %s raw: %s embed %s" % (item['type_name'], item['data']['uuid'], - frame_time, embed_time)) - # useful for seeing debug messages - # assert False - - -class TestInvalidationScopeViewFourfront: - """ Integrated testing of invalidation scope - requires ES component, so in this file.
""" - DEFAULT_SCOPE = ['status', 'uuid'] # --> this is what you get if there is nothing - - class MockedRequest: - def __init__(self, registry, source_type, target_type): - self.registry = registry - self.json = { - 'source_type': source_type, - 'target_type': target_type - } - - @pytest.mark.parametrize('source_type, target_type, invalidated', [ - # Test WorkflowRun - ('FileProcessed', 'WorkflowRunAwsem', - DEFAULT_SCOPE + ['accession', 'filename', 'file_format', 'file_size', 'quality_metric'] - ), - ('Software', 'WorkflowRunAwsem', - DEFAULT_SCOPE + ['name', 'title', 'version', 'commit', 'source_url'] - ), - ('Workflow', 'WorkflowRunAwsem', - DEFAULT_SCOPE + ['title', 'name', 'experiment_types', 'category', 'app_name', 'steps.name'] - ), - ('WorkflowRunAwsem', 'FileProcessed', - DEFAULT_SCOPE + ['input_files.workflow_argument_name', 'output_files.workflow_argument_name', 'title', - 'workflow'] - ), - # Test FileProcessed - ('Enzyme', 'FileProcessed', # embeds 'name' - DEFAULT_SCOPE + ['name'] - ), - ('ExperimentType', 'FileProcessed', # embeds 'title' - DEFAULT_SCOPE + ['title'] - ), - ('ExperimentSet', 'FileProcessed', # embeds 'title' - DEFAULT_SCOPE + ['last_modified.date_modified', 'accession', 'experimentset_type'] - ), - ('Biosample', 'FileProcessed', # embeds 'accession' + calc props (not detected) - DEFAULT_SCOPE + ['accession', 'biosource', 'cell_culture_details', 'modifications'] - ), - ('Biosource', 'FileProcessed', - DEFAULT_SCOPE + ['biosource_type', 'cell_line', 'cell_line_tier', 'individual', 'modifications', - 'override_biosource_name', 'tissue'] - ), - ('BioFeature', 'FileProcessed', - DEFAULT_SCOPE + ['cellular_structure', 'feature_mods.mod_position', 'feature_mods.mod_type', 'feature_type', - 'genome_location', 'organism_name', 'preferred_label', 'relevant_genes'] - ), - ('OntologyTerm', 'FileProcessed', - DEFAULT_SCOPE + ['term_id', 'term_name', 'preferred_name'] - ), - ('Organism', 'FileProcessed', - DEFAULT_SCOPE + ['name', 'scientific_name'] - ), - ('Modification', 'FileProcessed', - DEFAULT_SCOPE + ['modification_type', 'genomic_change', 'target_of_mod', 'override_modification_name'] - ), - # Test ExperimentSet - ('FileProcessed', 'ExperimentSet', - DEFAULT_SCOPE + ['accession', 'contributing_labs', 'dbxrefs', 'description', 'extra_files.file_size', - 'extra_files.href', 'extra_files.md5sum', 'extra_files.use_for', 'file_classification', - 'file_format', 'file_size', 'file_type', 'genome_assembly', 'higlass_uid', 'lab', - 'last_modified.date_modified', 'md5sum', 'notes_to_tsv', 'quality_metric', - 'related_files.relationship_type', 'static_content.description', 'static_content.location'] - ), - ('User', 'ExperimentSet', - DEFAULT_SCOPE + ['email', 'first_name', 'job_title', 'lab', 'last_name', 'preferred_email', 'submitted_by', - 'timezone'] - ), - ('Badge', 'ExperimentSet', - DEFAULT_SCOPE + ['title', 'badge_classification', 'badge_icon', 'description'] - ), - ('TreatmentAgent', 'ExperimentSet', - DEFAULT_SCOPE + ['biological_agent', 'chemical', 'concentration', 'concentration_units', 'constructs', - 'description', 'duration', 'duration_units', 'temperature', 'treatment_type',] - ), - ('OntologyTerm', 'ExperimentSet', - DEFAULT_SCOPE + ['preferred_name', 'slim_terms', 'synonyms', 'term_id', 'term_name'] - ), - ('Organism', 'ExperimentSet', - DEFAULT_SCOPE + ['name', 'scientific_name'] - ), - ('Modification', 'ExperimentSet', - DEFAULT_SCOPE + ['modification_type', 'genomic_change', 'target_of_mod', 'override_modification_name'] - ), - ('BioFeature', 
'ExperimentSet', - DEFAULT_SCOPE + ['cellular_structure', 'feature_mods.mod_position', 'feature_mods.mod_type', 'feature_type', - 'genome_location', 'organism_name', 'preferred_label', 'relevant_genes'] - ), - ('Biosample', 'ExperimentSet', - DEFAULT_SCOPE + ['accession', 'badges.messages', 'biosource', 'cell_culture_details', 'description', - 'modifications', 'treatments'] - ), - ('Biosource', 'ExperimentSet', - DEFAULT_SCOPE + ['accession', 'biosource_type', 'cell_line', 'cell_line_tier', 'override_biosource_name', - 'override_organism_name', 'tissue'] - ), - ('Construct', 'ExperimentSet', - DEFAULT_SCOPE + ['name'] - ), - ('Enzyme', 'ExperimentSet', - DEFAULT_SCOPE + ['name'] - ), - ('FileReference', 'ExperimentSet', - DEFAULT_SCOPE + ['accession', 'contributing_labs', 'dbxrefs', 'description', 'extra_files.file_size', - 'extra_files.href', 'extra_files.md5sum', 'extra_files.use_for', 'file_classification', - 'file_format', 'file_size', 'file_type', 'genome_assembly', 'higlass_uid', 'lab', - 'last_modified.date_modified', 'md5sum', 'notes_to_tsv', 'quality_metric', - 'related_files.relationship_type', 'static_content.description', 'static_content.location'] - ), - ('FileFormat', 'ExperimentSet', - DEFAULT_SCOPE + ['file_format'] - ), - ('QualityMetricFastqc', 'ExperimentSet', - DEFAULT_SCOPE + ['overall_quality_status', 'url', 'Total Sequences', 'Sequence length'] - ), - ('QualityMetricMargi', 'ExperimentSet', - DEFAULT_SCOPE + ['overall_quality_status', 'url'] - ), - ('QualityMetricBamqc', 'ExperimentSet', - DEFAULT_SCOPE + ['overall_quality_status', 'url'] - ), - ]) - def test_invalidation_scope_view_parametrized(self, indexer_testapp, source_type, target_type, invalidated): - """ Just call the route function - test some basic interactions. - In this test, the source_type is the type on which we simulate a modification and target type is - the type we are simulating an invalidation on. In all cases uuid and status will trigger invalidation - if a linkTo exists, so those fields are always returned as part of the invalidation scope (even when no - link exists). - """ - req = self.MockedRequest(indexer_testapp.app.registry, source_type, target_type) - scope = compute_invalidation_scope(None, req) - assert sorted(scope['Invalidated']) == sorted(invalidated) - - # @pytest.mark.broken - # @pytest.mark.parametrize('source_type, target_type, invalidated', [ - # # Put test here - # ]) - # def test_invalidation_scope_view_parametrized_broken(self, indexer_testapp, source_type, target_type, invalidated): - # """ Collect error cases on this test when found. """ - # req = self.MockedRequest(indexer_testapp.app.registry, source_type, target_type) - # scope = compute_invalidation_scope(None, req) - # assert sorted(scope['Invalidated']) == sorted(invalidated) diff --git a/src/encoded/tests/test_init.py b/src/encoded/tests/test_init.py deleted file mode 100644 index 74a1f6a0c5..0000000000 --- a/src/encoded/tests/test_init.py +++ /dev/null @@ -1,6 +0,0 @@ -# import pytest -# -# pytestmark = pytest.mark.working -# -# Tests that were here aren't needed because the functionality comes from dcicutils.env_utils now, -# and these tests were moved to that library. 
-kmp 27-Mar-2020 diff --git a/src/encoded/tests/test_inserts.py b/src/encoded/tests/test_inserts.py deleted file mode 100644 index 00df10b760..0000000000 --- a/src/encoded/tests/test_inserts.py +++ /dev/null @@ -1,76 +0,0 @@ -import pytest - -from ..commands.run_upgrader_on_inserts import get_inserts - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -def test_check_wf_links(): - workflow_inserts = list(get_inserts('inserts', 'workflow')) - software_inserts = list(get_inserts('inserts', 'software')) - fileformat_inserts = list(get_inserts('master-inserts', 'file_format')) - errs = check_wf_links(workflow_inserts, software_inserts, fileformat_inserts) - if errs: - print('\n'.join(errs)) - assert errs == [] - - -def check_wf_links(workflow_inserts, software_inserts, fileformat_inserts): - """ - Return a list of string errors found for easy tracking of fixes - """ - errors = [] - # get all software insert uuids - all_sf = [] - for sf in software_inserts: - all_sf.append(sf['uuid']) - - # get all file format insert uuids and format names - all_ff = [] - for ff in fileformat_inserts: - if 'uuid' in ff: - all_ff.append(ff['uuid']) - if 'file_format' in ff: - all_ff.append(ff['file_format']) - - # check workflow inserts - for wf in workflow_inserts: - # compare software - for st in wf.get('steps', []): - if 'software_used' in st.get('meta', {}): - for sf in st['meta']['software_used']: - parsed_sf = parse_software_uuid(sf) - if parsed_sf not in all_sf: - errors.append('Could not find software %s from workflow %s' % (parsed_sf, wf['uuid'])) - # compare file format - # file format in arguments - for arg in wf.get('arguments', []): - if 'argument_format' in arg: - if arg['argument_format'] not in all_ff: - errors.append('Could not find file_format %s from workflow %s' % (arg['argument_format'], wf['uuid'])) - # file format in step input/output - for st in wf.get('steps', []): - for ip in st.get('inputs', []): - if 'file_format' in ip.get('meta', {}): - if ip['meta']['file_format'] not in all_ff: - errors.append('Could not find file_format %s from workflow %s' % (ip['meta']['file_format'], wf['uuid'])) - for op in st.get('outputs', []): - if 'file_format' in op.get('meta', {}): - if op['meta']['file_format'] not in all_ff: - errors.append('Could not find file_format %s from workflow %s' % (op['meta']['file_format'], wf['uuid'])) - return errors - - -def parse_software_uuid(s): - """parses '/software/lalala/' or '/software/lalala' into 'lalala' - if input is already 'lalala', returns 'lalala' as well.
- if something else, returns None - """ - ss = s.split('/') - if len(ss) == 4 or len(ss) == 3: - return ss[2] - elif len(ss) == 1: - return ss[0] - else: - return None diff --git a/src/encoded/tests/test_key.py b/src/encoded/tests/test_key.py deleted file mode 100644 index dfd5f914bb..0000000000 --- a/src/encoded/tests/test_key.py +++ /dev/null @@ -1,58 +0,0 @@ -import pytest - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - -items = [ - {'name': 'one', 'accession': 'TEST1'}, - {'name': 'two', 'accession': 'TEST2'}, -] - -bad_items = [ - {'name': 'one', 'accession': 'BAD1'}, - {'name': 'bad', 'accession': 'TEST1'}, -] - - -@pytest.fixture -def content(testapp): - url = '/testing-keys/' - for item in items: - testapp.post_json(url, item, status=201) - - -@pytest.mark.parametrize('item', items) -def test_unique_key(testapp, content, item): - url = '/testing-keys/' + item['accession'] - res = testapp.get(url).maybe_follow() - assert res.json['name'] == item['name'] - - -def test_keys_bad_items(testapp, content): - url = '/testing-keys/' - for item in bad_items: - testapp.post_json(url, item, status=409) - - -def test_keys_update(testapp): - url = '/testing-keys/' - item = items[0] - res = testapp.post_json(url, item, status=201) - location = res.location - new_item = {'name': 'new_one', 'accession': 'NEW1'} - testapp.put_json(location, new_item, status=200) - testapp.post_json(url, item, status=201) - testapp.put_json(location, item, status=409) - - -def test_keys_conflict(testapp): - url = '/testing-keys/' - item = items[1] - initial = testapp.get(url).json['@graph'] - testapp.post_json(url, item, status=201) - posted = testapp.get(url).json['@graph'] - assert(len(initial)+1 == len(posted)) - conflict = testapp.post_json(url, item, status=409) - assert(conflict.status_code == 409) - conflicted = testapp.get(url).json['@graph'] - assert(len(posted) == len(conflicted)) diff --git a/src/encoded/tests/test_link.py b/src/encoded/tests/test_link.py deleted file mode 100644 index 6e2b1938ec..0000000000 --- a/src/encoded/tests/test_link.py +++ /dev/null @@ -1,85 +0,0 @@ -import pytest - -from snovault.storage import Link - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -targets = [ - {'name': 'one', 'uuid': '775795d3-4410-4114-836b-8eeecf1d0c2f'}, - {'name': 'two', 'uuid': 'd6784f5e-48a1-4b40-9b11-c8aefb6e1377'}, - {'name': 'quote:name', 'uuid': '0e627b3b-f5d2-41db-ac34-8e97bb8a028c'}, -] - -sources = [ - { - 'name': 'A', - 'target': 'one', - 'uuid': '16157204-8c8f-4672-a1a4-14f4b8021fcd', - 'status': 'current', - }, - { - 'name': 'B', - 'target': 'two', - 'uuid': '1e152917-c5fd-4aec-b74f-b0533d0cc55c', - 'status': 'deleted', - }, -] - - -@pytest.fixture -def content(testapp): - url = '/testing-link-targets/' - for item in targets: - testapp.post_json(url, item, status=201) - - url = '/testing-link-sources/' - for item in sources: - testapp.post_json(url, item, status=201) - - -def test_links_add(content, session): - links = sorted([ - (str(link.source_rid), link.rel, str(link.target_rid)) - for link in session.query(Link).all() - ]) - expected = sorted([ - (sources[0]['uuid'], u'target', targets[0]['uuid']), - (sources[1]['uuid'], u'target', targets[1]['uuid']), - ]) - assert links == expected - - -def test_links_update(content, testapp, session): - url = '/testing-link-sources/' + sources[1]['uuid'] - new_item = {'name': 'B updated', 'target': targets[0]['name']} - testapp.put_json(url, new_item, status=200) - - links = sorted([ - (str(link.source_rid), link.rel,
str(link.target_rid)) - for link in session.query(Link).all() - ]) - expected = sorted([ - (sources[0]['uuid'], u'target', targets[0]['uuid']), - (sources[1]['uuid'], u'target', targets[0]['uuid']), - ]) - assert links == expected - - -def test_links_reverse(content, testapp, session): - target = targets[0] - res = testapp.get('/testing-link-targets/%s/?frame=object' % target['name']) - assert res.json['reverse'] == ['/testing-link-sources/%s/' % sources[0]['uuid']] - - # DELETED sources are hidden from the list. - target = targets[1] - res = testapp.get('/testing-link-targets/%s/' % target['name']) - assert res.json['reverse'] == [] - - -def test_links_quoted_ids(content, testapp, session): - res = testapp.get('/testing-link-targets/quote:name/?frame=object') - target = res.json - source = {'name': 'C', 'target': target['@id']} - testapp.post_json('/testing-link-sources/', source, status=201) diff --git a/src/encoded/tests/test_load_access_key.py b/src/encoded/tests/test_load_access_key.py deleted file mode 100644 index cb5675fa1a..0000000000 --- a/src/encoded/tests/test_load_access_key.py +++ /dev/null @@ -1,19 +0,0 @@ -import pytest - -from unittest import mock -from ..commands.load_access_keys import generate_access_key - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - -# TODO: test load_access_keys.get_existing_key_ids, which would use ES - - -def test_gen_access_keys(testapp, admin): - with mock.patch('encoded.commands.load_access_keys.get_beanstalk_real_url') as mocked_url: - mocked_url.return_value = 'http://fourfront-hotseat' - res = generate_access_key(testapp, 'test_env', admin['uuid'], 'test_desc') - assert res['server'] == 'http://fourfront-hotseat' - assert res['secret'] - assert res['key'] - mocked_url.assert_called_once() diff --git a/src/encoded/tests/test_loadxl.py b/src/encoded/tests/test_loadxl.py deleted file mode 100644 index 137b70490b..0000000000 --- a/src/encoded/tests/test_loadxl.py +++ /dev/null @@ -1,190 +0,0 @@ -import pytest - -from past.builtins import basestring -from pkg_resources import resource_filename -from unittest import mock -from snovault import loadxl -from ..commands.run_upgrader_on_inserts import get_inserts - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -def test_load_data_endpoint(testapp): - data = {'fdn_dir': 'master-inserts', - 'itype': ['award', 'lab', 'user']} - with mock.patch(f'snovault.loadxl.get_app') as mocked_app: - mocked_app.return_value = testapp.app - res = testapp.post_json('/load_data', data, status=200) - assert res.json['status'] == 'success' - - -def test_load_data_endpoint_returns_error_if_incorrect_keyword(testapp): - data = {'mdn_dir': 'master-inserts', - 'itype': ['user']} - with mock.patch(f'snovault.loadxl.get_app') as mocked_app: - mocked_app.return_value = testapp.app - res = testapp.post_json('/load_data', data, status=422) - assert res.json['status'] == 'error' - assert res.json['@graph'] - - -def test_load_data_endpoint_returns_error_if_incorrect_data(testapp): - data = {'fdn_dir': 'master-inserts', - 'itype': ['user']} - with mock.patch(f'snovault.loadxl.get_app') as mocked_app: - mocked_app.return_value = testapp.app - res = testapp.post_json('/load_data', data, status=422) - assert res.json['status'] == 'error' - assert res.json['@graph'] - - -def test_load_data_user_specified_config(testapp): - data = {'fdn_dir': 'master-inserts', - 'itype': ['user', 'lab', 'award']} - config_uri = 'test.ini' - data['config_uri'] = config_uri - with mock.patch(f'snovault.loadxl.get_app') as 
mocked_app: - mocked_app.return_value = testapp.app - res = testapp.post_json('/load_data', data, status=200) - assert res.json['status'] == 'success' - mocked_app.assert_called_once_with(config_uri, 'app') - - -def test_load_data_local_dir(testapp): - expected_dir = resource_filename('encoded', 'tests/data/perf-testing/') - with mock.patch(f'snovault.loadxl.get_app') as mocked_app: - with mock.patch(f'snovault.loadxl.load_all') as load_all: - mocked_app.return_value = testapp.app - load_all.return_value = None - res = testapp.post_json('/load_data', {'fdn_dir': 'perf-testing'}, status=200) - assert res.json['status'] == 'success' - load_all.assert_called_once_with(mock.ANY, expected_dir, None, itype=None, overwrite=False, from_json=False) - - -def test_load_data_from_json(testapp): - user_inserts = list(get_inserts('master-inserts', 'user')) - lab_inserts = list(get_inserts('master-inserts', 'lab')) - award_inserts = list(get_inserts('master-inserts', 'award')) - data = {'store': {'user': user_inserts, 'lab': lab_inserts, 'award': award_inserts}, - 'itype': ['user', 'lab', 'award']} - with mock.patch(f'snovault.loadxl.get_app') as mocked_app: - mocked_app.return_value = testapp.app - res = testapp.post_json('/load_data', data, status=200) - assert res.json['status'] == 'success' - - -def test_load_data_local_path(testapp): - local_path = resource_filename('encoded', 'tests/data/master-inserts/') - data = {'local_path': local_path, 'itype': ['user', 'lab', 'award']} - with mock.patch(f'snovault.loadxl.get_app') as mocked_app: - mocked_app.return_value = testapp.app - res = testapp.post_json('/load_data', data, status=200) - assert res.json['status'] == 'success' - - -def test_load_data_iter_response(testapp): - """ - Use iter_response=True in the request json to return a Pyramid Response - that leverages app.iter. The output here will be directly from the - generator - """ - user_inserts = list(get_inserts('master-inserts', 'user')) - lab_inserts = list(get_inserts('master-inserts', 'lab')) - award_inserts = list(get_inserts('master-inserts', 'award')) - # the total number of items we expect - expected = len(user_inserts) + len(lab_inserts) + len(award_inserts) - data = {'store': {'user': user_inserts, 'lab': lab_inserts, 'award': award_inserts}, - 'itype': ['user', 'lab', 'award'], 'iter_response': True} - with mock.patch(f'snovault.loadxl.get_app') as mocked_app: - mocked_app.return_value = testapp.app - res = testapp.post_json('/load_data', data, status=200) - assert res.content_type == 'text/plain' - # this is number of successfully POSTed items - assert res.text.count('POST:') == expected - # this is number of successfully PATCHed items - assert res.text.count('PATCH:') == expected - # this is the number of items that were skipped completely - assert res.text.count('SKIP:') == 0 - assert res.text.count('ERROR:') == 0 - - -def test_load_data_iter_response_fail(testapp): - """ - Use iter_response=True in the request json to return a Pyramid Response - that leverages app.iter. 
The output here will be directly from the generator.
-    For this test, expect a validation error because we use incomplete data.
-    """
-    user_inserts = list(get_inserts('master-inserts', 'user'))
-    # the total number of items we expect
-    expected = len(user_inserts)
-    data = {'store': {'user': user_inserts}, 'itype': ['user'], 'iter_response': True}
-    with mock.patch(f'snovault.loadxl.get_app') as mocked_app:
-        mocked_app.return_value = testapp.app
-        res = testapp.post_json('/load_data', data, status=200)
-        assert res.content_type == 'text/plain'
-        # this is the number of successfully POSTed items
-        assert res.text.count('POST:') == expected
-        # no users should be successfully PATCHed due to missing links
-        assert res.text.count('PATCH:') == 0
-        assert res.text.count('SKIP:') == 0
-        # one exception should be encountered
-        assert res.text.count('ERROR:') == 1
-        assert 'Bad response: 422 Unprocessable Entity' in res.text
-
-
-def test_load_all_gen(testapp):
-    """
-    The load_all_gen generator is pretty thoroughly tested by the other
-    tests here, but let's test it a bit more explicitly
-    """
-    user_inserts = list(get_inserts('master-inserts', 'user'))
-    lab_inserts = list(get_inserts('master-inserts', 'lab'))
-    award_inserts = list(get_inserts('master-inserts', 'award'))
-    # the total number of items we expect
-    expected = len(user_inserts) + len(lab_inserts) + len(award_inserts)
-    data = {'store': {'user': user_inserts, 'lab': lab_inserts, 'award': award_inserts},
-            'itype': ['user', 'lab', 'award']}
-    with mock.patch(f'snovault.loadxl.get_app') as mocked_app:
-        mocked_app.return_value = testapp.app
-        # successful load cases
-        gen1 = loadxl.load_all_gen(testapp, data['store'], None,
-                                   itype=data['itype'], from_json=True)
-        res1 = b''.join([v for v in gen1]).decode()
-        assert res1.count('POST:') == expected
-        assert res1.count('PATCH:') == expected
-        assert res1.count('SKIP:') == 0
-        assert res1.count('ERROR:') == 0
-        # do the same with LoadGenWrapper
-        # items should be SKIP instead of POST, since they were already POSTed
-        gen2 = loadxl.load_all_gen(testapp, data['store'], None,
-                                   itype=data['itype'], from_json=True)
-        catch2 = loadxl.LoadGenWrapper(gen=gen2)
-        res2 = b''.join([v for v in catch2]).decode()
-        assert catch2.caught is None  # no Exception hit
-        assert res2.count('POST:') == 0
-        assert res2.count('PATCH:') == expected
-        assert res2.count('SKIP:') == expected
-        assert res2.count('ERROR:') == 0
-        # now handle error cases, both with and without LoadGenWrapper
-        # let's use a bad directory path to cause an Exception
-        bad_dir = resource_filename('encoded', 'tests/data/not-a-fdn-dir/')
-        gen3 = loadxl.load_all_gen(testapp, bad_dir, None)
-        res3 = b''.join([v for v in gen3]).decode()
-        assert res3.count('POST:') == 0
-        assert res3.count('PATCH:') == 0
-        assert res3.count('SKIP:') == 0
-        assert res3.count('ERROR:') == 1
-        assert 'Failure loading inserts' in res3
-        # the LoadGenWrapper will give us access to the Exception
-        gen4 = loadxl.load_all_gen(testapp, bad_dir, None)
-        catch4 = loadxl.LoadGenWrapper(gen=gen4)
-        res4 = b''.join([v for v in catch4]).decode()
-        assert res4.count('POST:') == 0
-        assert res4.count('PATCH:') == 0
-        assert res4.count('SKIP:') == 0
-        assert res4.count('ERROR:') == 1
-        assert 'Failure loading inserts' in res4
-        assert isinstance(catch4.caught, basestring)
-        assert 'Failure loading inserts' in catch4.caught
diff --git a/src/encoded/tests/test_misc.py b/src/encoded/tests/test_misc.py
deleted file mode 100644
index cfde9e7185..0000000000
---
a/src/encoded/tests/test_misc.py +++ /dev/null @@ -1,13 +0,0 @@ -import os - -from dcicutils.qa_utils import VersionChecker -from .conftest_settings import REPOSITORY_ROOT_DIR - - -def test_version_and_changelog(): - - class MyAppVersionChecker(VersionChecker): - PYPROJECT = os.path.join(REPOSITORY_ROOT_DIR, "pyproject.toml") - CHANGELOG = os.path.join(REPOSITORY_ROOT_DIR, "CHANGELOG.rst") - - MyAppVersionChecker.check_version() diff --git a/src/encoded/tests/test_owltools.py b/src/encoded/tests/test_owltools.py deleted file mode 100644 index bbd25a663c..0000000000 --- a/src/encoded/tests/test_owltools.py +++ /dev/null @@ -1,180 +0,0 @@ -import pytest - -from rdflib import BNode, Literal -from unittest import mock -from ..commands import owltools as ot - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -@pytest.fixture -def owler(): - return mock.patch.object(ot, 'Owler') - - -@pytest.fixture -def rdf_objects(): - rdfobjs = ['testrdfobj1', 'testrdfobj2'] - return [Literal(rdfobj) for rdfobj in rdfobjs] - - -@pytest.fixture -def rdf_objects_2_1(): - rdfobjs = ['testrdfobj1'] - return [Literal(rdfobj) for rdfobj in rdfobjs] - - -@pytest.fixture -def rdf_objects_2_3(): - rdfobjs = ['testrdfobj1', 'testrdfobj2', 'testrdfobj3'] - return [Literal(rdfobj) for rdfobj in rdfobjs] - - -def test_get_rdfobjects_one_type_two_rdfobjs(owler, rdf_objects): - checks = ['testrdfobj1', 'testrdfobj2'] - with mock.patch('encoded.commands.owltools.ConjunctiveGraph') as graph: - graph.objects.return_value = rdf_objects - owler = ot.Owler('http://test.com') - owler.rdfGraph = graph - class_ = 'test_class' - rdfobject_terms = ['1'] - rdfobjects = ot.getObjectLiteralsOfType(class_, owler, rdfobject_terms) - assert len(rdfobjects) == 2 - for rdfobj in rdfobjects: - assert rdfobj in checks - - -def test_get_rdfobjects_two_types_one_rdfobj(owler, rdf_objects_2_1): - check = 'testrdfobj1' - with mock.patch('encoded.commands.owltools.ConjunctiveGraph') as graph: - graph.objects.return_value = rdf_objects_2_1 - owler = ot.Owler('http://test.com') - owler.rdfGraph = graph - class_ = 'test_class' - rdfobject_terms = ['1', '2'] - rdfobjects = ot.getObjectLiteralsOfType(class_, owler, rdfobject_terms) - assert rdfobjects[0] == check - - -def test_get_rdfobjects_two_types_three_rdfobj(rdf_objects_2_3): - checks = ['testrdfobj1', 'testrdfobj2', 'testrdfobj3'] - with mock.patch('encoded.commands.owltools.ConjunctiveGraph') as graph: - graph.objects.return_value = rdf_objects_2_3 - owler = ot.Owler('http://test.com') - owler.rdfGraph = graph - class_ = 'test_class' - rdfobject_terms = ['1', '2'] - rdfobjects = ot.getObjectLiteralsOfType(class_, owler, rdfobject_terms) - assert len(rdfobjects) == 3 - for rdfobj in rdfobjects: - assert rdfobj in checks - - -def test_get_rdfobjects_none_there(owler): - with mock.patch('encoded.commands.owltools.ConjunctiveGraph') as graph: - graph.objects.return_value = [] - owler = ot.Owler('http://test.com') - owler.rdfGraph = graph - owler = ot.Owler('http://test.com') - class_ = 'test_class' - rdfobject_terms = [] - rdfobjects = ot.getObjectLiteralsOfType(class_, owler, rdfobject_terms) - assert not rdfobjects - - -def test_convert2URIRef_nostring(): - result = ot.convert2URIRef(None) - assert result is None - - -def test_convert2URIRef_string(): - result = ot.convert2URIRef('test_string') - assert ot.isURIRef(result) - assert result.toPython() == 'test_string' - - -@pytest.fixture -def uri_list(): - strings = [ - 'testwithoutslashes', - 'test/namespace#name', - 
'test/namespace/name' - ] - uris = [ot.convert2URIRef(s) for s in strings] - return strings + uris - - -def test_splitNameFromNamespace(uri_list): - for i, uri in enumerate(uri_list): - name, ns = ot.splitNameFromNamespace(uri) - if i % 3 == 0: - assert not name - assert ns == uri_list[0] - else: - assert name == 'name' - assert ns == 'test/namespace' - - -@pytest.fixture -def unsorted_uris(): - strings = [ - 'test/Z', - 'test/1', - 'test#B', - 'test#a' - ] - uris = [ot.convert2URIRef(s) for s in strings] - return strings + uris - - -def test_sortUriListByName(unsorted_uris): - wanted = ['test/1', 'test/1', 'test#B', 'test#B', 'test/Z', 'test/Z', 'test#a', 'test#a'] - sorted_uris = ot.sortUriListByName(unsorted_uris) - sorted_uris = [s.__str__() for s in sorted_uris] - assert sorted_uris == wanted - - -@pytest.fixture -def blank_node(): - return BNode() - - -def test_isBlankNode_bnode(blank_node): - assert ot.isBlankNode(blank_node) - - -def test_isBlankNode_non_bnode(unsorted_uris): - assert not ot.isBlankNode(unsorted_uris[4]) - - -@pytest.fixture -def dupe_lists(): - return [ - [[1, 2], [1, 3], [1, 4]], - [['a', 'b', 'c'], ['a', 'a', 'b', 'b', 'b', 'c']], - [[1, 2, 3], [1, 2, 3, 3, 3, 2, 2, 1]], - [[None, None]], - [None] - ] - - -def test_removeDuplicates(dupe_lists): - for i, l in enumerate(dupe_lists): - if i == 0: - def id_func(l): - return l[0] - result = ot.removeDuplicates(l, id_func) - assert len(result) == 1 - assert result == [[1, 2]] - else: - for sl in l: - result = ot.removeDuplicates(sl) - if i == 1: - assert result == ['a', 'b', 'c'] - elif i == 2: - assert result == [1, 2, 3] - elif i == 3: - assert result == [None] - else: - assert result == [] diff --git a/src/encoded/tests/test_permissions.py b/src/encoded/tests/test_permissions.py deleted file mode 100644 index 41395e9210..0000000000 --- a/src/encoded/tests/test_permissions.py +++ /dev/null @@ -1,1264 +0,0 @@ -import pytest -import webtest - -from datetime import date -from urllib.parse import urlencode -from ..types.lab import Lab -from ..acl import LAB_MEMBER_ROLE, LAB_SUBMITTER_ROLE - - -pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema] - - -@pytest.fixture -def remc_lab(testapp): - item = { - 'name': 'remc-lab', - 'title': 'REMC lab', - 'status': 'current' - } - return testapp.post_json('/lab', item).json['@graph'][0] - - -@pytest.fixture -def somelab_w_shared_award(testapp, award): - item = { - 'name': 'some-lab', - 'title': 'SOME lab', - 'status': 'current', - 'awards': [award['@id']] - } - return testapp.post_json('/lab', item).json['@graph'][0] - - -@pytest.fixture -def remc_award(testapp): - item = { - 'name': 'remc-award', - 'description': 'REMC test award', - 'viewing_group': 'Not 4DN', - } - return testapp.post_json('/award', item).json['@graph'][0] - - -@pytest.fixture -def nofic_award(testapp): - item = { - 'name': 'NOFIC-award', - 'description': 'NOFIC test award', - 'viewing_group': 'NOFIC', - } - return testapp.post_json('/award', item).json['@graph'][0] - - -@pytest.fixture -def wrangler(testapp): - item = { - 'first_name': 'Wrangler', - 'last_name': 'Admin', - 'email': 'wrangler@example.org', - 'groups': ['admin'], - } - - # User @@object view has keys omitted. 
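-    # (POST returns that partial view, so re-GET the stored item below to hand
-    # tests the full user object.)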
- res = testapp.post_json('/user', item) - return testapp.get(res.location).json - - -@pytest.fixture -def lab_viewer(testapp, lab, award): - item = { - 'first_name': 'ENCODE', - 'last_name': 'lab viewer', - 'email': 'encode_viewer@example.org', - 'lab': lab['name'], - 'status': 'current', - 'viewing_groups': [award['viewing_group']] - } - # User @@object view has keys omitted. - res = testapp.post_json('/user', item) - return testapp.get(res.location).json - - -@pytest.fixture -def award_viewer(testapp, somelab_w_shared_award): - item = { - 'first_name': 'SOME', - 'last_name': 'award viewer', - 'email': 'awardee@example.org', - 'lab': somelab_w_shared_award['@id'], - 'status': 'current', - } - # User @@object view has keys omitted. - res = testapp.post_json('/user', item) - return testapp.get(res.location).json - - -# this user has the 4DN viewing group -@pytest.fixture -def viewing_group_member(testapp, award): - item = { - 'first_name': 'Viewing', - 'last_name': 'Group', - 'email': 'viewing_group_member@example.org', - 'viewing_groups': [award['viewing_group']], - 'status': 'current' - } - # User @@object view has keys omitted. - res = testapp.post_json('/user', item) - return testapp.get(res.location).json - - -# this user has the NOFIC viewing group -@pytest.fixture -def nofic_group_member(testapp, nofic_award): - item = { - 'first_name': 'NOFIC', - 'last_name': 'Group', - 'email': 'viewing_group_member@example.org', - 'viewing_groups': [nofic_award['viewing_group']], - 'status': 'current' - } - # User @@object view has keys omitted. - res = testapp.post_json('/user', item) - return testapp.get(res.location).json - - -@pytest.fixture -def multi_viewing_group_member(testapp, award, nofic_award): - item = { - 'first_name': 'Viewing', - 'last_name': 'Group', - 'email': 'viewing_group_member@example.org', - 'viewing_groups': [award['viewing_group'], nofic_award['viewing_group']], - 'status': 'current' - } - # User @@object view has keys omitted. - res = testapp.post_json('/user', item) - return testapp.get(res.location).json - - -@pytest.fixture -def remc_submitter(testapp, remc_lab, remc_award): - item = { - 'first_name': 'REMC', - 'last_name': 'Submitter', - 'email': 'remc_submitter@example.org', - 'submits_for': [remc_lab['@id']], - 'viewing_groups': [remc_award['viewing_group']], - 'status': 'current' - } - # User @@object view has keys omitted. - res = testapp.post_json('/user', item) - return testapp.get(res.location).json - - -def remote_user_testapp(app, remote_user): - environ = { - 'HTTP_ACCEPT': 'application/json', - 'REMOTE_USER': str(remote_user), - } - return webtest.TestApp(app, environ) - - -@pytest.fixture -def revoked_user(testapp, lab, award): - item = { - 'first_name': 'ENCODE', - 'last_name': 'Submitter', - 'email': 'no_login_submitter@example.org', - 'submits_for': [lab['@id']], - 'status': 'revoked', - } - # User @@object view has keys omitted. 
-    res = testapp.post_json('/user', item)
-    return testapp.get(res.location).json
-
-
-@pytest.fixture
-def other_lab(testapp):
-    item = {
-        'title': 'Other lab',
-        'name': 'other-lab',
-    }
-    return testapp.post_json('/lab', item, status=201).json['@graph'][0]
-
-
-@pytest.fixture
-def simple_file(testapp, lab, award, file_formats):
-    item = {
-        'uuid': '3413218c-3d86-498b-a0a2-9a406638e777',
-        'file_format': file_formats.get('fastq').get('@id'),
-        'paired_end': '1',
-        'lab': lab['@id'],
-        'award': award['@id'],
-        'status': 'uploaded',  # avoid s3 upload codepath
-    }
-    return testapp.post_json('/file_fastq', item).json['@graph'][0]
-
-
-@pytest.fixture
-def step_run(testapp, lab, award):
-    software = {
-        'name': 'do-thing',
-        'description': 'It does the thing',
-        'title': 'THING_DOER',
-        'version': '1.0',
-        'software_type': "normalizer",
-        'award': award['@id'],
-        'lab': lab['@id']
-    }
-    sw = testapp.post_json('/software', software, status=201).json['@graph'][0]
-
-    analysis_step = {
-        'name': 'do-thing-step',
-        'version': 1,
-        'software_used': sw['@id']
-    }
-    return testapp.post_json('/analysis-steps', analysis_step, status=201).json['@graph'][0]
-
-
-@pytest.fixture
-def expt_w_cont_lab_item(lab, remc_lab, award, human_biosample, exp_types):
-    return {
-        'lab': lab['@id'],
-        'award': award['@id'],
-        'biosample': human_biosample['@id'],
-        'experiment_type': exp_types['microc']['@id'],
-        'contributing_labs': [remc_lab['@id']]
-    }
-
-
-@pytest.fixture
-def wrangler_testapp(wrangler, app, external_tx, zsa_savepoints):
-    return remote_user_testapp(app, wrangler['uuid'])
-
-
-@pytest.fixture
-def remc_member_testapp(remc_submitter, app, external_tx, zsa_savepoints):
-    return remote_user_testapp(app, remc_submitter['uuid'])
-
-
-@pytest.fixture
-def submitter_testapp(submitter, app, external_tx, zsa_savepoints):
-    return remote_user_testapp(app, submitter['uuid'])
-
-
-@pytest.fixture
-def lab_viewer_testapp(lab_viewer, app, external_tx, zsa_savepoints):
-    return remote_user_testapp(app, lab_viewer['uuid'])
-
-
-@pytest.fixture
-def award_viewer_testapp(award_viewer, app, external_tx, zsa_savepoints):
-    return remote_user_testapp(app, award_viewer['uuid'])
-
-
-@pytest.fixture
-def viewing_group_member_testapp(viewing_group_member, app, external_tx, zsa_savepoints):
-    # app for 4DN viewing group member
-    return remote_user_testapp(app, viewing_group_member['uuid'])
-
-
-@pytest.fixture
-def multi_viewing_group_member_testapp(multi_viewing_group_member, app, external_tx, zsa_savepoints):
-    # app with both 4DN and NOFIC viewing group
-    return remote_user_testapp(app, multi_viewing_group_member['uuid'])
-
-
-@pytest.fixture
-def nofic_group_member_testapp(nofic_group_member, app, external_tx, zsa_savepoints):
-    # app for NOFIC viewing group member
-    return remote_user_testapp(app, nofic_group_member['uuid'])
-
-
-@pytest.fixture
-def indexer_testapp(app, external_tx, zsa_savepoints):
-    return remote_user_testapp(app, 'INDEXER')
-
-
-@pytest.fixture
-def iwg_member(testapp):
-    item = {
-        'first_name': 'IWG',
-        'last_name': 'Member',
-        'email': 'iwgmember@example.org',
-        'viewing_groups': ['IWG'],
-        'status': 'current'
-    }
-    # User @@object view has keys omitted.
-    res = testapp.post_json('/user', item)
-    return testapp.get(res.location).json
-
-
-@pytest.fixture
-def arbitrary_group_member_testapp(iwg_member, app, external_tx, zsa_savepoints):
-    # app for arbitrary viewing_group member
-    return remote_user_testapp(app, iwg_member['uuid'])
-
-
-@pytest.fixture
-def bs_item(lab, award):
-    return {
-        'biosource_type': 'primary cell',
-        'lab': lab['@id'],
-        'award': award['@id'],
-        'status': 'submission in progress'
-    }
-
-
-vg_test_stati = ['planned', 'submission in progress', 'pre-release']
-
-
-@pytest.mark.parametrize('status', vg_test_stati)
-def test_arbitrary_viewing_group_can_view_item_w_viewable_by(
-        testapp, arbitrary_group_member_testapp, bs_item, iwg_member, status):
-    # post the item - the award has the 4DN viewing group and nothing related to IWG
-    bsres = testapp.post_json('/biosource', bs_item, status=201).json['@graph'][0]
-    # the vg testapp should not be able to get this item
-    arbitrary_group_member_testapp.get(bsres['@id'], status=403)
-    # now add viewable_by property to the item
-    vgres = testapp.patch_json(bsres['@id'], {'viewable_by': ['IWG'], "status": status}, status=200)
-    # now it should be gettable for each of the statuses
-    arbitrary_group_member_testapp.get(vgres.json['@graph'][0]['@id'], status=200)
-
-
-@pytest.mark.parametrize('status', vg_test_stati)
-def test_user_w_vg_cannot_view_item_w_vg_from_award(
-        testapp, remc_member_testapp, remc_award, bs_item, status):
-    """ For each of the statuses 'planned', 'submission in progress', and 'pre-release',
-    test that an item does not get a viewing_group principal added via the award,
-    so the item cannot be viewed. This covers an arbitrary viewing_group; the
-    special handling of NOFIC and JA items is covered by other tests.
-    """
-    bs_item['award'] = remc_award['@id']  # iwg award has 'not 4DN' vg as does the remc_submitter in the remc app
-    res = testapp.post_json('/biosource', bs_item, status=201).json['@graph'][0]
-    remc_member_testapp.get(res['@id'], status=403)
-
-
-def test_wrangler_post_non_lab_collection(wrangler_testapp):
-    item = {
-        'name': 'human',
-        'scientific_name': 'Homo sapiens',
-        'taxon_id': '9606',
-    }
-    return wrangler_testapp.post_json('/organism', item, status=201)
-
-
-def test_submitter_cant_post_non_lab_collection(submitter_testapp):
-    item = {
-        'name': 'human',
-        'scientific_name': 'Homo sapiens',
-        'taxon_id': '9606',
-    }
-    return submitter_testapp.post_json('/organism', item, status=403)
-
-
-def test_submitter_post_update_experiment(submitter_testapp, lab, award, human_biosample, exp_types):
-    experiment = {'lab': lab['@id'], 'award': award['@id'],
-                  'experiment_type': exp_types['microc']['@id'], 'biosample': human_biosample['@id']}
-    res = submitter_testapp.post_json('/experiments-hi-c', experiment, status=201)
-    location = res.location
-    res = submitter_testapp.get(location + '@@testing-allowed?permission=edit', status=200)
-    assert res.json['has_permission'] is True
-    assert 'submits_for.%s' % lab['uuid'] in res.json['principals_allowed_by_permission']
-    submitter_testapp.patch_json(location, {'description': 'My experiment'}, status=200)
-
-
-def test_submitter_cant_post_other_lab(submitter_testapp, other_lab, award, exp_types):
-    experiment = {'lab': other_lab['@id'], 'award': award['@id'], 'experiment_type': exp_types['microc']['@id']}
-    res = submitter_testapp.post_json('/experiments-hi-c', experiment, status=422)
-    # jsonschema - error ordering change
-    assert res.json['errors'][1]['name'] == 'Schema: lab'
assert "not in user submits_for" in res.json['errors'][1]['description'] - - -def test_wrangler_post_other_lab(wrangler_testapp, other_lab, award, human_biosample, exp_types): - experiment = {'lab': other_lab['@id'], 'award': award['@id'], - 'experiment_type': exp_types['microc']['@id'], 'biosample': human_biosample['@id']} - wrangler_testapp.post_json('/experiments-hi-c', experiment, status=201) - - -def test_submitter_view_experiement(submitter_testapp, submitter, lab, award, human_biosample, exp_types): - experiment = {'lab': lab['@id'], 'award': award['@id'], - 'experiment_type': exp_types['microc']['@id'], 'biosample': human_biosample['@id']} - res = submitter_testapp.post_json('/experiments-hi-c', experiment, status=201) - - submitter_testapp.get(res.json['@graph'][0]['@id'], status=200) - - -def test_user_view_details_admin(submitter, access_key, testapp): - res = testapp.get(submitter['@id']) - assert 'email' in res.json - - -def test_users_view_details_self(submitter, access_key, submitter_testapp): - res = submitter_testapp.get(submitter['@id']) - assert 'email' in res.json - - -def test_users_patch_self(submitter, access_key, submitter_testapp): - submitter_testapp.patch_json(submitter['@id'], {}) - - -def test_users_post_disallowed(submitter, access_key, submitter_testapp): - item = { - 'first_name': 'ENCODE', - 'last_name': 'Submitter2', - 'email': 'encode_submitter2@example.org', - } - submitter_testapp.post_json('/user', item, status=403) - - -def test_users_cannot_view_other_users_info_with_basic_authenticated(submitter, authenticated_testapp): - authenticated_testapp.get(submitter['@id'], status=403) - - -def test_users_can_see_their_own_user_info(submitter, submitter_testapp): - res = submitter_testapp.get(submitter['@id']) - assert 'title' in res.json - assert 'email' in res.json - - -def test_users_view_basic_anon(submitter, anontestapp): - anontestapp.get(submitter['@id'], status=403) - - -def test_users_view_basic_indexer(submitter, indexer_testapp): - res = indexer_testapp.get(submitter['@id']) - assert 'title' in res.json - assert 'email' not in res.json - assert 'access_keys' not in res.json - - -def test_viewing_group_member_view(viewing_group_member_testapp, experiment_project_release): - return viewing_group_member_testapp.get(experiment_project_release['@id'], status=200) - - -def test_lab_viewer_view(lab_viewer_testapp, experiment): - lab_viewer_testapp.get(experiment['@id'], status=200) - - -def test_award_viewer_view(award_viewer_testapp, experiment): - award_viewer_testapp.get(experiment['@id'], status=200) - - -def test_submitter_patch_lab_disallowed(submitter, other_lab, submitter_testapp): - res = submitter_testapp.get(submitter['@id']) - lab = {'lab': other_lab['@id']} - submitter_testapp.patch_json(res.json['@id'], lab, status=422) # is that the right status? 
- - -def test_wrangler_patch_lab_allowed(submitter, other_lab, wrangler_testapp): - res = wrangler_testapp.get(submitter['@id']) - lab = {'lab': other_lab['@id']} - wrangler_testapp.patch_json(res.json['@id'], lab, status=200) - - -def test_submitter_patch_submits_for_disallowed(submitter, other_lab, submitter_testapp): - res = submitter_testapp.get(submitter['@id']) - submits_for = {'submits_for': [res.json['submits_for'][0]['@id']] + [other_lab['@id']]} - submitter_testapp.patch_json(res.json['@id'], submits_for, status=422) - - -def test_wrangler_patch_submits_for_allowed(submitter, other_lab, wrangler_testapp): - res = wrangler_testapp.get(submitter['@id']) - submits_for = {'submits_for': [res.json['submits_for'][0]['@id']] + [other_lab['@id']]} - wrangler_testapp.patch_json(res.json['@id'], submits_for, status=200) - - -def test_submitter_patch_groups_disallowed(submitter, submitter_testapp): - res = submitter_testapp.get(submitter['@id']) - groups = {'groups': res.json.get('groups', []) + ['admin']} - submitter_testapp.patch_json(res.json['@id'], groups, status=422) - - -def test_wrangler_patch_groups_allowed(submitter, other_lab, wrangler_testapp): - res = wrangler_testapp.get(submitter['@id']) - groups = {'groups': res.json.get('groups', []) + ['admin']} - wrangler_testapp.patch_json(res.json['@id'], groups, status=200) - - -def test_submitter_patch_viewing_groups_disallowed(submitter, other_lab, submitter_testapp): - res = submitter_testapp.get(submitter['@id']) - vgroups = {'viewing_groups': res.json['viewing_groups'] + ['GGR']} - submitter_testapp.patch_json(res.json['@id'], vgroups, status=422) - - -def test_wrangler_patch_viewing_groups_allowed(submitter, wrangler_testapp): - res = wrangler_testapp.get(submitter['@id']) - vgroups = {'viewing_groups': res.json['viewing_groups'] + ['Not 4DN']} - wrangler_testapp.patch_json(res.json['@id'], vgroups, status=200) - - -def test_revoked_user_denied_authenticated(authenticated_testapp, revoked_user): - authenticated_testapp.get(revoked_user['@id'], status=403) - - -def test_revoked_user_denied_submitter(submitter_testapp, revoked_user): - submitter_testapp.get(revoked_user['@id'], status=403) - - -def test_revoked_user_wrangler(wrangler_testapp, revoked_user): - wrangler_testapp.get(revoked_user['@id'], status=200) - - -def test_labs_view_wrangler(wrangler_testapp, other_lab): - labs = wrangler_testapp.get('/labs/', status=200) - assert(len(labs.json['@graph']) == 1) - - -############################################## -# Permission tests based on different statuses -# Submitter created item and wants to view -@pytest.fixture -def ind_human_item(human, award, lab): - return { - 'award': award['@id'], - 'lab': lab['@id'], - 'organism': human['@id'] - } - - -@pytest.fixture -def file_item(award, lab, file_formats): - return { - 'award': award['@id'], - 'lab': lab['@id'], - 'file_format': file_formats.get('fastq').get('@id'), - 'paired_end': '1' - } - - -@pytest.fixture -def lab_item(lab): - return { - 'name': 'test-lab', - 'title': 'test lab', - } - - -def test_submitter_cannot_view_ownitem(ind_human_item, submitter_testapp, wrangler_testapp): - statuses = ['deleted'] - res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201) - for status in statuses: - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - submitter_testapp.get(res.json['@graph'][0]['@id'], status=403) - - -def test_contributing_lab_member_can_view_item(expt_w_cont_lab_item, submitter_testapp, - 
                                             remc_member_testapp, wrangler_testapp):
-    statuses = ['released', 'revoked', 'archived', 'released to project',
-                'archived to project', 'in review by lab', 'submission in progress', 'planned']
-    res = submitter_testapp.post_json('/experiment_hi_c', expt_w_cont_lab_item, status=201)
-    for status in statuses:
-        wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
-        remc_member_testapp.get(res.json['@graph'][0]['@id'], status=200)
-
-
-# Submitter created item and lab member wants to patch
-def test_contributing_lab_member_cannot_patch(expt_w_cont_lab_item, submitter_testapp,
-                                              remc_member_testapp, wrangler_testapp):
-    statuses = ['released', 'revoked', 'archived', 'released to project', 'archived to project',
-                'in review by lab', 'submission in progress', 'planned']
-    res = submitter_testapp.post_json('/experiment_hi_c', expt_w_cont_lab_item, status=201)
-    for status in statuses:
-        wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
-        remc_member_testapp.patch_json(res.json['@graph'][0]['@id'], {'sex': 'female'}, status=422)
-
-
-def test_submitter_can_view_ownitem(ind_human_item, submitter_testapp, wrangler_testapp):
-    statuses = ['current', 'released', 'revoked', 'archived', 'released to project', 'archived to project', 'in review by lab', 'submission in progress', 'planned']
-    res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
-    for status in statuses:
-        wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
-        submitter_testapp.get(res.json['@graph'][0]['@id'], status=200)
-
-
-def test_submitter_cannot_view_ownitem_replaced_using_accession(ind_human_item, submitter_testapp, wrangler_testapp):
-    res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
-    wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
-    my_accession = '/' + res.json['@graph'][0]['accession']
-    submitter_testapp.get(my_accession, status=404)
-
-
-def test_submitter_can_view_ownitem_replaced_using_uuid(ind_human_item, submitter_testapp, wrangler_testapp):
-    res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
-    wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
-    my_uuid = '/individuals-human/' + res.json['@graph'][0]['uuid'] + '/'
-    submitter_testapp.get(my_uuid, status=200)
-
-
-def test_submitter_can_view_ownitem_replaced_using_alias(ind_human_item, submitter_testapp, wrangler_testapp):
-    # alias will redirect to uuid
-    res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
-    res_p = wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced", "aliases": ['test:human']}, status=200)
-    my_alias = '/' + res_p.json['@graph'][0]['aliases'][0]
-    rep_res = submitter_testapp.get(my_alias, status=301)
-    # get the landing url, which is /object_type/uuid in this case
-    landing = rep_res.headers['Location'].replace('http://localhost', '')
-    submitter_testapp.get(landing, status=200)
-
-
-def test_submitter_replaced_item_redirects_to_new_one_with_accession(ind_human_item, submitter_testapp, wrangler_testapp):
-    # Post 2 individuals, set one to 'replaced', and put its accession in the
-    # alternate_accessions field of the second one. Using the old accession
-    # should then redirect to the new item.
-    # item that will be replaced (old item)
-    old = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
-    # item that will replace (new item)
-    new = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
-    # patch old one with status
-    wrangler_testapp.patch_json(old.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
-    # patch new one with alternate accession
-    wrangler_testapp.patch_json(new.json['@graph'][0]['@id'], {"alternate_accessions": [old.json['@graph'][0]['accession']]}, status=200)
-    # visit old item and assert that it lands on new item
-    rep_res = submitter_testapp.get(old.json['@graph'][0]['@id'], status=301)
-    # get the landing url, which includes a 'redirected_from' query param
-    redir_param = '?' + urlencode({'redirected_from': old.json['@graph'][0]['@id']})
-    landing = rep_res.headers['Location'].replace('http://localhost', '')
-    assert landing == new.json['@graph'][0]['@id'] + redir_param
-    submitter_testapp.get(landing, status=200)
-
-
-def test_submitter_replaced_item_doesnot_redirect_to_new_one_with_uuid(ind_human_item, submitter_testapp, wrangler_testapp):
-    # Post 2 individuals, set one to 'replaced', and put its accession in the
-    # alternate_accessions field of the second one. The old accession then
-    # redirects, but the old item should still be accessible with its uuid.
-    old = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
-    old_uuid = '/individuals-human/' + old.json['@graph'][0]['uuid'] + '/'
-    # item that will replace (new item)
-    new = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
-    # patch old one with status
-    wrangler_testapp.patch_json(old.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
-    # patch new one with alternate accession
-    patch_data = {"alternate_accessions": [old.json['@graph'][0]['accession']]}
-    wrangler_testapp.patch_json(new.json['@graph'][0]['@id'], patch_data, status=200)
-    # visit old uuid and assert that it lands on old item
-    submitter_testapp.get(old_uuid, status=200)
-
-
-def test_submitter_can_not_add_to_alternate_accession_if_not_replaced(ind_human_item, submitter_testapp, wrangler_testapp):
-    # an accession whose status is not 'replaced' cannot be added to alternate_accessions
-    old = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
-    # item that will replace (new item)
-    new = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
-    # patch old one with each non-replaced status
-    statuses = ['current', 'released', 'revoked', 'archived', 'released to project',
-                'archived to project', 'in review by lab', 'submission in progress', 'planned']
-    for status in statuses:
-        wrangler_testapp.patch_json(old.json['@graph'][0]['@id'], {"status": status}, status=200)
-        # try adding the accession to alternate accessions
-        # should result in conflict (409)
-        wrangler_testapp.patch_json(new.json['@graph'][0]['@id'], {"alternate_accessions": [old.json['@graph'][0]['accession']]}, status=409)
-
-
-# Submitter created item and wants to patch
-def test_submitter_cannot_patch_statuses(ind_human_item, submitter_testapp, wrangler_testapp):
-    statuses = ['deleted', 'current', 'released', 'revoked', 'archived', 'archived to project', 'released to project']
-    res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
-    for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - submitter_testapp.patch_json(res.json['@graph'][0]['@id'], {'sex': 'female'}, status=403) - - -def test_submitter_can_patch_statuses(ind_human_item, submitter_testapp, wrangler_testapp): - statuses = ['in review by lab', 'submission in progress', 'planned'] - res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201) - for status in statuses: - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - submitter_testapp.patch_json(res.json['@graph'][0]['@id'], {'sex': 'female'}, status=200) - - -def test_submitter_can_patch_file_statuses(file_item, submitter_testapp, wrangler_testapp): - statuses = ['uploading', 'uploaded', 'upload failed'] - res = submitter_testapp.post_json('/file_fastq', file_item, status=201) - for status in statuses: - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - submitter_testapp.patch_json(res.json['@graph'][0]['@id'], {'paired_end': '1'}, status=200) - - -def test_submitter_cannot_patch_file_statuses(file_item, submitter_testapp, wrangler_testapp): - statuses = ['released', 'revoked', 'deleted', 'released to project', 'archived to project', 'archived'] - res = submitter_testapp.post_json('/file_fastq', file_item, status=201) - for status in statuses: - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - submitter_testapp.patch_json(res.json['@graph'][0]['@id'], {'paired_end': '1'}, status=403) - - -def test_submitter_cannot_patch_replaced(ind_human_item, submitter_testapp, wrangler_testapp): - res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201) - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced"}, status=200) - # replaced items are not accessible by accession - my_uuid = '/' + res.json['@graph'][0]['uuid'] - submitter_testapp.patch_json(my_uuid, {'sex': 'female'}, status=403) - - -# Submitter created item and lab member wants to view -def test_labmember_cannot_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, lab_viewer_testapp): - statuses = ['deleted'] - res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201) - for status in statuses: - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - lab_viewer_testapp.get(res.json['@graph'][0]['@id'], status=403) - - -def test_labmember_can_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, lab_viewer_testapp): - statuses = ['current', 'released', 'revoked', 'released to project', 'in review by lab', - 'archived', 'archived to project', 'submission in progress', 'planned'] - res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201) - for status in statuses: - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - lab_viewer_testapp.get(res.json['@graph'][0]['@id'], status=200) - - -def test_labmember_can_view_submitter_file(file_item, submitter_testapp, wrangler_testapp, lab_viewer_testapp): - statuses = ['released', 'revoked', 'released to project', 'uploading', 'uploaded', 'upload failed', - 'archived', 'archived to project'] - res = submitter_testapp.post_json('/file_fastq', file_item, status=201) - for status in statuses: - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - 
lab_viewer_testapp.get(res.json['@graph'][0]['@id'], status=200) - - -def test_labmember_cannot_view_submitter_item_replaced_accession(ind_human_item, submitter_testapp, wrangler_testapp, lab_viewer_testapp): - res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201) - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced"}, status=200) - lab_viewer_testapp.get(res.json['@graph'][0]['@id'], status=404) - - -def test_labmember_can_view_submitter_item_replaced_uuid(ind_human_item, submitter_testapp, wrangler_testapp, lab_viewer_testapp): - res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201) - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced"}, status=200) - my_uuid = '/individuals-human/' + res.json['@graph'][0]['uuid'] + '/' - lab_viewer_testapp.get(my_uuid, status=200) - - -# Submitter created item and lab member wants to patch -def test_labmember_cannot_patch_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, lab_viewer_testapp): - statuses = ['current', 'released', 'revoked', 'archived', 'released to project', - 'archived to project', 'in review by lab', 'submission in progress', 'planned'] - res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201) - for status in statuses: - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - lab_viewer_testapp.patch_json(res.json['@graph'][0]['@id'], {'sex': 'female'}, status=403) - - -# Submitter created item and lab member wants to patch -def test_labmember_cannot_patch_submitter_file(file_item, submitter_testapp, wrangler_testapp, lab_viewer_testapp): - statuses = ['released', 'revoked', 'released to project', 'uploading', 'uploaded', - 'upload failed', 'archived', 'archived to project'] - res = submitter_testapp.post_json('/file_fastq', file_item, status=201) - for status in statuses: - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - lab_viewer_testapp.patch_json(res.json['@graph'][0]['@id'], {'paired_end': '2'}, status=403) - - -# person with shared award tests -def test_awardmember_cannot_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, award_viewer_testapp): - statuses = ['deleted'] - res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201) - for status in statuses: - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - award_viewer_testapp.get(res.json['@graph'][0]['@id'], status=403) - - -# people who share the same award should be able to view items that have yet to be released generally -def test_awardmember_can_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, award_viewer_testapp): - statuses = ['current', 'released', 'revoked', 'archived', 'in review by lab', 'pre-release', - 'released to project', 'submission in progress', 'planned'] - res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201) - for status in statuses: - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - award_viewer_testapp.get(res.json['@graph'][0]['@id'], status=200) - - -def test_awardmember_cannot_view_submitter_item_replaced(ind_human_item, submitter_testapp, wrangler_testapp, award_viewer_testapp): - res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201) - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": 
"replaced"}, status=200) - award_viewer_testapp.get(res.json['@graph'][0]['@id'], status=404) - - -# Submitter created item and lab member wants to patch -def test_awardmember_cannot_patch_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, award_viewer_testapp): - statuses = ['current', 'released', 'revoked', 'archived', 'released to project', 'in review by lab', - 'submission in progress', 'planned', 'archived to project', 'pre-release'] - res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201) - for status in statuses: - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - award_viewer_testapp.patch_json(res.json['@graph'][0]['@id'], {'sex': 'female'}, status=403) - - -# Submitter created item and project member wants to view -def test_viewing_group_member_cannot_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp): - statuses = ['deleted', 'in review by lab', 'submission in progress', 'planned'] - res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201) - for status in statuses: - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - viewing_group_member_testapp.get(res.json['@graph'][0]['@id'], status=403) - - -# Submitter created item and project member wants to view -def test_viewing_group_member_cannot_view_submitter_file(file_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp): - statuses = ['deleted', 'uploading', 'uploaded', 'upload failed', 'pre-release'] - res = submitter_testapp.post_json('/file_fastq', file_item, status=201) - for status in statuses: - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - viewing_group_member_testapp.get(res.json['@graph'][0]['@id'], status=403) - - -def test_viewing_group_member_can_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp): - statuses = ['current', 'released', 'revoked', 'released to project', - 'archived', 'archived to project'] - res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201) - for status in statuses: - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - viewing_group_member_testapp.get(res.json['@graph'][0]['@id'], status=200) - - -def test_viewing_group_member_can_view_submitter_file(file_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp): - statuses = ['released', 'revoked', 'released to project', 'archived to project'] - res = submitter_testapp.post_json('/file_fastq', file_item, status=201) - for status in statuses: - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - viewing_group_member_testapp.get(res.json['@graph'][0]['@id'], status=200) - - -def test_viewing_group_member_can_view_submitter_item_replaced_with_uuid(ind_human_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp): - res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201) - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced"}, status=200) - my_uuid = '/individuals-human/' + res.json['@graph'][0]['uuid'] + '/' - viewing_group_member_testapp.get(my_uuid, status=200) - - -def test_viewing_group_member_cannot_view_submitter_item_replaced_with_accession(ind_human_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp): - res = 
submitter_testapp.post_json('/individual_human', ind_human_item, status=201) - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced"}, status=200) - my_accession = '/' + res.json['@graph'][0]['accession'] - viewing_group_member_testapp.get(my_accession, status=404) - - -# Submitter created item and viewing group member wants to patch -def test_viewing_group_member_cannot_patch_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp): - statuses = ['current', 'released', 'revoked', 'archived', 'released to project', 'in review by lab', - 'archived to project', 'submission in progress', 'planned'] - res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201) - for status in statuses: - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - viewing_group_member_testapp.patch_json(res.json['@graph'][0]['@id'], {'sex': 'female'}, status=403) - - -def test_viewing_group_member_cannot_patch_submitter_file(file_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp): - statuses = ['released', 'revoked', 'archived', 'released to project', 'archived to project', - 'uploading', 'uploaded', 'upload failed'] - res = submitter_testapp.post_json('/file_fastq', file_item, status=201) - for status in statuses: - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - viewing_group_member_testapp.patch_json(res.json['@graph'][0]['@id'], {'paired_end': '2'}, status=403) - - -def test_non_member_can_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, remc_member_testapp): - statuses = ['current', 'released', 'revoked', 'archived'] - res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201) - for status in statuses: - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - remc_member_testapp.get(res.json['@graph'][0]['@id'], status=200) - - -def test_non_member_can_view_submitter_file(file_item, submitter_testapp, wrangler_testapp, remc_member_testapp): - statuses = ['released', 'revoked', 'archived'] - res = submitter_testapp.post_json('/file_fastq', file_item, status=201) - for status in statuses: - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - remc_member_testapp.get(res.json['@graph'][0]['@id'], status=200) - - -def test_non_member_cannot_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, remc_member_testapp): - statuses = ['released to project', 'archived to project', 'submission in progress', - 'in review by lab', 'deleted', 'planned'] - res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201) - for status in statuses: - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - remc_member_testapp.get(res.json['@graph'][0]['@id'], status=403) - - -def test_non_member_cannot_view_submitter_file(file_item, submitter_testapp, wrangler_testapp, remc_member_testapp): - statuses = ['released to project', 'archived to project', 'uploading', 'uploaded', 'upload failed'] - res = submitter_testapp.post_json('/file_fastq', file_item, status=201) - for status in statuses: - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - remc_member_testapp.get(res.json['@graph'][0]['@id'], status=403) - - -def test_everyone_can_view_lab_item(lab_item, submitter_testapp, wrangler_testapp, 
remc_member_testapp): - statuses = ['current', 'revoked', 'inactive'] - apps = [submitter_testapp, wrangler_testapp, remc_member_testapp] - res = wrangler_testapp.post_json('/lab', lab_item, status=201) - for status in statuses: - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200) - for app in apps: - app.get(res.json['@graph'][0]['@id'], status=200) - - -def test_noone_can_view_deleted_lab_item(lab_item, submitter_testapp, wrangler_testapp, remc_member_testapp): - lab_item['status'] = 'deleted' - viewing_apps = [submitter_testapp, remc_member_testapp] - res = wrangler_testapp.post_json('/lab', lab_item, status=201) - for app in viewing_apps: - app.get(res.json['@graph'][0]['@id'], status=403) - - -def test_lab_submitter_can_edit_lab(lab, submitter_testapp, wrangler_testapp): - res = submitter_testapp.get(lab['@id']) - wrangler_testapp.patch_json(res.json['@id'], {'status': 'current'}, status=200) - submitter_testapp.patch_json(res.json['@id'], {'city': 'My fair city'}, status=200) - - -def test_statuses_that_lab_submitter_cannot_edit_lab(lab, submitter_testapp, wrangler_testapp): - statuses = ['deleted', 'revoked', 'inactive'] - res = submitter_testapp.get(lab['@id']) - for status in statuses: - wrangler_testapp.patch_json(res.json['@id'], {'status': status}, status=200) - submitter_testapp.patch_json(res.json['@id'], {'city': 'My fair city'}, status=403) - - -def test_lab_submitter_cannot_edit_lab_name_or_title(lab, submitter_testapp, wrangler_testapp): - res = submitter_testapp.get(lab['@id']) - wrangler_testapp.patch_json(res.json['@id'], {'status': 'current'}, status=200) - submitter_testapp.patch_json(res.json['@id'], {'title': 'Test Lab, HMS'}, status=422) - submitter_testapp.patch_json(res.json['@id'], {'name': 'test-lab'}, status=422) - - -def test_wrangler_can_edit_lab_name_or_title(lab, submitter_testapp, wrangler_testapp): - statuses = ['deleted', 'revoked', 'inactive', 'current'] - new_name = 'test-lab' - new_id = '/labs/test-lab/' - res = submitter_testapp.get(lab['@id']) - original_id = res.json['@id'] - original_name = res.json['name'] - for status in statuses: - wrangler_testapp.patch_json(original_id, {'status': status}, status=200) - wrangler_testapp.patch_json(original_id, {'title': 'Test Lab, HMS'}, status=200) - wrangler_testapp.patch_json(original_id, {'name': new_name}, status=200) - wrangler_testapp.patch_json(new_id, {'name': original_name}, status=200) - - -def test_ac_local_roles_for_lab(registry): - lab_data = { - 'status': 'in review by lab', - 'award': 'b0b9c607-bbbb-4f02-93f4-9895baa1334b', - 'uuid': '828cd4fe-aaaa-4b36-a94a-d2e3a36aa989' - } - test_lab = Lab.create(registry, None, lab_data) - lab_ac_locals = test_lab.__ac_local_roles__() - assert(LAB_SUBMITTER_ROLE in lab_ac_locals.values()) - assert(LAB_MEMBER_ROLE in lab_ac_locals.values()) - - -def test_last_modified_works_correctly(ind_human_item, submitter, wrangler, submitter_testapp, wrangler_testapp): - res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201).json['@graph'][0] - assert res['last_modified']['modified_by'] == submitter['@id'] - # patch same item using a different user - res2 = wrangler_testapp.patch_json(res['@id'], {"status": "current"}, status=200).json['@graph'][0] - assert res2['last_modified']['modified_by'] == wrangler['@id'] - assert res2['last_modified']['date_modified'] > res['last_modified']['date_modified'] - - -@pytest.fixture -def individual_human(human, remc_lab, nofic_award, wrangler_testapp): - ind_human 
= {'lab': remc_lab['@id'], 'award': nofic_award['@id'], 'organism': human['@id']} - return wrangler_testapp.post_json('/individual_human', ind_human, status=201).json['@graph'][0] - - -def test_multi_viewing_group_viewer_can_view_nofic_when_submission_in_progress( - wrangler_testapp, multi_viewing_group_member_testapp, individual_human): - wrangler_testapp.patch_json(individual_human['@id'], {'status': 'submission in progress'}, status=200) - wrangler_testapp.get(individual_human['@id'], status=200) - multi_viewing_group_member_testapp.get(individual_human['@id'], status=200) - - -def test_viewing_group_viewer_cannot_view_nofic_when_submission_in_progress( - wrangler_testapp, viewing_group_member_testapp, individual_human): - wrangler_testapp.patch_json(individual_human['@id'], {'status': 'submission in progress'}, status=200) - viewing_group_member_testapp.get(individual_human['@id'], status=403) - - -### These aren't strictly permissions tests but putting them here so we don't need to -### move around wrangler and submitter testapps and associated fixtures - - -@pytest.fixture -def planned_experiment_set_data(lab, award): - return { - 'lab': lab['@id'], - 'award': award['@id'], - 'description': 'test experiment set', - 'experimentset_type': 'custom', - } - - -@pytest.fixture -def status2date(): - return { - 'released': 'public_release', - 'released to project': 'project_release' - } - - -def test_planned_item_status_can_be_updated_by_admin( - submitter_testapp, wrangler_testapp, planned_experiment_set_data): - # submitter cannot change status so wrangler needs to patch - res1 = submitter_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0] - assert res1['status'] == 'in review by lab' - res2 = wrangler_testapp.patch_json(res1['@id'], {'status': 'planned'}).json['@graph'][0] - assert res2['status'] == 'planned' - - -def test_planned_item_status_is_not_changed_on_admin_patch( - submitter_testapp, wrangler_testapp, planned_experiment_set_data): - desc = 'updated description' - res1 = submitter_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0] - wrangler_testapp.patch_json(res1['@id'], {'status': 'planned'}, status=200) - res2 = wrangler_testapp.patch_json(res1['@id'], {'description': desc}).json['@graph'][0] - assert res2['description'] == desc - assert res2['status'] == 'planned' - - -def test_planned_item_status_is_changed_on_submitter_patch( - submitter_testapp, wrangler_testapp, planned_experiment_set_data): - desc = 'updated description' - res1 = submitter_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0] - wrangler_testapp.patch_json(res1['@id'], {'status': 'planned'}, status=200) - res2 = submitter_testapp.patch_json(res1['@id'], {'description': desc}).json['@graph'][0] - assert res2['description'] == desc - assert res2['status'] == 'submission in progress' - - -# these tests are for the item _update function as above so sticking them here -def test_unreleased_item_does_not_get_release_date( - wrangler_testapp, planned_experiment_set_data, status2date): - res1 = wrangler_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0] - assert res1['status'] == 'in review by lab' - for datefield in status2date.values(): - assert datefield not in res1 - - -def test_insert_of_released_item_does_get_release_date( - wrangler_testapp, planned_experiment_set_data, status2date): - - for status, datefield in status2date.items(): -
planned_experiment_set_data['status'] = status - res = wrangler_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0] - assert res['status'] == status - assert res[datefield] == date.today().isoformat() - if status in ['released', 'current']: - assert res['project_release'] == res['public_release'] - - -def test_update_of_item_to_released_status_adds_release_date( - wrangler_testapp, planned_experiment_set_data, status2date): - for status, datefield in status2date.items(): - res1 = wrangler_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0] - assert res1['status'] == 'in review by lab' - assert datefield not in res1 - res2 = wrangler_testapp.patch_json(res1['@id'], {'status': status}, status=200).json['@graph'][0] - assert res2['status'] == status - assert res2[datefield] == date.today().isoformat() - if status == 'released to project': - assert 'public_release' not in res2 - if status in ['released', 'current']: - assert res2['project_release'] == res2['public_release'] - - -def test_update_of_item_to_non_released_status_does_not_add_release_date( - wrangler_testapp, planned_experiment_set_data): - statuses = ["planned", "revoked", "deleted", "obsolete", "replaced", "in review by lab", "submission in progress"] - datefields = ['public_release', 'project_release'] - for status in statuses: - res1 = wrangler_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0] - assert res1['status'] == 'in review by lab' - res2 = wrangler_testapp.patch_json(res1['@id'], {'status': status}, status=200).json['@graph'][0] - assert res2['status'] == status - for datefield in datefields: - assert datefield not in res1 - assert datefield not in res2 - - -def test_update_of_item_that_has_release_date_does_not_change_release_date( - wrangler_testapp, planned_experiment_set_data, status2date): - test_date = '2001-01-01' - for status, datefield in status2date.items(): - planned_experiment_set_data[datefield] = test_date - res1 = wrangler_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0] - assert res1['status'] == 'in review by lab' - assert res1[datefield] == test_date - res2 = wrangler_testapp.patch_json(res1['@id'], {'status': status}, status=200).json['@graph'][0] - assert res2['status'] == status - assert res2[datefield] == test_date - - -def test_update_of_item_without_release_dates_mixin(wrangler_testapp, award): - assert award['status'] == 'current' - datefields = ['public_release', 'project_release'] - for field in datefields: - assert field not in award - - -# tests for bogus nofic specific __ac_local_roles__ -def test_4dn_can_view_nofic_released_to_project( - planned_experiment_set_data, wrangler_testapp, viewing_group_member_testapp, - nofic_award): - eset_item = planned_experiment_set_data - eset_item['award'] = nofic_award['@id'] - eset_item['status'] = 'released to project' - res1 = wrangler_testapp.post_json('/experiment_set', eset_item).json['@graph'][0] - viewing_group_member_testapp.get(res1['@id'], status=200) - - -def test_4dn_cannot_view_nofic_not_joint_analysis_planned_and_in_progress( - planned_experiment_set_data, wrangler_testapp, viewing_group_member_testapp, - nofic_award): - statuses = ['planned', 'submission in progress'] - eset_item = planned_experiment_set_data - eset_item['award'] = nofic_award['@id'] - for status in statuses: - eset_item['status'] = status - res1 = wrangler_testapp.post_json('/experiment_set', eset_item).json['@graph'][0] - 
viewing_group_member_testapp.get(res1['@id'], status=403) - - -def test_4dn_can_view_nofic_joint_analysis_planned_and_in_progress( - planned_experiment_set_data, wrangler_testapp, viewing_group_member_testapp, - nofic_award): - statuses = ['planned', 'submission in progress'] - eset_item = planned_experiment_set_data - eset_item['award'] = nofic_award['@id'] - eset_item['tags'] = ['Joint Analysis'] - for status in statuses: - eset_item['status'] = status - res1 = wrangler_testapp.post_json('/experiment_set', eset_item).json['@graph'][0] - viewing_group_member_testapp.get(res1['@id'], status=200) - - -@pytest.fixture -def replicate_experiment_set_data(lab, award): - return { - 'lab': lab['@id'], - 'award': award['@id'], - 'description': 'test replicate experiment set', - 'experimentset_type': 'replicate', - } - - -def test_ready_to_process_set_status_admin_can_edit( - submitter_testapp, wrangler_testapp, replicate_experiment_set_data): - res1 = submitter_testapp.post_json('/experiment_set_replicate', replicate_experiment_set_data).json['@graph'][0] - assert res1['status'] == 'in review by lab' - res2 = wrangler_testapp.patch_json(res1['@id'], {'status': 'pre-release'}).json['@graph'][0] - assert res2['status'] == 'pre-release' - # admin can edit - res3 = wrangler_testapp.patch_json(res1['@id'], {'description': 'admin edit'}, status=200).json['@graph'][0] - assert res3['description'] == 'admin edit' - - -def test_ready_to_process_set_status_submitter_can_view( - submitter_testapp, wrangler_testapp, replicate_experiment_set_data): - res1 = submitter_testapp.post_json('/experiment_set_replicate', replicate_experiment_set_data).json['@graph'][0] - assert res1['status'] == 'in review by lab' - res2 = wrangler_testapp.patch_json(res1['@id'], {'status': 'pre-release'}).json['@graph'][0] - assert res2['status'] == 'pre-release' - # submitter can view - res3 = submitter_testapp.get(res1['@id'], status=200).json - assert res3['description'] == 'test replicate experiment set' - - -def test_ready_to_process_set_status_submitter_can_not_edit( - submitter_testapp, wrangler_testapp, replicate_experiment_set_data): - res1 = submitter_testapp.post_json('/experiment_set_replicate', replicate_experiment_set_data).json['@graph'][0] - assert res1['status'] == 'in review by lab' - res2 = wrangler_testapp.patch_json(res1['@id'], {'status': 'pre-release'}).json['@graph'][0] - assert res2['status'] == 'pre-release' - # submitter cannot edit - submitter_testapp.patch_json(res1['@id'], {'description': 'submitter edit'}, status=403) - - -def test_ready_to_process_set_status_others_can_not_view( - submitter_testapp, wrangler_testapp, viewing_group_member_testapp, replicate_experiment_set_data): - res1 = submitter_testapp.post_json('/experiment_set_replicate', replicate_experiment_set_data).json['@graph'][0] - assert res1['status'] == 'in review by lab' - res2 = wrangler_testapp.patch_json(res1['@id'], {'status': 'pre-release'}).json['@graph'][0] - assert res2['status'] == 'pre-release' - # others cannot view - viewing_group_member_testapp.get(res1['@id'], status=403) - - -@pytest.fixture -def static_section_item(): - return { - 'name': 'static-section.test_ss', - 'title': 'Test Static Section', - 'body': 'This is a test section' - } - - -def test_static_section_with_lab_view_by_lab_member( - wrangler_testapp, lab_viewer_testapp, lab, static_section_item): - static_section_item['lab'] = lab['@id'] - static_section_item['status'] = 'released to lab' - res = wrangler_testapp.post_json('/static_section',
static_section_item).json['@graph'][0] - lab_viewer_testapp.get(res['@id'], status=200) - - -def test_permissions_validate_false(award, lab, file_formats, submitter_testapp, wrangler_testapp): - """ - Only admin can use validate=false with POST/PUT/PATCH - """ - file_item_body = { - 'award': award['uuid'], - 'lab': lab['uuid'], - 'file_format': file_formats.get('fastq').get('uuid'), - 'paired_end': '1' - } - # does it matter that the submitter posts this? I don't think so for this test - Will 03/23/2021 - res = submitter_testapp.post_json('/file_fastq', file_item_body, status=201) - - # no permissions - submitter_testapp.post_json('/file_fastq/?validate=false', file_item_body, status=403) - submitter_testapp.patch_json(res.json['@graph'][0]['@id'] + '?validate=false', - {'paired_end': '1'}, status=403) - submitter_testapp.put_json(res.json['@graph'][0]['@id'] + '?validate=false', - file_item_body, status=403) - # okay permissions - try: - wrangler_testapp.post_json('/file_fastq/?validate=false&upgrade=False', file_item_body, status=201) - except TypeError: # thrown from open_data_url, but should make it there - pass # we are ok, any other exception should be thrown - - wrangler_testapp.patch_json(res.json['@graph'][0]['@id'] + '?validate=false', - {'paired_end': '1'}, status=200) - try: - wrangler_testapp.put_json(res.json['@graph'][0]['@id'] + '?validate=false', - file_item_body, status=200) - except TypeError: # thrown from open_data_url, but should make it there - pass # we are ok, any other exception should be thrown - - -def test_permissions_database_applies_permissions(award, lab, file_formats, wrangler_testapp, anontestapp): - """ Tests that anontestapp gets view denied when using datastore=database - moved from test_indexing as this test has no bearing on ES """ - file_item_body = { - 'award': award['uuid'], - 'lab': lab['uuid'], - 'file_format': file_formats.get('fastq').get('uuid'), - 'paired_end': '1', - 'status': 'released', - 'uuid': 'f40ecbb0-294f-4a51-9b1b-effaddb14b1d', - 'accession': '4DNFIFGXXBKV' - } - res = wrangler_testapp.post_json('/file_fastq', file_item_body, status=201).json - item_id = res['@graph'][0]['@id'] - res = anontestapp.get('/' + item_id).json - assert res['file_format'] == {'error': 'no view permissions'} - res = anontestapp.get('/' + item_id + '?datastore=database').json - assert res['file_format'] == {'error': 'no view permissions'} diff --git a/src/encoded/tests/test_post_put_patch.py b/src/encoded/tests/test_post_put_patch.py deleted file mode 100644 index e8bed20265..0000000000 --- a/src/encoded/tests/test_post_put_patch.py +++ /dev/null @@ -1,402 +0,0 @@ -import pytest - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - -targets = [ - {'name': 'one', 'uuid': '775795d3-4410-4114-836b-8eeecf1d0c2f'}, - {'name': 'two', 'uuid': 'd6784f5e-48a1-4b40-9b11-c8aefb6e1377'}, -] - -item = { - 'required': 'required value', -} - -simple1 = { - 'required': 'required value', - 'simple1': 'supplied simple1', -} - -simple2 = { - 'required': 'required value', - 'simple2': 'supplied simple2', -} - -item_with_uuid = [ - { - 'uuid': '0f13ff76-c559-4e70-9497-a6130841df9f', - 'required': 'required value 1', - 'field_no_default': 'test' - }, - { - 'uuid': '6c3e444b-f290-43c4-bfb9-d20135377770', - 'required': 'required value 2', - }, -] - -item_with_link = [ - { - 'required': 'required value 1', - 'protected_link': '775795d3-4410-4114-836b-8eeecf1d0c2f', - }, - { - 'required': 'required value 2', - 'protected_link': 'd6784f5e-48a1-4b40-9b11-c8aefb6e1377', - }, -]
- - -COLLECTION_URL = '/testing-post-put-patch/' - - -@pytest.fixture -def link_targets(testapp): - url = '/testing-link-targets/' - for item in targets: - testapp.post_json(url, item, status=201) - - -@pytest.fixture -def content(testapp): - res = testapp.post_json(COLLECTION_URL, item_with_uuid[0], status=201) - return {'@id': res.location} - - -@pytest.fixture -def content_with_child(testapp): - parent_res = testapp.post_json('/testing-link-targets/', {}, status=201) - parent_id = parent_res.json['@graph'][0]['@id'] - child_res = testapp.post_json('/testing-link-sources/', {'target': parent_id}) - child_id = child_res.json['@graph'][0]['@id'] - return {'@id': parent_id, 'child': child_id} - - -def test_admin_post(testapp): - testapp.post_json(COLLECTION_URL, item, status=201) - testapp.post_json(COLLECTION_URL, item_with_uuid[0], status=201) - - -def test_submitter_post(submitter_testapp): - testapp = submitter_testapp - testapp.post_json(COLLECTION_URL, item, status=201) - res = testapp.post_json(COLLECTION_URL, item_with_uuid[0], status=422) - assert any(error.get('name') == 'Schema: uuid' for error in res.json['errors']) - - -def test_admin_put_uuid(content, testapp): - url = content['@id'] - # so long as the same uuid is supplied, PUTing the uuid is fine - testapp.put_json(url, item_with_uuid[0], status=200) - # but the uuid may not be changed on PUT; - testapp.put_json(url, item_with_uuid[1], status=422) - - -def test_submitter_put_uuid(content, submitter_testapp): - testapp = submitter_testapp - url = content['@id'] - # so long as the same uuid is supplied, PUTing the uuid is fine - testapp.put_json(url, item_with_uuid[0], status=200) - # but the uuid may not be changed on PUT; - testapp.put_json(url, item_with_uuid[1], status=422) - - -def test_defaults_on_put(content, testapp): - url = content['@id'] - res = testapp.get(url) - assert res.json['simple1'] == 'simple1 default' - assert res.json['simple2'] == 'simple2 default' - - res = testapp.put_json(url, simple1, status=200) - assert res.json['@graph'][0]['simple1'] == 'supplied simple1' - assert res.json['@graph'][0]['simple2'] == 'simple2 default' - - res = testapp.put_json(url, simple2, status=200) - assert res.json['@graph'][0]['simple1'] == 'simple1 default' - assert res.json['@graph'][0]['simple2'] == 'supplied simple2' - - -def test_patch(content, testapp): - url = content['@id'] - res = testapp.get(url) - assert res.json['simple1'] == 'simple1 default' - assert res.json['simple2'] == 'simple2 default' - - res = testapp.patch_json(url, {}, status=200) - assert res.json['@graph'][0]['simple1'] == 'simple1 default' - assert res.json['@graph'][0]['simple2'] == 'simple2 default' - - res = testapp.patch_json(url, {'simple1': 'supplied simple1'}, status=200) - assert res.json['@graph'][0]['simple1'] == 'supplied simple1' - assert res.json['@graph'][0]['simple2'] == 'simple2 default' - - res = testapp.patch_json(url, {'simple2': 'supplied simple2'}, status=200) - assert res.json['@graph'][0]['simple1'] == 'supplied simple1' - assert res.json['@graph'][0]['simple2'] == 'supplied simple2' - - -def test_patch_new_schema_version(content, root, testapp, monkeypatch): - collection = root['testing_post_put_patch'] - properties = collection.type_info.schema['properties'] - - url = content['@id'] - res = testapp.get(url) - assert res.json['schema_version'] == '1' - - monkeypatch.setitem(properties['schema_version'], 'default', '2') - monkeypatch.setattr(collection.type_info, 'schema_version', '2') - monkeypatch.setitem(properties, 
'new_property', {'default': 'new'}) - res = testapp.patch_json(url, {}, status=200) - assert res.json['@graph'][0]['schema_version'] == '2' - assert res.json['@graph'][0]['new_property'] == 'new' - - -def test_admin_put_protected_link(link_targets, testapp): - res = testapp.post_json(COLLECTION_URL, item_with_link[0], status=201) - url = res.location - - testapp.put_json(url, item_with_link[0], status=200) - testapp.put_json(url, item_with_link[1], status=200) - - -def test_submitter_put_protected_link(link_targets, testapp, submitter_testapp): - res = testapp.post_json(COLLECTION_URL, item_with_link[0], status=201) - url = res.location - - submitter_testapp.put_json(url, item_with_link[0], status=200) - submitter_testapp.put_json(url, item_with_link[1], status=422) - - -def test_put_object_not_touching_children(content_with_child, testapp): - """ - The rev_link style of editing children is removed as of FF-1089 - This will still pass - """ - url = content_with_child['@id'] - res = testapp.put_json(url, {}, status=200) - assert content_with_child['child'] in res.json['@graph'][0]['reverse'] - - -def test_put_object_editing_child_does_not_work(content_with_child, testapp): - """ - This will not post a rev link, since editing children through reverse links - is no longer done and linkFrom is removed. - """ - edit = { - 'reverse': [{ - '@id': content_with_child['child'], - 'status': 'released', - }] - } - # cannot submit 'reverse' calc property - res = testapp.put_json(content_with_child['@id'], edit, status=422).json - assert len(res['errors']) == 1 - assert res['errors'][0]['description'] == 'submission of calculatedProperty disallowed' - assert res['errors'][0]['name'] == 'Schema: reverse' - - get_res = testapp.get(content_with_child['child'] + '?frame=embedded').json - assert 'status' not in get_res - - -def test_post_object_with_child(content_with_child, testapp): - """ - This will not post a rev link, since editing children through reverse links - is no longer done and linkFrom is removed. 
- """ - edit = { - 'reverse': [ - {'status': 'released'} - ] - } - res = testapp.post_json('/testing-link-targets', edit, status=422).json - errors = sorted(res['errors'], key=lambda d: d['description']) - assert len(errors) == 2 - assert errors[1]['description'] == 'submission of calculatedProperty disallowed' - assert errors[1]['name'] == 'Schema: reverse' - - -def test_retry(testapp): - res = testapp.post_json('/testing-post-put-patch/', {'required': ''}) - url = res.location - res = testapp.get(url + '/@@testing-retry?datastore=database') - assert res.json['attempt'] == 2 - assert not res.json['detached'] - - -def test_post_check_only(testapp, human_data, human): - """ - organism should validate fine but not post - """ - # if we post this data it will fail with uuid conflict, as calling the human fixture posts it - testapp.post_json('/organism/', human_data, status=409) - - # so this one won't post, but schema validation is ok, - # note it doesn't detect primary key - rest = testapp.post_json('/organism/?check_only=true', human_data).json - assert rest['status'] == 'success' - - -def test_put_check_only(testapp, human_data, human): - ''' - organism should validate fine but not post - ''' - #if we post this data it will fail with invalid status - - testapp.post_json('/organism/', human_data, status=409) - - # so this one won't post, but schema validation is ok, - # note it doesn't detect primary key - rest = testapp.post_json('/organism/?check_only=true', human_data).json - assert rest['status'] == 'success' - -def test_post_check_only_invalid_data(testapp, human_data): - ''' - note theese test should work on any object - ''' - human_data['taxon_id'] = 24; - testapp.post_json('/organism/?check_only=true', human_data, status=422) - - -def test_put_check_only(testapp, human_data, human): - ''' - organism should validate fine but not post - ''' - # human_data has already been posted, now put with invalid status - human_data['status'] = 'no a valid status' - testapp.put_json('/organisms/human/?check_only=true', human_data, status=422) - - # so this one won't post, but schema validation is ok, - # note it doesn't detect primary key - human_data['status'] = human['status'] - rest = testapp.put_json('/organisms/human/?check_only=true', human_data).json - assert rest['status'] == 'success' - - -def test_patch_check_only(testapp, human_data, human): - ''' - organism should validate fine but not post - ''' - # human_data has already been posted, now put with invalid status - human_data['status'] = 'no a valid status' - testapp.patch_json('/organisms/human/?check_only=true', human_data, status=422) - - # so this one won't post, but schema validation is ok, - # note it doesn't detect primary key - human_data['status'] = human['status'] - rest = testapp.patch_json('/organisms/human/?check_only=true', human_data).json - assert rest['status'] == 'success' - - -def test_patch_delete_fields(content, testapp): - url = content['@id'] - res = testapp.get(url) - assert res.json['simple1'] == 'simple1 default' - assert res.json['simple2'] == 'simple2 default' - assert res.json['field_no_default'] == 'test' - - res = testapp.patch_json(url, {'simple1': 'this is a test'}, status=200) - assert res.json['@graph'][0]['simple1'] == 'this is a test' - - # delete fields with defaults resets to default, while deleting non default field - # completely removes them - res = testapp.patch_json(url + "?delete_fields=simple1,field_no_default", {}, status=200) - assert 'field_no_default' not in res.json['@graph'][0].keys() 
- assert res.json['@graph'][0]['simple1'] == 'simple1 default' - - -def test_patch_delete_fields_non_string(content, testapp): - url = content['@id'] - res = testapp.get(url) - - # deleting a field with a default resets it to the default, while deleting a field - # with no default removes it completely - res = testapp.patch_json(url + "?delete_fields=schema_version", {}, status=200) - assert res.json['@graph'][0]['schema_version'] == '1' - - -def test_patch_delete_fields_fails_with_no_validation(content, testapp): - url = content['@id'] - res = testapp.get(url) - assert res.json['simple1'] == 'simple1 default' - assert res.json['simple2'] == 'simple2 default' - assert res.json['field_no_default'] == 'test' - - # using delete_fields with validate=False will now raise a validation error - res = testapp.patch_json(url + "?delete_fields=simple1,field_no_default&validate=false", {}, status=422) - assert res.json['description'] == "Failed validation" - assert res.json['errors'][0]['name'] == 'delete_fields' - assert 'Cannot delete fields' in res.json['errors'][0]['description'] - - -def test_patch_delete_fields_bad_param(content, testapp): - """ - delete_fields should not fail with a bad fieldname, but simply ignore it - """ - url = content['@id'] - res = testapp.get(url) - assert res.json['simple1'] == 'simple1 default' - assert res.json['simple2'] == 'simple2 default' - assert res.json['field_no_default'] == 'test' - res = testapp.patch_json(url + "?delete_fields=simple1,bad_fieldname", {}, status=200) - # default value - assert res.json['@graph'][0]['simple1'] == 'simple1 default' - assert 'bad_fieldname' not in res.json['@graph'][0] - - -def test_patch_delete_fields_import_items_admin(link_targets, testapp): - res = testapp.post_json(COLLECTION_URL, item_with_link[0], status=201) - url = res.location - assert res.json['@graph'][0]['protected_link'] - res = testapp.patch_json(url + "?delete_fields=protected_link", {}, status=200) - - -def test_patch_delete_fields_import_items_submitter(content, testapp, submitter_testapp): - """ - Since the deleted protected field has a default value in the schema, there - are two cases for this test: - 1. No validation problems if previous value == default value (allow delete - to happen, since it will effectively do nothing) - 2.
ValidationFailure if previous protected value != default value - """ - url = content['@id'] - res = testapp.get(url) - assert res.json['protected'] == 'protected default' - res1 = submitter_testapp.patch_json(url + "?delete_fields=protected", {}, status=200) - assert res1.json['@graph'][0]['protected'] == 'protected default' - - # change protected value - res = testapp.patch_json(url, {'protected': 'protected new'}, status=200) - assert res.json['@graph'][0]['protected'] == 'protected new' - - res2 = submitter_testapp.patch_json(url + "?delete_fields=protected", {}, status=422) - res_errors = res2.json['errors'] - assert len(res_errors) == 2 - assert res_errors[0]['name'] == "Schema: protected" - assert res_errors[0]['description'] == "permission 'import_items' required" - assert res_errors[1]['name'] == 'delete_fields' - assert res_errors[1]['description'] == 'Error deleting fields' - - -def test_patch_delete_fields_required(content, testapp): - url = content['@id'] - res = testapp.patch_json(url + "?delete_fields=required", {}, status=422) - assert res.json['description'] == "Failed validation" - assert res.json['errors'][0]['name'] == 'Schema: ' - assert res.json['errors'][0]['description'] == "'required' is a required property" - - -def test_name_key_validation(link_targets, testapp): - # name_key - target_data = {'name': 'one#name'} - res = testapp.post_json('/testing-link-targets/', target_data, status=422) - assert res.json['description'] == 'Failed validation' - res_error = res.json['errors'][0] - assert res_error['name'] == 'Item: path characters' - assert "Forbidden character(s) {'#'}" in res_error['description'] - - # unique_key - source_data = {'name': 'two@*name', 'target': targets[0]['uuid']} - res = testapp.post_json('/testing-link-sources/', source_data, status=422) - assert res.json['description'] == 'Failed validation' - res_error = res.json['errors'][0] - assert res_error['name'] == 'Item: path characters' - assert "Forbidden character(s) {'*'}" in res_error['description'] diff --git a/src/encoded/tests/test_purge_item_type.py b/src/encoded/tests/test_purge_item_type.py deleted file mode 100644 index b4c0cfd540..0000000000 --- a/src/encoded/tests/test_purge_item_type.py +++ /dev/null @@ -1,70 +0,0 @@ -import pytest -from dcicutils.qa_utils import notice_pytest_fixtures -#from .workbook_fixtures import es_app, es_testapp, es_app_settings # es app without workbook -from encoded.commands.purge_item_type import purge_item_type_from_storage - - -# notice_pytest_fixtures(es_app, es_app_settings, es_testapp) - - -pytestmark = [pytest.mark.broken] - - -@pytest.fixture -def dummy_static_section(es_testapp): - static_section = { # from workbook_inserts - "name": "search-info-header.Workflow_copy", - "uuid": "442c8aa0-dc6c-43d7-814a-854af460b015", - "section_type": "Search Info Header", - "title": "Workflow Information", - "body": "Some text to be rendered as a header" - } - es_testapp.post_json('/static_section', static_section, status=[201, 409]) - es_testapp.post_json('/index', {'record': True}) - - -@pytest.fixture -def many_dummy_static_sections(es_testapp): - static_section_template = { - "name": "search-info-header.Workflow", - "section_type": "Search Info Header", - "title": "Workflow Information", - "body": "Some text to be rendered as a header" - } - paths = [] - for i in range(2): # arbitrarily defined, lowered for efficiency - static_section_template['name'] = 'search-info-header.Workflow:%s' % i - resp = es_testapp.post_json('/static_section', static_section_template, 
status=201).json - paths.append(resp['@graph'][0]['@id']) - es_testapp.post_json('/index', {'record': True}) - return paths - - -@pytest.mark.parametrize('item_type', ['static_section']) # maybe should test some other types... -def test_purge_item_type_from_db(es_testapp, dummy_static_section, item_type): - """ Tests purging all items of a certain item type from the DB """ - assert purge_item_type_from_storage(es_testapp, [item_type]) is True - es_testapp.post_json('/index', {'record': True}) - es_testapp.get('/search/?type=StaticSection', status=404) - es_testapp.get('/static-sections/442c8aa0-dc6c-43d7-814a-854af460b015?datastore=database', status=404) - - -def test_purge_item_type_from_db_many(es_testapp, many_dummy_static_sections): - """ Tests posting/deleting several static sections and checking all are gone """ - paths_to_check = many_dummy_static_sections - assert purge_item_type_from_storage(es_testapp, ['static_section']) is True - es_testapp.post_json('/index', {'record': True}) - path_string = '%s?datastore=database' - for path in paths_to_check: - es_testapp.get(path_string % path, status=404) - es_testapp.get('/search/?type=StaticSection', status=404) - - -# @pytest.mark.workbook -# Skipped because workbook fixture here causes issues with test orderings -# def test_purge_item_type_with_links_fails(es_testapp, workbook): -# """ Tries to remove 'lab', which should fail since it has links """ -# es_testapp.post_json('/index', {'record': True}) # must index everything so individual links show up -# time.sleep(5) # wait for indexing to catch up -# assert not purge_item_type_from_storage(es_testapp, ['lab']) -# es_testapp.post_json('/index', {'record': True}) diff --git a/src/encoded/tests/test_root.py b/src/encoded/tests/test_root.py deleted file mode 100644 index 4507874015..0000000000 --- a/src/encoded/tests/test_root.py +++ /dev/null @@ -1,31 +0,0 @@ -from unittest import mock -from ..root import uptime_info -from dcicutils import lang_utils -from dcicutils.misc_utils import ignored -from ..appdefs import ITEM_INDEX_ORDER - - -def test_type_metadata(anontestapp): - - response = anontestapp.get('/type-metadata') - assert response.status_code == 200 - result = response.json - assert isinstance(result, dict) - assert set(result.keys()) == {'index_order'} - assert result['index_order'] == ITEM_INDEX_ORDER - - -def test_uptime_info(): - - with mock.patch("uptime.uptime", return_value=65 * 60): - assert uptime_info() == "1 hour, 5 minutes" - - def fail(*args, **kwargs): - ignored(args, kwargs) - raise RuntimeError("Failure") - - with mock.patch("uptime.uptime", side_effect=fail): - assert uptime_info() == "unavailable" - - with mock.patch.object(lang_utils, "relative_time_string", fail): - assert uptime_info() == "unavailable" diff --git a/src/encoded/tests/test_schema_formats.py b/src/encoded/tests/test_schema_formats.py deleted file mode 100644 index e46165e975..0000000000 --- a/src/encoded/tests/test_schema_formats.py +++ /dev/null @@ -1,115 +0,0 @@ -import glob -import io -import json -import pkg_resources -import pytest -import uuid - -from ..schema_formats import ACCESSION_CODES, ACCESSION_TEST_CODES, is_uri, is_uuid - - -pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema, pytest.mark.unit] - - -# Any legacy codes that have been discontinued and aren't mentioned in schemas -# but that have accessions that must still be parsed should go here. 
-kmp 2-Mar-2021 -LEGACY_ACCESSION_CODES = "" - - -def compute_accession_codes(): - legacy_codes = [x for x in LEGACY_ACCESSION_CODES.split("|") if x] - letter_pairs = set(legacy_codes) - for file in glob.glob(pkg_resources.resource_filename('encoded', 'schemas/*.json')): - with io.open(file) as fp: - schema = json.load(fp) - letter_pair = schema.get('properties', {}).get('accession', {}).get('accessionType') - if letter_pair: - if not isinstance(letter_pair, str) or len(letter_pair) != 2: - raise RuntimeError("accession_type %r in %s is not a 2-character string" % (letter_pair, file)) - letter_pairs.add(letter_pair) - return "|".join(sorted(letter_pairs)) - - -def test_accession_codes(): - - computed_codes = compute_accession_codes() - assert ACCESSION_TEST_CODES == computed_codes - assert ACCESSION_CODES == computed_codes - - -def test_is_uuid(): - - good_uuid = str(uuid.uuid4()) - bad_uuid = '123-456-789' - - assert not is_uuid("12345678abcd678123456781234") # wrong length. expecting 32 digits - assert not is_uuid("12-3456781234abcd1234567812345678") # hyphens only allowed at multiple-of-four boundaries - assert not is_uuid("12-3456781234abcd1234567-812345678") # ditto - - assert is_uuid("123456781234abcd1234567812345678") - assert is_uuid("12345678abcd56781234ABCD12345678") - assert is_uuid("1234-5678abcd56781234ABCD12345678") - assert is_uuid("12345678abcd-56781234ABCD1234-5678") - assert is_uuid("1234-5678-abcd56781234ABCD-12345678") - assert is_uuid("1234-5678-abcd-56781234ABCD12345678") - assert is_uuid("1234-5678-abcd-5678-1234-ABCD-1234-5678") - assert is_uuid("1234-5678-abcd-5678-1234-ABCD-1234-5678-") # we don't really want this, but we tolerate it - - assert is_uuid("{12345678abcd56781234ABCD12345678}") # braces are optionally allowed - assert is_uuid("{1234-5678-abcd5678-1234-ABCD-1234-5678}") # ditto - assert is_uuid("1234-5678-abcd5678-1234-ABCD-1234-5678}") # an unbalanced closing brace is tolerated, too - assert is_uuid("{1234-5678-abcd5678-1234-ABCD-1234-5678-}") # balanced braces with a trailing hyphen are tolerated - - assert is_uuid(good_uuid) is True - assert is_uuid(bad_uuid) is False - - -def test_is_uri(): - - # http/https scheme and some kind of host are required. We want absolute URIs. - # See more extensive testing and implementation notes in the dcicutils test for misc_utils.is_valid_absolute_uri - - assert is_uri("foo") is False # simple relative URL not allowed - assert is_uri("//somehost?alpha=1&beta=2") is False # Host but no scheme also not allowed - assert is_uri("//somehost/?alpha=1&beta=2&colon=:") is False # Used to wrongly yield True due to ':' in URI - assert is_uri("http:/foo?alpha=1") is False # Scheme but no host. This used to wrongly yield True. - assert is_uri("ftps://somehost") is False # Wrong kind of scheme. Used to be True.
- - assert is_uri("https://somehost") is True - assert is_uri("http://user@somehost") is True - assert is_uri("http://user:pass@somehost") is True - assert is_uri("http://somehost/x/y/z?alpha=1&beta=2&colon=:") is True - - assert is_uri("http://abc.def/foo#alpha") is True # TODO: Reconsider tags - assert is_uri("http://abc.def/foo#alpha:beta") is True # TODO: Reconsider tags - - -@pytest.mark.parametrize('obj, expected', [ - ({ - 'accession': 'not valid' - }, False), - ({ - 'accession': 'SNOnotvalid' - }, False), - ({ - 'accession': 'TSTnotvalid' - }, False), - ({ - 'accession': 'TSTabcclose' - }, False), - ({ - 'accession': 'TSTBS439abcd' # lowercase fails - }, False), - ({ - 'accession': 'TSTBS4399428' # real one that matches - }, True), - ({ - 'accession': 'TSTBS4393BCD' # real one that matches (can be trailing characters) - }, True), - ({}, True), # serverDefault should work -]) -def test_custom_validator_with_real_type(testapp, obj, expected): - """ Tests that we can validate accessions (or other nonstandard format fields) """ - if not expected: - testapp.post_json('/TestingServerDefault', obj, status=422) - else: - testapp.post_json('/TestingServerDefault', obj, status=201) diff --git a/src/encoded/tests/test_schemas.py b/src/encoded/tests/test_schemas.py deleted file mode 100644 index e92e584f9f..0000000000 --- a/src/encoded/tests/test_schemas.py +++ /dev/null @@ -1,217 +0,0 @@ -import pytest -import re - -from pkg_resources import resource_listdir -from snovault import COLLECTIONS, TYPES -from snovault.schema_utils import load_schema -from snovault.util import crawl_schema - - -pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema] - -SCHEMA_FILES = [ - f for f in resource_listdir('encoded', 'schemas') - if f.endswith('.json') -] - - -@pytest.fixture(scope='module') -def master_mixins(): - mixins = load_schema('encoded:schemas/mixins.json') - mixin_keys = [ - 'schema_version', - 'uuid', - 'accession', - 'aliases', - 'status', - 'submitted', - 'release_dates', - 'modified', - 'references', - 'attribution', - 'notes', - 'documents', - 'attachment', - 'dbxrefs', - 'library', - 'antibody_info', - 'spikein_info', - 'sop_mapping', - 'tags', - 'badges', - 'facets_common' - ] - for key in mixin_keys: - assert(mixins[key]) - - -def camel_case(name): - return ''.join(x for x in name.title() if not x == '_') - - -def pluralize(name): - name = name.replace('_', '-') - # deal with a few special cases explicitly - specials = ['experiment', 'file', 'individual', 'treatment', - 'quality-metric', 'summary-statistic', 'workflow-run', - 'microscope-setting'] - for sp in specials: - if name.startswith(sp) and re.search('-(set|flag|format|type)', name) is None: - return name.replace(sp, sp + 's') - elif name.startswith(sp) and re.search('setting', name): - return name.replace(sp, sp + 's') - # otherwise just add 's' - return name + 's' - - -@pytest.mark.parametrize('schema', SCHEMA_FILES) -def test_load_schema(schema, master_mixins, registry): - - abstract = [ - 'microscope_setting.json', - 'experiment.json', - 'file.json', - 'individual.json', - 'quality_metric.json', - 'treatment.json', - 'workflow_run.json', - 'user_content.json' - ] - - loaded_schema = load_schema('encoded:schemas/%s' % schema) - assert(loaded_schema) - - typename = schema.replace('.json', '') - collection_names = [camel_case(typename), pluralize(typename)] - - # check the mixin properties for each schema - if not schema == ('mixins.json'): - verify_mixins(loaded_schema, master_mixins) - - if schema not in 
['namespaces.json', 'mixins.json']: - # check that schema.id is same as /profiles/schema - idtag = loaded_schema['$id'] - idtag = idtag.replace('/profiles/', '') - # special case for access_key.json - if schema == 'access_key.json': - idtag = idtag.replace('_admin', '') - assert schema == idtag - - # check for pluralized and camel cased in collection_names - val = None - for name in collection_names: - assert name in registry[COLLECTIONS] - if val is not None: - assert registry[COLLECTIONS][name] == val - else: - val = registry[COLLECTIONS][name] - - if schema not in abstract: - # check schema w/o json extension is in registry[TYPES] - assert typename in registry[TYPES].by_item_type - assert typename in registry[COLLECTIONS] - assert registry[COLLECTIONS][typename] == val - - shared_properties = [ - 'uuid', - 'schema_version', - 'aliases', - 'lab', - 'award', - 'date_created', - 'submitted_by', - 'last_modified', - 'status' - ] - no_alias_or_attribution = [ - 'user.json', 'award.json', 'lab.json', 'organism.json', - 'ontology.json', 'ontology_term.json', 'page.json', - 'static_section.json', 'badge.json', 'tracking_item.json', - 'file_format.json', 'experiment_type.json', 'higlass_view_config.json', - 'microscope_configuration.json', 'image_setting.json' - ] - for prop in shared_properties: - if schema == 'experiment.json': - # currently experiment is abstract and has no mixin properties - continue - if schema == 'access_key.json' and prop not in ['uuid', 'schema_version']: - continue - if schema in no_alias_or_attribution and prop in ['aliases', 'lab', 'award']: - continue - verify_property(loaded_schema, prop) - - -def verify_property(loaded_schema, property): - assert(loaded_schema['properties'][property]) - - -def verify_mixins(loaded_schema, master_mixins): - ''' - test to ensure that we didn't accidentally overwrite mixins somehow - ''' - for mixin in loaded_schema.get('mixinProperties', []): - # get the mixin name from {'$ref':'mixins.json#/schema_version'} - mixin_file_name, mixin_name = mixin['$ref'].split('#/') - if mixin_file_name != "mixins.json": - # skip any mixins not in main mixins.json - continue - mixin_schema = master_mixins[mixin_name] - - # each field in the mixin should be present in the parent schema with same properties - for mixin_field_name, mixin_field in mixin_schema.items(): - schema_field = loaded_schema['properties'][mixin_field_name] - for key in mixin_field.keys(): - assert mixin_field[key] == schema_field[key] - - -def test_linkTo_saves_uuid(root, submitter, lab): - item = root['users'][submitter['uuid']] - assert item.properties['submits_for'] == [lab['uuid']] - - -def test_mixinProperties(): - schema = load_schema('snovault:schemas/access_key.json') - assert schema['properties']['uuid']['type'] == 'string' - - -def test_dependencies(testapp): - collection_url = '/testing-dependencies/' - testapp.post_json(collection_url, {'dep1': 'dep1', 'dep2': 'dep2'}, status=201) - testapp.post_json(collection_url, {'dep1': 'dep1'}, status=422) - testapp.post_json(collection_url, {'dep2': 'dep2'}, status=422) - testapp.post_json(collection_url, {'dep1': 'dep1', 'dep2': 'disallowed'}, status=422) - - -def test_changelogs(testapp, registry): - for typeinfo in registry[TYPES].by_item_type.values(): - changelog = typeinfo.schema.get('changelog') - if changelog is not None: - res = testapp.get(changelog) - assert res.status_int == 200, changelog - assert res.content_type == 'text/markdown' - - -def test_fourfront_crawl_schemas(testapp, registry): - schema =
load_schema('encoded:schemas/experiment_hi_c.json') - field_path = 'files.extra_files.file_size' - field_schema = crawl_schema(registry[TYPES], field_path, schema) - assert isinstance(field_schema, dict) - assert field_schema['title'] == 'File Size' - - -def test_schema_version_present_on_items(app): - """Test a valid schema version is present on all non-test item - types. - Expecting positive integer values for non-abstract items, and empty - string for all abstract items. - """ - all_types = app.registry.get(TYPES).by_item_type - for type_name, item_type in all_types.items(): - if type_name.startswith("testing"): - continue - schema_version = item_type.schema_version - if item_type.is_abstract: - assert schema_version == "" - else: - assert schema_version, f'failed for {item_type.name}' - assert int(schema_version) >= 1 diff --git a/src/encoded/tests/test_search.py b/src/encoded/tests/test_search.py deleted file mode 100644 index 02c4e869cb..0000000000 --- a/src/encoded/tests/test_search.py +++ /dev/null @@ -1,1101 +0,0 @@ -import json -import pytest -import webtest - - -from datetime import datetime, timedelta -from dcicutils.misc_utils import Retry, ignored, local_attrs -from dcicutils.qa_utils import notice_pytest_fixtures -from snovault import TYPES, COLLECTIONS -from snovault.elasticsearch import create_mapping -from snovault.elasticsearch.indexer_utils import get_namespaced_index -from snovault.util import add_default_embeds -from ..commands.run_upgrader_on_inserts import get_inserts -# Use workbook fixture from BDD tests (including elasticsearch) -#from .workbook_fixtures import es_app_settings, es_app, es_testapp, anon_es_testapp, html_es_testapp, workbook -# from ..util import customized_delay_rerun - - -pytestmark = [ - pytest.mark.working, - pytest.mark.schema, - # pytest.mark.indexing, - pytest.mark.workbook, - # pytest.mark.flaky(rerun_filter=customized_delay_rerun(sleep_seconds=10)) -] - - -# == IMPORTANT == -# uses the inserts in ./data/workbook_inserts -# design your tests accordingly -# notice_pytest_fixtures(es_app_settings, es_app, es_testapp, anon_es_testapp, html_es_testapp, workbook) - - -# just a little helper function -def recursively_find_uuids(json, uuids): - for key, val in json.items(): - if key == 'uuid': - uuids.add(val) - elif isinstance(val, list): - for item in val: - if isinstance(item, dict): - uuids = recursively_find_uuids(item, uuids) - elif isinstance(val, dict): - uuids = recursively_find_uuids(val, uuids) - return uuids - - -def test_search_view(workbook, es_testapp): - notice_pytest_fixtures(workbook) - res = es_testapp.get('/search/?type=Item').json - assert res['@type'] == ['ItemSearchResults', 'Search'] - assert res['@id'] == '/search/?type=Item' - assert res['@context'] == '/terms/' - assert res['notification'] == 'Success' - assert res['title'] == 'Search' - assert res['total'] > 0 - assert 'facets' in res - assert 'filters' in res - assert '@graph' in res - - -def test_search_with_no_query(workbook, es_testapp): - notice_pytest_fixtures(workbook, es_testapp) - # using /search/ (with no query) should default to /search/?type=Item - # thus, should satisfy same assertions as test_search_view - res = es_testapp.get('/search/').follow(status=200) - assert res.json['@type'] == ['ItemSearchResults', 'Search'] - assert res.json['@id'] == '/search/?type=Item' - assert res.json['@context'] == '/terms/' - assert res.json['notification'] == 'Success' - assert res.json['title'] == 'Search' - assert res.json['total'] > 0 - assert 'facets' in res - # test 
default facets (data type and status) - default_facets = [facet['field'] for facet in res.json['facets']] - assert 'type' in default_facets - assert 'status' in default_facets - assert 'filters' in res - assert '@graph' in res - - -def test_collections_redirect_to_search(workbook, es_testapp): - notice_pytest_fixtures(workbook, es_testapp) - # we removed the collections page and redirect to search of that type - # redirected_from is not used for search - res = es_testapp.get('/biosamples/', status=301).follow(status=200) - assert res.json['@type'] == ['BiosampleSearchResults', 'ItemSearchResults', 'Search'] - assert res.json['@id'] == '/search/?type=Biosample' - assert 'redirected_from' not in res.json['@id'] - assert res.json['@context'] == '/terms/' - assert res.json['notification'] == 'Success' - assert res.json['title'] == 'Search' - assert res.json['total'] > 0 - assert 'facets' in res - assert 'filters' in res - assert '@graph' in res - - -def test_search_with_embedding(workbook, es_testapp): - notice_pytest_fixtures(workbook, es_testapp) - res = es_testapp.get('/search/?type=Biosample&limit=all').json - # Use a specific biosample, found by accession from test data - # Check the embedding /types/biosample.py entry; test ensures - # that the actual embedding matches that - res_json = [bios for bios in res['@graph'] if bios['accession'] == '4DNBS1234567'] - assert len(res_json) == 1 - test_json = res_json[0] - # check default embedding: @id and display_title for submitted_by - assert test_json['submitted_by']['display_title'] == 'Wrangler Wrangler' - assert test_json['submitted_by']['@id'] == '/users/986b362f-4eb6-4a9c-8173-3ab267307e3b/' - # this specific field should be embedded ('biosource.biosource_type') - assert test_json['biosource'][0]['biosource_type'] == 'immortalized cell line' - # this specific linked item should be embedded ('biosource.biosource_vendor') - assert isinstance(test_json['biosource'][0]['biosource_vendor'], dict) - # since lab.awards was not specifically embedded, the field should not exist - assert test_json['lab'].get('awards') is None - - -def test_file_search_type(workbook, es_testapp): - """ Tests that searching on a type that inherits from File adds a FileSearchResults - identifier in the @type field - """ - notice_pytest_fixtures(workbook, es_testapp) - res = es_testapp.get('/search/?type=FileProcessed').json - assert 'FileSearchResults' in res['@type'] - res = es_testapp.get('/search/?type=Biosample').json - assert 'FileSearchResults' not in res['@type'] - assert res['@type'][0] == 'BiosampleSearchResults' - assert res['@type'][1] == 'ItemSearchResults' - res = es_testapp.get('/search/?type=FileProcessed&type=Biosample').json - assert 'FileSearchResults' not in res['@type'] - res = es_testapp.get('/search/?type=FileProcessed&type=FileReference').json - assert 'FileSearchResults' in res['@type'] - res = es_testapp.get('/search').follow().json - assert 'FileSearchResults' not in res['@type'] - res = es_testapp.get('/search/?type=File').json - assert 'FileSearchResults' in res['@type'] - assert res['@type'].count('FileSearchResults') == 1 - res = es_testapp.get('/search/?type=FileFastq').json - assert res['@type'][0] == 'FileFastqSearchResults' - assert res['@type'][1] == 'FileSearchResults' - assert res['@type'][2] == 'ItemSearchResults' - assert res['@type'][3] == 'Search' - res = es_testapp.get('/search/?type=FileFastq&type=Biosample').json - assert res['@type'][0] == 'ItemSearchResults' - res = es_testapp.get('/search/?type=FileFastq&type=File').json -
assert res['@type'][0] == 'FileSearchResults' - - -def test_search_with_simple_query(workbook, es_testapp): - # run a simple query with type=Organism and q=mouse - res = es_testapp.get('/search/?type=Organism&q=mouse').json - assert res['@type'] == ['OrganismSearchResults', 'ItemSearchResults', 'Search'] - assert len(res['@graph']) > 0 - # get the uuids from the results - mouse_uuids = [org['uuid'] for org in res['@graph'] if 'uuid' in org] - # run the same search with type=Item - res = es_testapp.get('/search/?type=Item&q=mouse').json - assert len(res['@graph']) > 0 - all_uuids = [item['uuid'] for item in res['@graph'] if 'uuid' in item] - # make sure all uuids found in the first search are present in the second - assert set(mouse_uuids).issubset(set(all_uuids)) - # a run with q=mous should return the same hits... - res = es_testapp.get('/search/?type=Item&q=mous').json - mous_uuids = [item['uuid'] for item in res['@graph'] if 'uuid' in item] - # make sure all uuids found in the first search are present in the second - assert set(mouse_uuids).issubset(set(mous_uuids)) - # run with q=musculus, which should return the same hits as mouse - res = es_testapp.get('/search/?type=Item&q=musculus').json - musculus_uuids = [item['uuid'] for item in res['@graph'] if 'uuid' in item] - # make sure all uuids found in the first search are present in the second - assert set(mouse_uuids).issubset(set(musculus_uuids)) - # run with q=mauxz (misspelled) and ensure the mouse uuids are not in the results - res = es_testapp.get('/search/?type=Item&q=mauxz', status=[200, 404]).json - # make this test robust by either assuming no results are found - # (true when this test was written) - # OR that results that happen to contain "mauxz" do not include what - # we're looking for. - mauxz_uuids = [item['uuid'] for item in res['@graph'] if 'uuid' in item] - # make sure the uuids found in the first search are NOT all present in this one - assert not set(mouse_uuids).issubset(set(mauxz_uuids)) - - -def test_search_ngram(workbook, es_testapp): - """ - Tests ngram behavior for various use cases - """ - res = es_testapp.get('/search/?type=Organism&q=mo').json - assert len(res['@graph']) == 1 - res = es_testapp.get('/search/?type=Organism&q=hu').json - assert len(res['@graph']) == 1 - res = es_testapp.get('/search/?type=Organism&q=ma').json - assert len(res['@graph']) == 1 - # or search - res = es_testapp.get('/search/?type=Organism&q=(mu|an)').follow().json - assert len(res['@graph']) == 2 - # or not search - res = es_testapp.get('/search/?type=Organism&q=(ho|-an)').follow().json - assert len(res['@graph']) == 2 - # by uuid subset - res = es_testapp.get('/search/?type=Organism&q=3413218c').json - assert len(res['@graph']) == 2 - # uuid difference beyond max_ngram - res = es_testapp.get('/search/?type=Organism&q=3413218c-3f').json - assert len(res['@graph']) == 2 - # uuid difference before max_ngram, no results - es_testapp.get('/search/?type=Organism&q=3413218d', status=404) - - -def test_search_facets_and_columns_order(workbook, es_testapp, registry): - # TODO: Adjust ordering of mixed-in facets, perhaps sort by lookup or something, in order to un-xfail.
- test_type = 'experiment_set_replicate' - type_info = registry[TYPES].by_item_type[test_type] - schema = type_info.schema - schema_facets = [('type', {'title': 'Data Type'})] - schema_facets.extend(schema['facets'].items()) - # the following facets are added after schema facets - schema_facets.append(('status', {'title': 'Status'})) - # remove any disabled facets - schema_facets = [fct for fct in schema_facets if not fct[1].get('disabled', False)] - sort_facets = sorted(schema_facets, key=lambda fct: fct[1].get('order', 0)) - res = es_testapp.get('/search/?type=ExperimentSetReplicate&limit=all').json - for i, val in enumerate(sort_facets): - assert res['facets'][i]['field'] == val[0] - # assert order of columns when we officially upgrade to python 3.6 (ordered dicts) - for key, val in schema.get('columns', {}).items(): - assert res['columns'][key]['title'] == val['title'] - - -def test_search_embedded_file_by_accession(workbook, es_testapp): - res = es_testapp.get('/search/?type=ExperimentHiC&files.accession=4DNFIO67APU1').json - assert len(res['@graph']) > 0 - item_uuids = [item['uuid'] for item in res['@graph'] if 'uuid' in item] - for item_uuid in item_uuids: - item_res = es_testapp.get('/experiments-hi-c/%s/' % item_uuid, status=301) - exp = item_res.follow().json - file_uuids = [f['uuid'] for f in exp['files']] - assert '46e82a90-49e5-4c33-afab-9ec90d65faa0' in file_uuids - - -@pytest.fixture -def mboI_dts(es_testapp, workbook): - # returns a dictionary of strings of various dates and datetimes - # relative to the creation date of the MboI object in the test inserts - enz = es_testapp.get('/search/?type=Enzyme&name=MboI').json['@graph'][0] - - cdate = enz['date_created'] - _date, _time = cdate.split('T') - yr, mo, day = [int(i) for i in _date.split('-')] - hr, mi, _ = _time.split(':', 2) - hr = int(hr) - mi = int(mi) - createdate = datetime(yr, mo, day, hr, mi) - - return { - 'creationdatetime': ':'.join(str(createdate).replace(' ', '+').split(':')[:-1]), - 'creationdate': str(createdate.date()) + '+00:00', - 'daybefore': ':'.join(str(createdate - timedelta(days=1)).replace(' ', '+').split(':')[:-1]), - 'dayafter': ':'.join(str(createdate + timedelta(days=1)).replace(' ', '+').split(':')[:-1]), - 'hourbefore': ':'.join(str(createdate - timedelta(hours=1)).replace(' ', '+').split(':')[:-1]), - 'hourafter': ':'.join(str(createdate + timedelta(hours=1)).replace(' ', '+').split(':')[:-1]) - } - - -def test_search_date_range_find_within(mboI_dts, es_testapp, workbook): - # the MboI enzyme should be returned with all the provided pairs - gres = es_testapp.get('/search/?type=Enzyme&name=MboI').json - g_uuids = [item['uuid'] for item in gres['@graph'] if 'uuid' in item] - dts = {k: v.replace(':', '%3A') for k, v in mboI_dts.items()} - datepairs = [ - (dts['daybefore'], dts['dayafter']), - (dts['creationdatetime'], dts['dayafter']), - (dts['daybefore'], dts['creationdatetime']), - (dts['creationdate'], dts['dayafter']), - (dts['hourbefore'], dts['hourafter']) - ] - - for dp in datepairs: - search = '/search/?type=Enzyme&date_created.from=%s&date_created.to=%s' % dp - sres = es_testapp.get(search).json - s_uuids = [item['uuid'] for item in sres['@graph'] if 'uuid' in item] - assert set(g_uuids).issubset(set(s_uuids)) - - -def test_search_with_nested_integer(es_testapp, workbook): - search0 = '/search/?type=ExperimentHiC' - s0res = es_testapp.get(search0).json - s0_uuids = [item['uuid'] for item in s0res['@graph'] if 'uuid' in item] - - search1 =
'/search/?type=ExperimentHiC&files.file_size.to=1500' - s1res = es_testapp.get(search1).json - s1_uuids = [item['uuid'] for item in s1res['@graph'] if 'uuid' in item] - assert len(s1_uuids) > 0 - - search2 = '/search/?type=ExperimentHiC&files.file_size.from=1501' - s2res = es_testapp.get(search2).json - s2_uuids = [item['uuid'] for item in s2res['@graph'] if 'uuid' in item] - assert len(s2_uuids) > 0 - - # make sure there is no intersection of the uuids - assert not set(s1_uuids) & set(s2_uuids) - assert set(s1_uuids) | set(s2_uuids) == set(s0_uuids) - - -def test_search_date_range_dontfind_without(mboI_dts, es_testapp, workbook): - # the MboI enzyme should NOT be returned for any of the provided pairs - dts = {k: v.replace(':', '%3A') for k, v in mboI_dts.items()} - datepairs = [ - (dts['daybefore'], dts['creationdate']), - (dts['hourafter'], dts['dayafter']), - (dts['daybefore'], dts['hourbefore']) - ] - for dp in datepairs: - search = '/search/?type=Enzyme&date_created.from=%s&date_created.to=%s' % dp - assert es_testapp.get(search, status=404) - - -def test_search_query_string_AND_NOT_cancel_out(workbook, es_testapp): - # if you use + and - with same field you should get no result - search = '/search/?q=cell+-cell&type=Biosource' - assert es_testapp.get(search, status=404) - - -def test_search_multiple_types(workbook, es_testapp): - # multiple types work with @type in response - search = '/search/?type=Biosample&type=ExperimentHiC' - res = es_testapp.get(search).json - assert res['@type'] == ['ItemSearchResults', 'Search'] - - -def test_search_query_string_with_booleans(workbook, es_testapp): - """ - moved references to res_not_induced and not_induced_uuids, - which were passing locally but failing on travis for some undetermined - reason... will look into this more later - """ - search = '/search/?type=Biosource&q=GM12878' - res_stem = es_testapp.get(search).json - assert len(res_stem['@graph']) > 1 - bios_uuids = [r['uuid'] for r in res_stem['@graph'] if 'uuid' in r] - swag_bios = '331111bc-8535-4448-903e-854af460b888' - assert swag_bios in bios_uuids - # assert induced_stem_uuid not in not_induced_uuids - # now search for swag +GM12878 (AND is now "+") - search_and = '/search/?type=Biosource&q=swag+%2BGM12878' - res_both = es_testapp.get(search_and).json - both_uuids = [r['uuid'] for r in res_both['@graph'] if 'uuid' in r] - assert len(both_uuids) == 1 - assert swag_bios in both_uuids - # search with OR ("|") - search_or = '/search/?type=Biosource&q=swag+%7CGM12878' - res_or = es_testapp.get(search_or).json - or_uuids = [r['uuid'] for r in res_or['@graph'] if 'uuid' in r] - assert len(or_uuids) > 1 - assert swag_bios in or_uuids - # search with NOT ("-") - search_not = '/search/?type=Biosource&q=GM12878+-swag' - res_not = es_testapp.get(search_not).json - not_uuids = [r['uuid'] for r in res_not['@graph'] if 'uuid' in r] - assert swag_bios not in not_uuids - - -@pytest.mark.broken # test doesn't work, this will keep make from running it -@pytest.mark.skip # In case of running the file by name, this still doesn't want to run -def test_metadata_tsv_view(workbook, html_es_testapp): - - FILE_ACCESSION_COL_INDEX = 3 - FILE_DOWNLOAD_URL_COL_INDEX = 0 - - def check_tsv(result_rows, len_requested=None): - info_row = result_rows.pop(0) - header_row = result_rows.pop(0) - - assert header_row[FILE_ACCESSION_COL_INDEX] == 'File Accession' - assert header_row.index('File Download URL') == FILE_DOWNLOAD_URL_COL_INDEX # Ensure we have this column - assert len(result_rows) > 0 # We at least have some
-
-        for row_index in range(1):
-            assert len(result_rows[row_index][FILE_ACCESSION_COL_INDEX]) > 4  # We have a value for File Accession
-            assert 'http' in result_rows[row_index][FILE_DOWNLOAD_URL_COL_INDEX]  # Make sure it seems like a valid URL.
-            assert '/@@download/' in result_rows[row_index][FILE_DOWNLOAD_URL_COL_INDEX]
-            assert result_rows[row_index][FILE_ACCESSION_COL_INDEX] in result_rows[row_index][FILE_DOWNLOAD_URL_COL_INDEX]  # That File Accession is also in File Download URL of same row.
-            assert len(result_rows[row_index][FILE_ACCESSION_COL_INDEX]) < len(result_rows[row_index][FILE_DOWNLOAD_URL_COL_INDEX])
-
-        # The last few rows should be 'summary' rows, with empty spaces for 'Download URL' / first column.
-        summary_start_row = None
-        for row_index, row in enumerate(result_rows):
-            if row[1] == 'Summary':
-                summary_start_row = row_index - 1
-                break
-
-        # Check that summary cells are present, in right place, with some correct-looking values
-        assert result_rows[summary_start_row + 1][1] == 'Summary'
-        assert result_rows[summary_start_row + 3][1] == 'Files Selected for Download:'
-        assert result_rows[summary_start_row + 4][1] == 'Total File Rows:'
-        assert result_rows[summary_start_row + 5][1] == 'Unique Downloadable Files:'
-        if len_requested:
-            assert int(result_rows[summary_start_row + 3][4]) == len_requested
-        assert int(result_rows[summary_start_row + 4][4]) == summary_start_row
-        assert int(result_rows[summary_start_row + 5][4]) <= summary_start_row
-
-
-    # run a simple GET query with type=ExperimentSetReplicate
-    res = html_es_testapp.get('/metadata/type=ExperimentSetReplicate/metadata.tsv')  # OLD URL FORMAT IS USED -- TESTING REDIRECT TO NEW URL
-    res = res.maybe_follow()  # Follow redirect -- https://docs.pylonsproject.org/projects/webtest/en/latest/api.html#webtest.response.TestResponse.maybe_follow
-    assert 'text/tsv' in res.content_type
-    result_rows = [row.rstrip(' \r').split('\t') for row in res.body.decode('utf-8').split('\n')]  # Strip out carriage returns and whatnot. Make a plain multi-dim array.
-
-    check_tsv(result_rows)
-
-    # Perform POST w/ accession triples (main case, for BrowseView downloads)
-
-    # N.B. '.post', not '.post_json', is used: the dict is converted to POST form
-    # values, with its values stringified, rather than sent as a JSON request body.
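-    # For illustration: because of the stringification noted above, the
-    # accession_triples list below travels as a single form value, e.g. the
-    # string '[["4DNESAAAAAA1", "4DNEXO67APU1", "4DNFIO67APU1"], ...]'.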
-    res2_post_data = {
-        "accession_triples": [
-            ["4DNESAAAAAA1", "4DNEXO67APU1", "4DNFIO67APU1"],
-            ["4DNESAAAAAA1", "4DNEXO67APU1", "4DNFIO67APT1"],
-            ["4DNESAAAAAA1", "4DNEXO67APT1", "4DNFIO67APV1"],
-            ["4DNESAAAAAA1", "4DNEXO67APT1", "4DNFIO67APY1"],
-            ["4DNESAAAAAA1", "4DNEXO67APV1", "4DNFIO67APZ1"],
-            ["4DNESAAAAAA1", "4DNEXO67APV1", "4DNFIO67AZZ1"]
-        ],
-        'download_file_name': 'metadata_TEST.tsv'
-    }
-
-    res2 = html_es_testapp.post('/metadata/?type=ExperimentSetReplicate',  # NEWER URL FORMAT
-                                {k: json.dumps(v)
-                                 for k, v in res2_post_data.items()})
-
-    assert 'text/tsv' in res2.content_type
-    result_rows = [row.rstrip(' \r').split('\t') for row in res2.body.decode('utf-8').split('\n')]
-
-    check_tsv(result_rows, len(res2_post_data['accession_triples']))
-
-
-def test_default_schema_and_non_schema_facets(workbook, es_testapp, registry):
-    test_type = 'biosample'
-    type_info = registry[TYPES].by_item_type[test_type]
-    schema = type_info.schema
-    embeds = add_default_embeds(test_type, registry[TYPES], type_info.embedded_list, schema)
-    # we're looking for this embed, which backs a facet that is not in the schema
-    assert 'biosource.*' in embeds
-    res = es_testapp.get('/search/?type=Biosample&biosource.biosource_type=immortalized+cell+line').json
-    assert 'facets' in res
-    facet_fields = [facet['field'] for facet in res['facets']]
-    assert 'type' in facet_fields
-    assert 'status' in facet_fields
-    for facet in schema['facets'].keys():
-        if not schema['facets'][facet].get('hide_from_view'):
-            assert facet in facet_fields
-    # now ensure that facets can also be created outside of the schema
-    assert 'biosource.biosource_type' in facet_fields
-
-
-def test_search_query_string_no_longer_functional(workbook, es_testapp):
-    # since we now use simple_query_string, cannot use field:value or range
-    # expect 404s, since simple_query_string doesn't return exceptions
-    search_field = '/search/?q=name%3Ahuman&type=Item'
-    res_field = es_testapp.get(search_field, status=404)
-    assert len(res_field.json['@graph']) == 0
-
-    search_range = '/search/?q=date_created%3A>2018-01-01&type=Item'
-    res_search = es_testapp.get(search_range, status=404)
-    assert len(res_search.json['@graph']) == 0
-
-
-def test_search_with_added_display_title(workbook, es_testapp, registry):
-    # 4DNBS1234567 is display_title for biosample
-    search = '/search/?type=ExperimentHiC&biosample=4DNBS1234567'
-    # 301 because search query is changed
-    res_json = es_testapp.get(search, status=301).follow(status=200).json
-    assert res_json['@id'] == '/search/?type=ExperimentHiC&biosample.display_title=4DNBS1234567'
-    added_facet = [fct for fct in res_json['facets'] if fct['field'] == 'biosample.display_title']
-    # use the title from biosample in experiment schema
-    bios_title = registry[TYPES]['ExperimentHiC'].schema['properties']['biosample']['title']
-    assert added_facet[0]['title'] == bios_title
-    exps = [exp['uuid'] for exp in res_json['@graph']]
-
-    # make sure the search result is the same for the explicit query
-    res_json2 = es_testapp.get(res_json['@id']).json
-    exps2 = [exp['uuid'] for exp in res_json2['@graph']]
-    assert set(exps) == set(exps2)
-
-    # 'sort' also adds display_title for ascending and descending queries
-    for use_sort in ['biosample', '-biosample']:
-        search = '/search/?type=ExperimentHiC&sort=%s' % use_sort
-        res_json = es_testapp.get(search, status=301).follow(status=200).json
-        assert res_json['@id'] == '/search/?type=ExperimentHiC&sort=%s.display_title' % use_sort
-
-    # regular sort queries remain unchanged
-    search = '/search/?type=ExperimentHiC&sort=uuid'
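-    # uuid is a plain (non-linkTo) field, so the query is not rewritten: no
-    # '.display_title' suffix is appended and the '@id' below comes back unchanged.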
-    res_json = es_testapp.get(search).json
-    assert res_json['@id'] == '/search/?type=ExperimentHiC&sort=uuid'
-
-    # check to see that added facet doesn't conflict with existing facet title
-    # query below will change to file_format.display_title=fastq
-    search = '/search/?type=File&file_format=fastq'
-    res_json = es_testapp.get(search, status=301).follow(status=200).json
-    assert res_json['@id'] == '/search/?type=File&file_format.display_title=fastq'
-    # find title from schema
-    ff_title = registry[TYPES]['File'].schema['properties']['file_format']['title']
-    existing_ff_facet = [fct for fct in res_json['facets'] if fct['field'] == 'file_format.file_format']
-    assert existing_ff_facet[0]['title'] == ff_title
-    added_ff_facet = [fct for fct in res_json['facets'] if fct['field'] == 'file_format.display_title']
-    assert added_ff_facet[0]['title'] == ff_title + ' (Title)'
-
-
-def test_search_with_no_value(workbook, es_testapp):
-    search = '/search/?description=No+value&description=GM12878+prepared+for+HiC&type=Biosample'
-    res_json = es_testapp.get(search).json
-    # grab some random results
-    for item in res_json['@graph']:
-        maybe_null = item.get('description')
-        assert (maybe_null is None or maybe_null == 'GM12878 prepared for HiC')
-    res_ids = [r['uuid'] for r in res_json['@graph'] if 'uuid' in r]
-    search2 = '/search/?description=GM12878+prepared+for+HiC&type=Biosample'
-    res_json2 = es_testapp.get(search2).json
-    # just do 1 res here
-    check_item = res_json2['@graph'][0]
-    assert (check_item.get('description') == 'GM12878 prepared for HiC')
-    res_ids2 = [r['uuid'] for r in res_json2['@graph'] if 'uuid' in r]
-    assert (set(res_ids2) <= set(res_ids))
-
-
-def test_search_with_static_header(workbook, es_testapp):
-    """ Performs a search which should be accompanied by a search header """
-    search = '/search/?type=Workflow'
-    res_json = es_testapp.get(search, status=404).json  # no items, just checking header
-    assert 'search_header' in res_json
-    assert 'content' in res_json['search_header']
-    assert res_json['search_header']['title'] == 'Workflow Information'
-    search = '/search/?type=workflow'  # check type resolution
-    res_json = es_testapp.get(search, status=404).json
-    assert 'search_header' in res_json
-    assert 'content' in res_json['search_header']
-    assert res_json['search_header']['title'] == 'Workflow Information'
-
-
-#########################################
-## Tests for collections (search 301s) ##
-#########################################
-
-def test_collection_limit(workbook, es_testapp):
-    res = es_testapp.get('/biosamples/?limit=2', status=301)
-    assert len(res.follow().json['@graph']) == 2
-
-
-def test_collection_actions_filtered_by_permission(workbook, es_testapp, anon_es_testapp):
-    res = es_testapp.get('/biosamples/')
-    assert any(action for action in res.follow().json.get('actions', []) if action['name'] == 'add')
-
-    # biosamples not visible
-    res = anon_es_testapp.get('/biosamples/', status=404)
-    assert len(res.json['@graph']) == 0
-
-
-class ItemTypeChecker:
-
-    @staticmethod
-    @Retry.retry_allowed('ItemTypeChecker.check_item_type', wait_seconds=1, retries_allowed=5)
-    def check_item_type(client, item_type, deleted=False):
-        # This might get a 404 if not enough time has elapsed, so try a few times before giving up.
-        #
-        # We retry a lot of times because it's still fast if things are working quickly, but if it's
-        # slow it's better to wait than fail the test. Slowness is not what we're trying to check for here.
-        # And even if it's slow for one item, that same wait time will help others have time to catch up,
-        # so it shouldn't be slow for others. At least that's the theory. -kmp 27-Jan-2021
-        extra = "&status=deleted" if deleted else ""
-        return client.get('/%s?limit=all%s' % (item_type, extra), status=[200, 301]).follow()
-
-
-    CONSIDER_DELETED = True
-    DELETED_SEEN = {}
-
-    @classmethod
-    def reset_deleted(cls):
-        cls.DELETED_SEEN = {}
-
-    @classmethod
-    def deleted_seen(cls):
-        total_deleted = 0
-        for item_type, item_deleted_count in cls.DELETED_SEEN.items():
-            ignored(item_type)
-            total_deleted += item_deleted_count
-        return total_deleted
-
-    @classmethod
-    def get_all_items_of_type(cls, client, item_type):
-        if cls.CONSIDER_DELETED:
-            try:
-                res = cls.check_item_type(client, item_type)
-                items_not_deleted = res.json.get('@graph', [])
-            except webtest.AppError:
-                items_not_deleted = []
-            try:
-                res = cls.check_item_type(client, item_type, deleted=True)
-                items_deleted = res.json.get('@graph', [])
-            except webtest.AppError:
-                items_deleted = []
-            if items_deleted:
-                cls.DELETED_SEEN[item_type] = items_deleted
-            else:
-                cls.DELETED_SEEN.pop(item_type, None)  # remove the entry if present; we want it absent when empty
-            return items_not_deleted + items_deleted
-        else:
-            res = cls.check_item_type(client, item_type)
-            items_not_deleted = res.json.get('@graph', [])
-            return items_not_deleted
-
-
-@pytest.mark.flaky
-@pytest.mark.skip  # this test is redundant with other tests that load the workbook
-def test_index_data_workbook(app, workbook, es_testapp, indexer_es_testapp, html_es_testapp):
-    es = app.registry['elasticsearch']
-    # we need to reindex the collections to make sure numbers are correct
-    create_mapping.run(app, sync_index=True)
-    retried = False
-    while True:
-        # check counts and ensure they're equal
-        es_testapp_counts = es_testapp.get('/counts')
-        # e.g., {"db_es_total": "DB: 748 ES: 748 ", ...}
-        db_es_total = es_testapp_counts.json['db_es_total']
-        split_counts = db_es_total.split()
-        db_total = int(split_counts[1])
-        es_total = int(split_counts[3])
-        if db_total == es_total or retried:
-            if db_total != es_total:
-                print("Counts are still not aligned, but we've already retried once.")
-            break
-        retried = True
-        print("Posting /index anew because counts are not aligned")
-        es_testapp.post_json('/index', {})
-    # e.g., {..., "db_es_compare": {"AnalysisStep": "DB: 26 ES: 26 ", ...}, ...}
-    for item_name, item_counts in es_testapp_counts.json['db_es_compare'].items():
-        print("item_name=", item_name, "item_counts=", item_counts)
-        # make sure counts for each item match ES counts
-        split_item_counts = item_counts.split()
-        db_item_count = int(split_item_counts[1])
-        es_item_count = int(split_item_counts[3])
-        assert db_item_count == es_item_count
-
-        # check ES counts directly. Must skip abstract collections - must change counts result ("ItemName") to item_type format
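-        # e.g. the counts key "ExperimentSetReplicate" (ItemName form) maps to
-        # the item_type "experiment_set_replicate" used in index names and URLs.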
-        item_type = app.registry[COLLECTIONS][item_name].type_info.item_type
-        namespaced_index = get_namespaced_index(app, item_type)
-
-        es_direct_count = es.count(index=namespaced_index).get('count')
-        assert es_item_count == es_direct_count
-
-        if es_item_count == 0:
-            continue
-        # check items in search result individually
-        search_url = '/%s?limit=all' % item_type
-        print("search_url=", search_url)
-        items = ItemTypeChecker.get_all_items_of_type(client=es_testapp, item_type=item_type)
-        for item_res in items:
-            index_view_res = es.get(index=namespaced_index,
-                                    id=item_res['uuid'])['_source']
-            # make sure that the linked_uuids match the embedded data
-            assert 'linked_uuids_embedded' in index_view_res
-            assert 'embedded' in index_view_res
-            found_uuids = recursively_find_uuids(index_view_res['embedded'], set())
-            # all found uuids must be within the linked_uuids
-            assert found_uuids <= set([link['uuid'] for link in index_view_res['linked_uuids_embedded']])
-            # if uuids_rev_linking to me, make sure they show up in @@links
-            if len(index_view_res.get('uuids_rev_linked_to_me', [])) > 0:
-                links_res = es_testapp.get('/' + item_res['uuid'] + '/@@links', status=200)
-                link_uuids = [lnk['uuid'] for lnk in links_res.json.get('uuids_linking_to')]
-                assert set(index_view_res['uuids_rev_linked_to_me']) <= set(link_uuids)
-            # previously test_html_pages
-            try:
-                html_res = html_es_testapp.get(item_res['@id'])
-                assert html_res.body.startswith(b'<!DOCTYPE html>')
-            except Exception as e:
-                ignored(e)
-    if ItemTypeChecker.CONSIDER_DELETED:
-        print(f"(CONSIDER_DELETED) Items deleted = {ItemTypeChecker.DELETED_SEEN}")
-        print(f"db_total={db_total} es_total={es_total} deleted_seen={ItemTypeChecker.deleted_seen()}")
-        assert db_total == es_total + ItemTypeChecker.deleted_seen()
-    else:
-        print(f"(not CONSIDER_DELETED) db_total={db_total} es_total={es_total} deleted_seen={ItemTypeChecker.deleted_seen()}")
-        assert db_total == es_total
-
-@pytest.mark.manual
-@pytest.mark.skip
-def test_index_data_workbook_after_posting_deleted_page_c4_570(workbook, es_testapp, html_es_testapp):
-    """
-    Regression test for C4-570.
-
-    This test takes a long time to run since it runs a long-running test three different ways.
-    This test must be invoked manually. 'make test' and 'make remote-test' will skip it because it's marked manual.
-    See details at https://hms-dbmi.atlassian.net/browse/C4-570
-    """
-
-    # Running the test this way should work fine
-    test_index_data_workbook(workbook, es_testapp, html_es_testapp)
-
-    # But now let's add a deleted page.
-    # test_index_data_workbook will fail if preceded by anything that makes a deleted page
-    es_testapp.post_json('/pages/',
-                         {
-                             "name": "help/user-guide/sample-deleted-page",
-                             "title": "Sample Deleted Page",
-                             "content": [],
-                             "uuid": "db807a0f-2e76-4c77-a6bb-313a9c174252",
-                             "status": "deleted"
-                         },
-                         status=201)
-
-    # This test will now protect itself against failure.
-    test_index_data_workbook(workbook, es_testapp, html_es_testapp)
-
-    # And we can see that if we hadn't protected ourselves against failure, this would reliably fail.
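-    # (Sketch of the unprotected failure mode: with CONSIDER_DELETED forced
-    # off below, deleted items are neither fetched via &status=deleted nor
-    # tolerated, so the run surfaces a webtest.AppError instead of passing.)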
-    with pytest.raises(webtest.AppError):
-        with local_attrs(ItemTypeChecker, CONSIDER_DELETED=False):
-            test_index_data_workbook(workbook, es_testapp, html_es_testapp)
-
-
-######################################
-## Search-based visualization tests ##
-######################################
-
-
-def test_barplot_aggregation_endpoint(workbook, es_testapp):
-
-    # Check what we get back
-
-    search_result = es_testapp.get('/browse/?type=ExperimentSetReplicate&experimentset_type=replicate').json
-    search_result_count = len(search_result['@graph'])
-
-    # We should get back same count as from search results here.
-    # But on Travis oftentimes we don't, so we compare either against count of inserts
-    # --or-- count returned from regular results.
-    exp_set_test_inserts = list(get_inserts('inserts', 'experiment_set_replicate'))
-    count_exp_set_test_inserts = len(exp_set_test_inserts)
-
-    # Now, test the endpoint after ensuring we have the data correctly loaded into ES.
-    # We should get back same count as from search results here.
-    res = es_testapp.post_json('/bar_plot_aggregations', {
-        "search_query_params": {"type": ['ExperimentSetReplicate']},
-        "fields_to_aggregate_for": ["experiments_in_set.experiment_type.display_title", "award.project"]
-    }).json
-
-    print()
-
-    # Our total count for experiment_sets should match # of exp_set_replicate inserts
-
-    assert (res['total']['experiment_sets'] == count_exp_set_test_inserts) or (res['total']['experiment_sets'] == search_result_count)
-
-    assert res['field'] == 'experiments_in_set.experiment_type.display_title'  # top level field
-
-    assert isinstance(res['terms'], dict) is True
-
-    assert len(res["terms"].keys()) > 0
-
-    # assert isinstance(res['terms']["CHIP-seq"], dict) is True  # A common term likely to be found.
-
-    # assert res["terms"]["CHIP-seq"]["field"] == "award.project"  # Child-field
-
-    # We only have 4DN as single award.project in test inserts so should have values in all buckets, though probably less than total.
-    # assert res["terms"]["CHIP-seq"]["total"]["experiment_sets"] > 0
-    # assert res["terms"]["CHIP-seq"]["total"]["experiment_sets"] < count_exp_set_test_inserts
-
-    # assert res["terms"]["CHIP-seq"]["terms"]["4DN"]["experiment_sets"] > 0
-    # assert res["terms"]["CHIP-seq"]["terms"]["4DN"]["experiment_sets"] < count_exp_set_test_inserts
-
-
-def test_recently_released_datasets_endpoint(workbook, es_testapp):
-
-    max_row_count = 1
-
-    # get experiment sets count that are released and are included in a dataset
-    search_result = es_testapp.get('/browse/?type=ExperimentSetReplicate&experimentset_type=replicate&status=released&dataset_label%21=No+value').json
-    search_result_count = len(search_result['@graph'])
-
-    # Test the endpoint after ensuring we have the data correctly loaded into ES.
-    # We should get back same count as from search results here.
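-    # Expected response shape, per the assertions below (illustrative):
-    # {'total': {'experiment_sets': <int>}, 'terms': {<dataset_row_key>: {...}, ...}}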
-    datasets_res = es_testapp.get('/recently_released_datasets?max_row_count=' + str(max_row_count)).json
-
-    assert (datasets_res['total']['experiment_sets'] == search_result_count)
-
-    assert isinstance(datasets_res['terms'], dict) is True
-
-    assert len(datasets_res["terms"].keys()) > 0
-
-    assert len(datasets_res["terms"].keys()) <= max_row_count
-
-
-@pytest.fixture(scope='session')
-def hidden_facet_data_one():
-    """ Sample TestingHiddenFacets object we are going to facet on """
-    return {
-        'first_name': 'John',
-        'last_name': 'Doe',
-        'sid': 1,
-        'unfaceted_string': 'hello',
-        'unfaceted_integer': 123,
-        'disabled_string': 'orange',
-        'disabled_integer': 789,
-        'unfaceted_object': {
-            'mother': 'Anne',
-            'father': 'Bob'
-        },
-        'unfaceted_array_of_objects': [
-            {
-                'fruit': 'orange',
-                'color': 'orange',
-                'uid': 1
-            },
-            {
-                'fruit': 'banana',
-                'color': 'yellow',
-                'uid': 2
-            },
-        ]
-    }
-
-
-@pytest.fixture(scope='session')
-def hidden_facet_data_two():
-    """ A second sample TestingHiddenFacets object we are going to facet on """
-    return {
-        'first_name': 'Boston',
-        'last_name': 'Bruins',
-        'sid': 2,
-        'unfaceted_string': 'world',
-        'unfaceted_integer': 456,
-        'disabled_string': 'apple',
-        'disabled_integer': 101112,
-        'unfaceted_object': {
-            'mother': 'Candice',
-            'father': 'Doug'
-        },
-        'unfaceted_array_of_objects': [
-            {
-                'fruit': 'blueberry',
-                'color': 'blue',
-                'uid': 3
-            },
-            {
-                'fruit': 'mango',
-                'color': 'yellow',
-                'uid': 4
-            },
-        ]
-    }
-
-
-@pytest.fixture(scope='module')  # TODO consider this further...
-def hidden_facet_test_data(es_testapp, hidden_facet_data_one, hidden_facet_data_two):
-    es_testapp.post_json('/TestingHiddenFacets', hidden_facet_data_one, status=201)
-    es_testapp.post_json('/TestingHiddenFacets', hidden_facet_data_two, status=201)
-    es_testapp.post_json('/index', {'record': False})
-
-
-class TestSearchHiddenAndAdditionalFacets:
-    """ Encapsulates tests meant for testing behavior associated with default_hidden, hidden
-        and additional_facets
-    """
-    DEFAULT_FACETS = ['first_name', 'status', 'type']
-    DEFAULT_HIDDEN_FACETS = ['last_name', 'sid']
-    ADDITIONAL_FACETS = ['unfaceted_string', 'unfaceted_integer']
-    DISABLED_FACETS = ['disabled_string', 'disabled_integer']
-
-    @staticmethod
-    def check_and_verify_result(facets, desired_facet, number_expected):
-        """ Helper method for later tests that checks terms count and average. """
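-        # A terms facet carries a 'terms' bucket list; a numeric facet is
-        # aggregated as stats and carries an 'avg' value instead.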
""" - for facet in facets: - field = facet['field'] - if field == desired_facet and 'terms' in facet: - assert len(facet['terms']) == number_expected - elif field == facet and 'avg' in facet: - assert facet['avg'] == number_expected - else: - continue - break - - def test_search_default_hidden_facets_dont_show(self, es_testapp, hidden_facet_test_data): - facets = es_testapp.get('/search/?type=TestingHiddenFacets').json['facets'] - actual = [facet['field'] for facet in facets] - assert self.DEFAULT_FACETS == sorted(actual) - - @pytest.mark.parametrize('facet', ADDITIONAL_FACETS) - def test_search_one_additional_facet(self, es_testapp, hidden_facet_test_data, facet): - """ Tests that specifying each of the 'additional' facets works correctly """ - facets = es_testapp.get('/search/?type=TestingHiddenFacets&additional_facet=%s' % facet).json['facets'] - expected = self.DEFAULT_FACETS + [facet] - actual = [facet['field'] for facet in facets] - assert sorted(expected) == sorted(actual) - - def test_search_multiple_additional_facets(self, es_testapp, hidden_facet_test_data): - """ Tests that enabling multiple additional facets works """ - facets = es_testapp.get('/search/?type=TestingHiddenFacets' - '&additional_facet=unfaceted_string' - '&additional_facet=unfaceted_integer').json['facets'] - expected = self.DEFAULT_FACETS + self.ADDITIONAL_FACETS - for facet in facets: - assert facet['field'] in expected - if facet['field'] == 'unfaceted_integer': - assert facet['aggregation_type'] == 'stats' - else: - assert facet['aggregation_type'] == 'terms' - - @pytest.mark.parametrize('facet', DEFAULT_HIDDEN_FACETS) - def test_search_one_additional_default_hidden_facet(self, es_testapp, hidden_facet_test_data, facet): - """ Tests that passing default_hidden facets to additional_facets works correctly """ - facets = es_testapp.get('/search/?type=TestingHiddenFacets&additional_facet=%s' % facet).json['facets'] - expected = self.DEFAULT_FACETS + [facet] - actual = [facet['field'] for facet in facets] - assert sorted(expected) == sorted(actual) - - def test_search_multiple_additional_default_hidden_facets(self, es_testapp, hidden_facet_test_data): - """ Tests that passing multiple hidden_facets as additionals works correctly """ - facets = es_testapp.get('/search/?type=TestingHiddenFacets' - '&additional_facet=last_name' - '&additional_facet=sid').json['facets'] - expected = self.DEFAULT_FACETS + self.DEFAULT_HIDDEN_FACETS - for facet in facets: - assert facet['field'] in expected - if facet['field'] == 'sid': - assert facet['aggregation_type'] == 'stats' - else: - assert facet['aggregation_type'] == 'terms' - - @pytest.mark.parametrize('_facets', [ - ['last_name', 'unfaceted_integer'], # second slot holds number field - ['unfaceted_string', 'sid'] - ]) - def test_search_mixing_additional_and_default_hidden(self, es_testapp, hidden_facet_test_data, _facets): - """ Tests that we can mix additional_facets with those both on and off schema """ - [sample_string_field, sample_number_field] = _facets - facets = es_testapp.get('/search/?type=TestingHiddenFacets' - '&additional_facet=%s' - '&additional_facet=%s' % (sample_string_field, sample_number_field)).json['facets'] - expected = self.DEFAULT_FACETS + _facets - actual = [facet['field'] for facet in facets] - assert sorted(expected) == sorted(actual) - for facet in facets: - if facet['field'] == sample_number_field: # second slot holds number field - assert facet['aggregation_type'] == 'stats' - else: - assert facet['aggregation_type'] == 'terms' - - 
-    @pytest.mark.parametrize('_facet', DISABLED_FACETS)
-    def test_search_disabled_overrides_additional(self, es_testapp, hidden_facet_test_data, _facet):
-        """ Disabled facets should NEVER be faceted on """
-        facets = es_testapp.get('/search/?type=TestingHiddenFacets&additional_facet=%s' % _facet).json['facets']
-        field_names = [facet['field'] for facet in facets]
-        assert _facet not in field_names  # disabled facets should not be here, even if specified
-
-    @pytest.mark.parametrize('_facets', [
-        ('last_name', 'unfaceted_integer', 'disabled_integer'),  # default_hidden second
-        ('sid', 'unfaceted_string', 'disabled_string')  # disabled always last
-    ])
-    def test_search_additional_mixing_disabled_default_hidden(self, es_testapp, hidden_facet_test_data, _facets):
-        """ Tests that supplying multiple additional facets combined with hidden still respects the
-            hidden restriction. """
-        facets = es_testapp.get('/search/?type=TestingHiddenFacets'
-                                '&additional_facet=%s'
-                                '&additional_facet=%s'
-                                '&additional_facet=%s' % (_facets[0], _facets[1], _facets[2])).json['facets']
-        expected = self.DEFAULT_FACETS + [_facets[0], _facets[1]]  # first two should show
-        actual = [facet['field'] for facet in facets]
-        assert sorted(expected) == sorted(actual)
-
-    @pytest.mark.parametrize('_facet', [
-        'unfaceted_object.mother',
-        'unfaceted_object.father'
-    ])
-    def test_search_additional_object_facets(self, es_testapp, hidden_facet_test_data, _facet):
-        """ Tests that specifying an object field as an additional_facet works correctly """
-        facets = es_testapp.get('/search/?type=TestingHiddenFacets'
-                                '&additional_facet=%s' % _facet).json['facets']
-        expected = self.DEFAULT_FACETS + [_facet]
-        actual = [facet['field'] for facet in facets]
-        assert sorted(expected) == sorted(actual)
-
-    @pytest.mark.parametrize('_facet, n_expected', [
-        ('unfaceted_array_of_objects.fruit', 4),
-        ('unfaceted_array_of_objects.color', 3),
-        ('unfaceted_array_of_objects.uid', 2.5)  # stats avg
-    ])
-    def test_search_additional_nested_facets(self, es_testapp, hidden_facet_test_data, _facet, n_expected):
-        """ Tests that specifying an array of object field mapped with nested as an additional_facet
-            works correctly. """
-        [desired_facet] = [facet for facet in es_testapp.get('/search/?type=TestingHiddenFacets'
-                                                             '&additional_facet=%s' % _facet).json['facets']
-                           if facet['field'] == _facet]
-        if 'terms' in desired_facet:
-            assert len(desired_facet['terms']) == n_expected
-        else:
-            assert desired_facet['avg'] == n_expected
-
-    @pytest.fixture
-    def many_non_nested_facets(self, es_testapp, hidden_facet_test_data):
-        return es_testapp.get('/search/?type=TestingHiddenFacets'
-                              '&additional_facet=non_nested_array_of_objects.fruit'
-                              '&additional_facet=non_nested_array_of_objects.color'
-                              '&additional_facet=non_nested_array_of_objects.uid').json['facets']
-
-    @pytest.mark.parametrize('_facet, n_expected', [
-        ('unfaceted_array_of_objects.fruit', 4),
-        ('unfaceted_array_of_objects.color', 3),
-        ('unfaceted_array_of_objects.uid', 2.5)  # stats avg
-    ])
-    def test_search_additional_non_nested_facets(self, many_non_nested_facets, _facet, n_expected):
-        """ Tests trying to facet on an array of objects field that is not nested, requesting
-            all at the same time.
-        """
-        self.check_and_verify_result(many_non_nested_facets, _facet, n_expected)
-
-
-@pytest.fixture(scope='session')
-def bucket_range_data_raw():
-    """ 10 objects with a numerical field we will bucket on.
-        'special_integer' holds the loop index i.
-        'special_object_that_holds_integer' holds a single integer field with i as well
-        'array_of_objects_that_holds_integer' holds 2 objects that are mirrors of one another
-    """
-    return [{
-        'special_integer': i,
-        'special_object_that_holds_integer': {
-            'embedded_integer': i
-        },
-        'array_of_objects_that_holds_integer': [
-            {
-                'embedded_identifier': 'forward',
-                'embedded_integer': 0 if i < 5 else 9
-            },
-            {
-                'embedded_identifier': 'reverse',
-                'embedded_integer': 9 if i < 5 else 0
-            },
-        ]
-    } for i in range(10)]
-
-
-@pytest.fixture(scope='module')  # XXX: consider scope further - Will 11/5/2020
-def bucket_range_data(es_testapp, bucket_range_data_raw):
-    for entry in bucket_range_data_raw:
-        es_testapp.post_json('/TestingBucketRangeFacets', entry, status=201)
-    es_testapp.post_json('/index', {'record': False})
-
-
-class TestSearchBucketRangeFacets:
-    """ Class that encapsulates tests for BucketRanges """
-
-    @staticmethod
-    def verify_facet_counts(facets, expected_fields, expected_cardinality, expected_count):
-        """ Checks for given expected facets, checking bucket cardinality and document count
-            Note that the actual range properties are trivial (we are not testing elasticsearch)
-        """
-        for facet in facets:
-            if facet['field'] in expected_fields:
-                assert len(facet['ranges']) == expected_cardinality
-                for bucket in facet['ranges']:
-                    assert bucket['doc_count'] == expected_count
-
-    @staticmethod
-    def select_facet(facets, facet_name):
-        result = None
-        for facet in facets:
-            if facet['field'] == facet_name:
-                result = facet
-                break
-        return result
-
-    @pytest.mark.parametrize('expected_fields, expected_counts', [
-        (['special_integer', 'special_object_that_holds_integer.embedded_integer'], 5),
-        (['array_of_objects_that_holds_integer.embedded_integer'], 10)
-    ])
-    def test_search_bucket_range_simple(self, es_testapp, bucket_range_data, expected_fields, expected_counts):
-        """ Tests searching a collection of documents with varying integer field types that
-            have the same distribution - all of which should give the same results. """
-        res = es_testapp.get('/search/?type=TestingBucketRangeFacets').json['facets']
-        self.verify_facet_counts(res, expected_fields, 2, expected_counts)
-
-    @pytest.mark.parametrize('identifier', [
-        'reverse', 'forward'
-    ])
-    def test_search_bucket_range_nested_qualifier(self, es_testapp, bucket_range_data, identifier):
-        """ Tests aggregating on a nested field while selecting for a field within the nested object. """
-        res = es_testapp.get('/search/?type=TestingBucketRangeFacets'
-                             '&array_of_objects_that_holds_integer.embedded_identifier=%s' % identifier).json['facets']
-        self.verify_facet_counts(res, ['array_of_objects_that_holds_integer.embedded_integer'],
-                                 2, 10)
-
-    @pytest.mark.parametrize('identifier', [
-        'reverse', 'forward'
-    ])
-    def test_search_bucket_range_nested_qualifier_with_range(self, es_testapp, bucket_range_data, identifier):
-        """ Tests aggregating on a nested field while selecting for a field within the nested object (no change). """
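-        # Counts stay at 2 buckets x 10 docs despite the .from=6 qualifier:
-        # per the fixture above, every document holds both a 0 and a 9 in its
-        # mirrored nested objects, so each range bucket matches all 10 docs.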
""" - res = es_testapp.get('/search/?type=TestingBucketRangeFacets' - '&array_of_objects_that_holds_integer.embedded_integer.from=6' - '&array_of_objects_that_holds_integer.embedded_identifier=%s' % identifier).json['facets'] - self.verify_facet_counts(res, ['array_of_objects_that_holds_integer.embedded_integer'], - 2, 10) - facet_with_labels = self.select_facet(res, 'array_of_objects_that_holds_integer.embedded_integer') - for r in facet_with_labels['ranges']: - assert 'label' in r - assert r['label'] in ['Low', 'High'] diff --git a/src/encoded/tests/test_server_defaults.py b/src/encoded/tests/test_server_defaults.py deleted file mode 100644 index e4a8191c02..0000000000 --- a/src/encoded/tests/test_server_defaults.py +++ /dev/null @@ -1,46 +0,0 @@ -import pytest -import webtest - -from dcicutils.qa_utils import notice_pytest_fixtures -from .. import main - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -def test_server_defaults(admin, anontestapp): - notice_pytest_fixtures(admin, anontestapp) - - email = admin['email'] - extra_environ = {'REMOTE_USER': str(email)} - res = anontestapp.post_json( - '/testing_server_default', {}, status=201, - extra_environ=extra_environ, - ) - item = res.json['@graph'][0] - assert item['now'].startswith('2') - assert item['user'] == admin['@id'] - assert item['accession'].startswith('TSTBS') # recent change, use TEST accession instead - - anontestapp.patch_json( - res.location, {}, status=200, - extra_environ=extra_environ, - ) - - -@pytest.fixture(scope='session') -def test_accession_app(request, check_constraints, zsa_savepoints, app_settings): - notice_pytest_fixtures(request, check_constraints, zsa_savepoints, app_settings) - - app_settings = app_settings.copy() - return main({}, **app_settings) - - -@pytest.fixture -def test_accession_anontestapp(request, test_accession_app, external_tx, zsa_savepoints): - """ TestApp with JSON accept header. """ - notice_pytest_fixtures(request, test_accession_app, external_tx, zsa_savepoints) - environ = { - 'HTTP_ACCEPT': 'application/json', - } - return webtest.TestApp(test_accession_app, environ) diff --git a/src/encoded/tests/test_static_page.py b/src/encoded/tests/test_static_page.py deleted file mode 100644 index d0860179c1..0000000000 --- a/src/encoded/tests/test_static_page.py +++ /dev/null @@ -1,148 +0,0 @@ -import pytest -import webtest - -from dcicutils.qa_utils import notice_pytest_fixtures -#from .workbook_fixtures import es_app_settings, es_app # are these needed? 
-
-
-# notice_pytest_fixtures(es_app_settings, es_app)
-
-pytestmark = [pytest.mark.indexing, pytest.mark.working]
-
-
-@pytest.fixture(scope='module')
-def help_page_section_json():
-    return {
-        "title": "",
-        "name": "help.user-guide.rest-api.rest_api_submission",
-        "file": "/docs/source/rest_api_submission.rst",
-        "uuid": "442c8aa0-dc6c-43d7-814a-854af460b020"
-    }
-
-
-@pytest.fixture(scope='module')
-def help_page_json():
-    return {
-        "name": "help/user-guide/rest-api",
-        "title": "The REST-API",
-        "content": ["442c8aa0-dc6c-43d7-814a-854af460b020"],
-        "uuid": "a2aa8bb9-9dd9-4c80-bdb6-2349b7a3540d",
-        "table-of-contents": {
-            "enabled": True,
-            "header-depth": 4,
-            "list-styles": ["decimal", "lower-alpha", "lower-roman"]
-        }
-    }
-
-
-@pytest.fixture(scope='module')
-def help_page_json_draft():
-    return {
-        "name": "help/user-guide/rest-api-draft",
-        "title": "The REST-API",
-        "content": ["442c8aa0-dc6c-43d7-814a-854af460b020"],
-        "uuid": "a2aa8bb9-9dd9-4c80-bdb6-2349b7a3540c",
-        "table-of-contents": {
-            "enabled": True,
-            "header-depth": 4,
-            "list-styles": ["decimal", "lower-alpha", "lower-roman"]
-        },
-        "status": "draft"
-    }
-
-
-@pytest.fixture(scope='module')
-def help_page_json_deleted():
-    return {
-        "name": "help/user-guide/rest-api-deleted",
-        "title": "The REST-API",
-        "content": ["442c8aa0-dc6c-43d7-814a-854af460b020"],
-        "uuid": "a2aa8bb9-9dd9-4c80-bdb6-2349b7a3540a",
-        "table-of-contents": {
-            "enabled": True,
-            "header-depth": 4,
-            "list-styles": ["decimal", "lower-alpha", "lower-roman"]
-        },
-        "status": "deleted"
-    }
-
-
-@pytest.fixture(scope='module')
-def posted_help_page_section(testapp, help_page_section_json):
-    try:
-        res = testapp.post_json('/static-sections/', help_page_section_json, status=201)
-        val = res.json['@graph'][0]
-    except webtest.AppError:
-        res = testapp.get('/' + help_page_section_json['uuid'], status=301).follow()
-        val = res.json
-    return val
-
-
-@pytest.fixture(scope='module')
-def help_page(testapp, posted_help_page_section, help_page_json):
-    try:
-        res = testapp.post_json('/pages/', help_page_json, status=201)
-        val = res.json['@graph'][0]
-    except webtest.AppError:
-        res = testapp.get('/' + help_page_json['uuid'], status=301).follow()
-        val = res.json
-    return val
-
-
-@pytest.fixture(scope='module')
-def help_page_deleted(testapp, posted_help_page_section, help_page_json_draft):
-    try:
-        res = testapp.post_json('/pages/', help_page_json_draft, status=201)
-        val = res.json['@graph'][0]
-    except webtest.AppError:
-        res = testapp.get('/pages/' + help_page_json_draft['uuid'], status=301).follow()
-        val = res.json
-    return val
-
-
-@pytest.fixture(scope='module')
-def help_page_restricted(testapp, posted_help_page_section, help_page_json_deleted):
-    try:
-        res = testapp.post_json('/pages/', help_page_json_deleted, status=201)
-        val = res.json['@graph'][0]
-    except webtest.AppError:
-        res = testapp.get('/pages/' + help_page_json_deleted['uuid'], status=301).follow()
-        val = res.json
-    return val
-
-
-def test_get_help_page(testapp, help_page):
-    help_page_url = "/pages/" + help_page['uuid'] + "/"
-    res = testapp.get(help_page_url, status=200)
-    assert res.json['@id'] == help_page_url
-    # assert 'HelpPage' in res.json['@type']
-    # assert 'StaticPage' in res.json['@type']
-    # assert res.json['content'] == help_page['content']  # No longer works; latter is set to an @id of static_section
-    assert 'Accession and uuid are automatically assigned during initial posting' in res.json['content'][0]['content']  # Instead let's check that what we have embedded on GET request is inside our doc file (rest_api_submission.md).
-    assert res.json['table-of-contents'] == help_page['table-of-contents']
-
-
-def test_get_help_page_deleted(anonhtmltestapp, help_page_deleted):
-    help_page_url = "/pages/" + help_page_deleted['uuid'] + "/"
-    anonhtmltestapp.get(help_page_url, status=403)
-
-
-def test_get_help_page_no_access(anonhtmltestapp, testapp, help_page_restricted):
-    help_page_url = "/pages/" + help_page_restricted['uuid'] + "/"
-    anonhtmltestapp.get(help_page_url, status=403)
-    testapp.get(help_page_url, status=200)
-
-
-def test_page_unique_name(testapp, help_page, help_page_deleted):
-    # POST again with same name and expect validation error
-    new_page = {'name': help_page['name']}
-    res = testapp.post_json('/page', new_page, status=422)
-    expected_val_err = "%s already exists with name '%s'" % (help_page['uuid'], new_page['name'])
-    actual_error_description = res.json['errors'][0]['description']
-    print("expected:", expected_val_err)
-    print("actual:", actual_error_description)
-    assert expected_val_err in actual_error_description
-
-    # also test PATCH of an existing page with another name
-    res = testapp.patch_json(help_page_deleted['@id'], {'name': new_page['name']}, status=422)
-    assert expected_val_err in res.json['errors'][0]['description']
diff --git a/src/encoded/tests/test_types_access_key.py b/src/encoded/tests/test_types_access_key.py
deleted file mode 100644
index 3e424dbac8..0000000000
--- a/src/encoded/tests/test_types_access_key.py
+++ /dev/null
@@ -1,181 +0,0 @@
-import pytest
-
-from base64 import b64encode
-from pyramid.compat import ascii_native_
-from snovault import COLLECTIONS
-from ..edw_hash import EDWHash
-
-
-pytestmark = [pytest.mark.working, pytest.mark.setone]
-
-
-def basic_auth(username, password):
-    return 'Basic ' + ascii_native_(b64encode(('%s:%s' % (username, password)).encode('utf-8')))
-
-
-def auth_header(access_key):
-    return basic_auth(access_key['access_key_id'], access_key['secret_access_key'])
-
-
-@pytest.fixture
-def no_login_submitter(external_tx, testapp):
-    """ This is a user who has deleted status, so auth'd requests should be rejected """
-    item = {
-        'first_name': 'ENCODE',
-        'last_name': 'Submitter',
-        'email': 'no_login_submitter@example.org',
-        'status': 'deleted',
-    }
-    # User @@object view has keys omitted.
-    res = testapp.post_json('/user', item)
-    return testapp.get(res.location).json
-
-
-@pytest.fixture
-def submitter(testapp):
-    """ This is a legit submitter with current status, for whom auth'd requests should succeed """
-    item = {
-        'first_name': 'ENCODE',
-        'last_name': 'Submitter',
-        'email': 'encode_submitter@example.org',
-        'status': "current"
-    }
-    # User @@object view has keys omitted.
-    res = testapp.post_json('/user', item)
-    return testapp.get(res.location).json
-
-
-@pytest.fixture
-def access_key(testapp, submitter):
-    description = 'My programmatic key'
-    item = {
-        'user': submitter['@id'],
-        'description': description,
-    }
-    res = testapp.post_json('/access_key', item)
-    result = res.json['@graph'][0].copy()
-    result['secret_access_key'] = res.json['secret_access_key']
-    return result
-
-
-@pytest.fixture
-def no_login_access_key(external_tx, testapp, no_login_submitter):
-    description = 'My programmatic key'
-    item = {
-        'user': no_login_submitter['@id'],
-        'description': description,
-    }
-    res = testapp.post_json('/access_key', item)
-    result = res.json['@graph'][0].copy()
-    result['secret_access_key'] = res.json['secret_access_key']
-    return result
-
-
-def test_access_key_get(anontestapp, access_key):
-    headers = {'Authorization': auth_header(access_key)}
-    anontestapp.get('/', headers=headers)
-
-
-def test_access_key_get_bad_username(anontestapp, access_key):
-    headers = {'Authorization': basic_auth('not_an_access_key', 'bad_password')}
-    anontestapp.get('/', headers=headers, status=401)
-
-
-def test_access_key_get_bad_password(anontestapp, access_key):
-    headers = {'Authorization': basic_auth(access_key['access_key_id'], 'bad_password')}
-    anontestapp.get('/', headers=headers, status=401)
-
-
-def test_access_key_principals(anontestapp, execute_counter, access_key, submitter):
-    # TODO - needs to be reviewed in context of what principals we want on access keys
-    headers = {'Authorization': auth_header(access_key)}
-    with execute_counter.expect(2):
-        res = anontestapp.get('/@@testing-user', headers=headers)
-    assert res.json['authenticated_userid'] == 'accesskey.' + access_key['access_key_id']
-    assert sorted(res.json['effective_principals']) == [
-        'accesskey.%s' % access_key['access_key_id'],
-        'system.Authenticated',
-        'system.Everyone',
-        'userid.%s' % submitter['uuid'],
-    ]
-
-
-def test_access_key_self_create_no_submits_for(anontestapp, access_key, submitter):
-    extra_environ = {'REMOTE_USER': str(submitter['email'])}
-    res = anontestapp.post_json(
-        '/access_key/', {}, extra_environ=extra_environ
-    )
-    access_key_id = res.json['access_key_id']
-    headers = {
-        'Authorization': basic_auth(access_key_id, res.json['secret_access_key']),
-    }
-    res = anontestapp.get('/@@testing-user', headers=headers)
-    assert res.json['authenticated_userid'] == 'accesskey.' + access_key_id
-
-
-def test_access_key_self_create(anontestapp, access_key, submitter):
-    extra_environ = {'REMOTE_USER': str(submitter['email'])}
-    res = anontestapp.post_json(
-        '/access_key/', {}, extra_environ=extra_environ
-    )
-    access_key_id = res.json['access_key_id']
-    headers = {
-        'Authorization': basic_auth(access_key_id, res.json['secret_access_key']),
-    }
-    res = anontestapp.get('/@@testing-user', headers=headers)
-    assert res.json['authenticated_userid'] == 'accesskey.' + access_key_id
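-
-
-# Illustrative note on the basic_auth() helper defined above: it builds a
-# standard HTTP Basic header, e.g. (hypothetical credentials)
-# basic_auth('ABC123', 's3cret') == 'Basic QUJDMTIzOnMzY3JldA=='
-# i.e. 'Basic ' + base64 of 'ABC123:s3cret'.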
-
-
-def test_access_key_submitter_cannot_create_for_someone_else(anontestapp, submitter):
-    extra_environ = {'REMOTE_USER': str(submitter['email'])}
-    anontestapp.post_json(
-        '/access_key/', {'user': 'BOGUS'}, extra_environ=extra_environ, status=422)
-
-
-def test_access_key_reset(anontestapp, access_key, submitter):
-    headers = {'Authorization': auth_header(access_key)}
-    extra_environ = {'REMOTE_USER': str(submitter['email'])}  # Must be native string for Python 2.7
-    res = anontestapp.post_json(
-        access_key['@id'] + '@@reset-secret', {}, extra_environ=extra_environ)
-    new_headers = {
-        'Authorization': basic_auth(access_key['access_key_id'], res.json['secret_access_key']),
-    }
-    anontestapp.get('/@@testing-user', headers=headers, status=401)
-    res = anontestapp.get('/@@testing-user', headers=new_headers)
-    assert res.json['authenticated_userid'] == 'accesskey.' + access_key['access_key_id']
-
-
-def test_access_key_delete_disable_login(anontestapp, testapp, access_key):
-    testapp.patch_json(access_key['@id'], {'status': 'deleted'})
-    headers = {'Authorization': auth_header(access_key)}
-    anontestapp.get('/@@testing-user', headers=headers, status=401)
-
-
-def test_access_key_user_disable_login(anontestapp, no_login_access_key):
-    access_key = no_login_access_key
-    headers = {'Authorization': auth_header(access_key)}
-    anontestapp.get('/@@testing-user', headers=headers, status=401)
-
-
-def test_access_key_edit(anontestapp, access_key):
-    headers = {'Authorization': auth_header(access_key)}
-    new_description = 'new description'
-    properties = {'description': new_description}
-    anontestapp.put_json(access_key['@id'], properties, headers=headers)
-
-    res = anontestapp.get(access_key['@id'], properties, headers=headers)
-    assert res.json['description'] == new_description
-
-
-@pytest.mark.parametrize('frame', ['', 'raw', 'object', 'embedded', 'page'])
-def test_access_key_view_hides_secret_access_key_hash(testapp, access_key, frame):
-    query = '?frame=' + frame if frame else ''
-    res = testapp.get(access_key['@id'] + query)
-    assert 'secret_access_key_hash' not in res.json
-
-
-def test_access_key_uses_edw_hash(app, access_key):
-    root = app.registry[COLLECTIONS]
-    obj = root.by_item_type['access_key'][access_key['access_key_id']]
-    pwhash = obj.properties['secret_access_key_hash']
-    assert EDWHash.hash(access_key['secret_access_key']) == pwhash
diff --git a/src/encoded/tests/test_types_antibody.py b/src/encoded/tests/test_types_antibody.py
deleted file mode 100644
index 0a44f8d749..0000000000
--- a/src/encoded/tests/test_types_antibody.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import pytest
-
-
-pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
-
-
-@pytest.fixture
-def antibody_data(lab, award):
-    return {
-        'lab': lab['@id'],
-        'award': award['@id'],
-        'description': 'Test Antibody',
-        'antibody_name': 'test-Ab',
-        'antibody_product_no': '123'
-    }
-
-
-@pytest.fixture
-def post_antibody_vendor(testapp, lab, award):
-    item = {'lab': lab['@id'],
-            'award': award['@id'],
-            'title': 'Vendor Biolabs'}
-    return testapp.post_json('/vendor', item).json['@graph'][0]
-
-
-@pytest.fixture
-def ab_w_name(testapp, antibody_data):
-    return testapp.post_json('/antibody', antibody_data).json['@graph'][0]
-
-
-def test_antibody_update_antibody_id(ab_w_name):
-    assert ab_w_name['antibody_id'] == 'test-Ab-123'
-
-
-def test_antibody_display_title(testapp, ab_w_name, post_antibody_vendor):
-    assert ab_w_name['display_title'] == 'test-Ab (123)'
-    res = testapp.patch_json(
-        ab_w_name['@id'],
-        {'antibody_vendor': post_antibody_vendor['@id']}
-    ).json['@graph'][0]
-    assert res['display_title'] == 'test-Ab (Vendor Biolabs, 123)'
diff --git a/src/encoded/tests/test_types_award.py b/src/encoded/tests/test_types_award.py
deleted file mode 100644
index fb63ae34cf..0000000000
--- a/src/encoded/tests/test_types_award.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import pytest
-
-
-pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
-
-
-@pytest.fixture
-def maward():
-    return {'name': 'U1234567'}
-
-
-def test_award_center_title_name_only(testapp, maward):
-    res = testapp.post_json('/award', maward).json['@graph'][0]
-    assert res['center_title'] == maward['name']
-
-
-def test_award_center_title_w_pi(testapp, maward, submitter):
-    maward['pi'] = submitter['@id']
-    res = testapp.post_json('/award', maward).json['@graph'][0]
-    assert res['center_title'] == submitter['last_name']
-
-
-def test_award_center_title_w_desc(testapp, maward):
-    maward['description'] = 'DCIC: this is a cool award'
-    res = testapp.post_json('/award', maward).json['@graph'][0]
-    assert res['center_title'] == 'DCIC'
-
-
-def test_award_center_title_w_pi_and_desc(testapp, maward, submitter):
-    maward['description'] = 'DCIC: this is a cool award'
-    maward['pi'] = submitter['@id']
-    res = testapp.post_json('/award', maward).json['@graph'][0]
-    assert res['center_title'] == 'DCIC - Submitter'
-
-
-def test_award_center_title_w_center(testapp, maward, submitter):
-    ctr = 'Snazzy Center'
-    maward['description'] = 'DCIC: this is a cool award'
-    maward['pi'] = submitter['@id']
-    maward['center'] = ctr
-    res = testapp.post_json('/award', maward).json['@graph'][0]
-    assert res['center_title'] == ctr
-
-
-def test_award_pi_name_w_pi(testapp, maward, pi):
-    maward['pi'] = pi['@id']
-    res = testapp.post_json('/award', maward).json['@graph'][0]
-    assert res['pi_name'] == pi['display_title']
-
-
-def test_award_pi_name_w_no_pi(testapp, maward):
-    res = testapp.post_json('/award', maward).json['@graph'][0]
-    assert 'pi_name' not in res
\ No newline at end of file
diff --git a/src/encoded/tests/test_types_badge.py b/src/encoded/tests/test_types_badge.py
deleted file mode 100644
index 5afd70fdad..0000000000
--- a/src/encoded/tests/test_types_badge.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import pytest
-
-
-pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
-
-
-@pytest.fixture
-def badge_data(lab, award):
-    return {
-        "title": "Test BaDGe Title",
-        "badge_classification": "Internal",
-        'lab': lab['@id'],
-        'award': award['@id']
-    }
-
-
-@pytest.fixture
-def positive_badge_data(badge_data):
-    badge_data['badge_classification'] = 'Commendation'
-    return badge_data
-
-
-@pytest.fixture
-def warning_badge_data(badge_data):
-    badge_data['badge_classification'] = 'Warning'
-    return badge_data
-
-
-def test_badge_update_name_no_caps(testapp, badge_data):
-    res = testapp.post_json('/badge', badge_data, status=201)
-    assert res.json['@graph'][0]['badge_name'] == "test-badge-title"
-
-
-def test_badge_update_name_no_punctuation_or_space(testapp, badge_data):
-    badge_data['title'] = "Test, = Badge! # -title?"
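-    # Expected slug (per the assertion below): lowercased, punctuation
-    # stripped, whitespace collapsed to hyphens -> "test-badge-title".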
-    res = testapp.post_json('/badge', badge_data, status=201)
-    assert res.json['@graph'][0]['badge_name'] == "test-badge-title"
-
-
-def test_badge_name_updates_on_patch(testapp, badge_data):
-    res1 = testapp.post_json('/badge', badge_data, status=201)
-    res2 = testapp.patch_json(res1.json['@graph'][0]['@id'], {'title': 'WaHoo'}, status=200)
-    assert res2.json['@graph'][0]['badge_name'] == "wahoo"
-
-
-def test_positive_badge_calc_props(testapp, positive_badge_data):
-    res = testapp.post_json('/badge', positive_badge_data, status=201)
-    print(res.json['@graph'][0])
-    assert res.json['@graph'][0]['commendation'] == 'Test BaDGe Title'
-    assert not res.json['@graph'][0].get('warning')
-
-
-def test_warning_badge_calc_props(testapp, warning_badge_data):
-    res = testapp.post_json('/badge', warning_badge_data, status=201)
-    assert res.json['@graph'][0]['warning'] == 'Test BaDGe Title'
-    assert not res.json['@graph'][0].get('commendation')
-
-
-def test_other_badge_calc_props(testapp, badge_data):
-    res = testapp.post_json('/badge', badge_data, status=201)
-    res_graph = res.json['@graph'][0]
-    assert not res_graph.get('warning') and not res_graph.get('commendation')
diff --git a/src/encoded/tests/test_types_bio_feature.py b/src/encoded/tests/test_types_bio_feature.py
deleted file mode 100644
index 606f0fb3f6..0000000000
--- a/src/encoded/tests/test_types_bio_feature.py
+++ /dev/null
@@ -1,191 +0,0 @@
-import pytest
-
-
-pytestmark = [pytest.mark.working, pytest.mark.schema]
-
-
-@pytest.fixture
-def so_ont(testapp):
-    return testapp.post_json('/ontology', {'ontology_name': 'SO'}).json['@graph'][0]
-
-
-@pytest.fixture
-def protein_term(testapp, so_ont):
-    gterm = {
-        'uuid': '8bea5bde-d860-49f8-b178-35d0dadbd644',
-        'term_id': 'SO:0000104', 'term_name': 'polypeptide',
-        'preferred_name': 'protein',
-        'source_ontologies': [so_ont['@id']]}
-    return testapp.post_json('/ontology_term', gterm).json['@graph'][0]
-
-
-@pytest.fixture
-def region_term(testapp, so_ont):
-    gterm = {
-        'uuid': '6bea5bde-d860-49f8-b178-35d0dadbd644',
-        'term_id': 'SO:0000001', 'term_name': 'region',
-        'source_ontologies': [so_ont['@id']]}
-    return testapp.post_json('/ontology_term', gterm).json['@graph'][0]
-
-
-@pytest.fixture
-def transcript_term(testapp, so_ont):
-    gterm = {
-        'uuid': '5bea5bde-d860-49f8-b178-35d0dadbd644',
-        'term_id': 'SO:0000673', 'term_name': 'transcript',
-        'source_ontologies': [so_ont['@id']]}
-    return testapp.post_json('/ontology_term', gterm).json['@graph'][0]
-
-
-@pytest.fixture
-def component_term(testapp, so_ont):
-    gterm = {
-        'uuid': '4bea5bde-d860-49f8-b178-35d0dadbd644',
-        'term_id': 'GO:0005575', 'term_name': 'cellular_component',
-        'source_ontologies': [so_ont['@id']]}
-    return testapp.post_json('/ontology_term', gterm).json['@graph'][0]
-
-
-@pytest.fixture
-def gene_item(testapp, lab, award, human):
-    gene_item = {'lab': lab['@id'], 'award': award['@id'], 'geneid': '5885'}
-    return testapp.post_json('/gene', gene_item).json['@graph'][0]
-
-
-@pytest.fixture
-def mouse_gene_item(testapp, lab, award, mouse):
-    gene_item = {'lab': lab['@id'], 'award': award['@id'], 'geneid': '16825'}
-    return testapp.post_json('/gene', gene_item).json['@graph'][0]
-
-
-@pytest.fixture
-def armadillo_gene_item(testapp, lab, award):
-    gene_item = {'lab': lab['@id'], 'award': award['@id'], 'geneid': '101428042'}
-    return testapp.post_json('/gene', gene_item).json['@graph'][0]
-
-
-@pytest.fixture
-def gene_bio_feature(testapp, lab, award, gene_term, gene_item, human):
-    item = {'award': award['@id'],
-            'lab': lab['@id'],
-            'description': 'Test Gene BioFeature',
-            'feature_type': gene_term['@id'],
-            'organism_name': 'human',
-            'relevant_genes': [gene_item['@id']]}
-    return testapp.post_json('/bio_feature', item).json['@graph'][0]
-
-
-@pytest.fixture
-def mouse_gene_bio_feature(testapp, lab, award, gene_term, mouse_gene_item, human, mouse):
-    item = {'award': award['@id'],
-            'lab': lab['@id'],
-            'description': 'Test Mouse Gene BioFeature',
-            'feature_type': gene_term['@id'],
-            'organism_name': 'mouse',
-            'relevant_genes': [mouse_gene_item['@id']]}
-    return testapp.post_json('/bio_feature', item).json['@graph'][0]
-
-
-@pytest.fixture
-def armadillo_gene_bio_feature(testapp, lab, award, gene_term, armadillo_gene_item):
-    item = {'award': award['@id'],
-            'lab': lab['@id'],
-            'description': 'Test Armadillo Gene BioFeature',
-            'feature_type': gene_term['@id'],
-            'relevant_genes': [armadillo_gene_item['@id']]}
-    return testapp.post_json('/bio_feature', item).json['@graph'][0]
-
-
-@pytest.fixture
-def multi_species_gene_bio_feature(testapp, lab, award, gene_term, gene_item, mouse_gene_item, human, mouse):
-    item = {'award': award['@id'],
-            'lab': lab['@id'],
-            'description': 'Test Multi Gene BioFeature',
-            'feature_type': gene_term['@id'],
-            'organism_name': 'multiple organisms',
-            'relevant_genes': [mouse_gene_item['@id'], gene_item['@id']]}
-    return testapp.post_json('/bio_feature', item).json['@graph'][0]
-
-
-@pytest.fixture
-def genomic_region_bio_feature(testapp, lab, award, region_term, some_genomic_region, human):
-    item = {'award': award['@id'],
-            'lab': lab['@id'],
-            'description': 'Test Region BioFeature',
-            'feature_type': region_term['@id'],
-            'organism_name': 'human',
-            'genome_location': [some_genomic_region['@id']]}
-    return testapp.post_json('/bio_feature', item).json['@graph'][0]
-
-
-def test_bio_feature_display_title_gene(gene_bio_feature, gene_item):
-    assert gene_bio_feature.get('display_title') == gene_item.get('display_title') + ' gene'
-
-
-def test_bio_feature_display_title_genomic_region(genomic_region_bio_feature):
-    assert genomic_region_bio_feature.get('display_title') == 'GRCh38:1:17-544 region'
-
-
-def test_bio_feature_display_title_genomic_region_w_preferred_label(testapp, genomic_region_bio_feature):
-    label = 'awesome region'
-    res = testapp.patch_json(genomic_region_bio_feature['@id'], {'preferred_label': label}, status=200)
-    assert res.json['@graph'][0].get('display_title') == label
-
-
-def test_bio_feature_display_title_protein_transcript(
-        testapp, gene_item, gene_bio_feature, protein_term, transcript_term):
-    ''' gene_bio_feature is in datafixtures '''
-    types = [protein_term, transcript_term]
-    for t in types:
-        res = testapp.patch_json(gene_bio_feature['@id'], {'feature_type': t['@id']}, status=200)
-        assert res.json['@graph'][0].get('display_title') == gene_item.get('display_title') + ' ' + t.get('display_title')
-
-
-def test_bio_feature_display_title_modified_protein(
-        testapp, gene_item, gene_bio_feature, protein_term):
-    ''' gene_bio_feature is in datafixtures '''
-    res = testapp.patch_json(
-        gene_bio_feature['@id'],
-        {
-            'feature_type': protein_term['@id'],
-            'feature_mods': [{
-                'mod_type': 'Methylation',
-                'mod_position': 'K9'
-            }]
-        },
-        status=200)
-    assert res.json['@graph'][0].get('display_title') == 'RAD21 protein with K9 Methylation'
-
-
-def test_bio_feature_display_title_cellular_component(testapp, component_term, lab, award):
-    struct = 'Nuclear pore complex'
-    item = {
-        'feature_type': component_term['@id'],
-        'cellular_structure': struct,
-        'lab': lab['@id'],
-        'award': award['@id'],
-        'description': 'test structure'
-    }
-    res = testapp.post_json('/bio_feature', item, status=201)
-    assert res.json['@graph'][0].get('display_title') == struct
-
-
-def test_bio_feature_display_title_mouse_gene(
-        mouse_gene_bio_feature, mouse_gene_item):
-    assert mouse_gene_bio_feature.get('display_title') == mouse_gene_item.get('display_title') + ' mouse gene'
-
-
-def test_bio_feature_display_title_multi_species_gene(
-        multi_species_gene_bio_feature):
-    assert multi_species_gene_bio_feature.get('display_title') == 'Ldb1, RAD21 genes multiple organisms'
-
-
-def test_bio_feature_display_title_unknown_organism_gene(
-        armadillo_gene_bio_feature, armadillo_gene_item):
-    assert armadillo_gene_bio_feature.get('display_title') == armadillo_gene_item.get('display_title') + ' gene'
-
-
-def test_bio_feature_display_title_preferred_name_w_org(
-        testapp, mouse_gene_bio_feature):
-    mfeat = testapp.patch_json(mouse_gene_bio_feature['@id'], {'preferred_label': 'Cool gene'}, status=200).json['@graph'][0]
-    assert mfeat.get('display_title') == 'Cool gene (mouse)'
diff --git a/src/encoded/tests/test_types_biosample.py b/src/encoded/tests/test_types_biosample.py
deleted file mode 100644
index a8eb82c432..0000000000
--- a/src/encoded/tests/test_types_biosample.py
+++ /dev/null
@@ -1,335 +0,0 @@
-import pytest
-# from snovault.schema_utils import load_schema
-
-
-pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
-
-
-@pytest.fixture
-def biosample_cc_w_diff(testapp, de_term, lab, award):
-    item = {
-        "culture_start_date": "2018-01-01",
-        "differentiation_state": "Differentiated to definitive endoderm demonstrated by decreased Oct4 expression and increased Sox17 expression",
-        "tissue": de_term['@id'],
-        "in_vitro_differentiated": "Yes",
-        'award': award['@id'],
-        'lab': lab['@id']
-    }
-    return testapp.post_json('/biosample_cell_culture', item).json['@graph'][0]
-
-
-@pytest.fixture
-def biosample_1(testapp, human_biosource, lab, award):
-    item = {
-        'description': "GM12878 prepared for Hi-C",
-        'biosource': [human_biosource['@id'], ],
-        'award': award['@id'],
-        'lab': lab['@id'],
-    }
-    return testapp.post_json('/biosample', item).json['@graph'][0]
-
-
-@pytest.fixture
-def biosample_w_mod(testapp, biosample_1, mod_w_target):
-    return testapp.patch_json(biosample_1['@id'], {'modifications': [mod_w_target['@id']]}).json['@graph'][0]
-
-
-@pytest.fixture
-def biosample_w_treatment(testapp, biosample_1, rnai):
-    return testapp.patch_json(biosample_1['@id'], {'treatments': [rnai['@id']]}).json['@graph'][0]
-
-
-def biosample_relation(derived_from):
-    return {"biosample_relation": [{"relationship_type": "derived from",
-                                    "biosample": derived_from['@id']}]}
-
-
-def test_biosample_has_display_title(testapp, biosample_1):
-    # accession fallback used for display title here
-    assert biosample_1['display_title'] == biosample_1['accession']
-
-
-# data from test/datafixtures
-def test_update_biosample_relation(testapp, human_biosample, biosample_1):
-    patch_res = testapp.patch_json(human_biosample['@id'], biosample_relation(biosample_1))
-    res = testapp.get(biosample_1['@id'])
-    # expected relation: 'biosample': human_biosample['@id'],
-    # 'relationship_type': 'parent of'
-    assert res.json['biosample_relation'][0]['biosample']['@id'] == human_biosample['@id']
-    assert res.json['biosample_relation'][0]['relationship_type'] == 'parent of'
-
-
-def test_biosample_calculated_properties(testapp, biosample_1):
-    """
-    Test to ensure the calculated properties are in the result returned from testapp.
def test_biosample_calculated_properties(testapp, biosample_1): - """ - Test to ensure the calculated properties are in the result returned from testapp. - These return the string 'None' when there is no value, as they are used in the Item page view. - """ - res = testapp.get(biosample_1['@id']).json - assert 'modifications_summary' in res - assert 'modifications_summary_short' in res - assert 'treatments_summary' in res - assert 'biosource_summary' in res - - -def test_biosample_biosource_summary_one_biosource(testapp, biosample_1, human_biosource): - assert biosample_1['biosource_summary'] == human_biosource['biosource_name'] - - -def test_biosample_biosource_summary_two_biosource(testapp, biosample_1, human_biosource, lung_biosource): - res = testapp.patch_json(biosample_1['@id'], {'biosource': [human_biosource['@id'], lung_biosource['@id']]}).json['@graph'][0] - assert human_biosource['biosource_name'] in res['biosource_summary'] - assert lung_biosource['biosource_name'] in res['biosource_summary'] - assert ' and ' in res['biosource_summary'] - - -def test_biosample_biosource_summary_w_differentiation(testapp, biosample_1, human_biosource, biosample_cc_w_diff, de_term): - res = testapp.patch_json(biosample_1['@id'], {'cell_culture_details': [biosample_cc_w_diff['@id']]}).json['@graph'][0] - assert human_biosource['biosource_name'] in res['biosource_summary'] - assert ' differentiated to ' in res['biosource_summary'] - assert de_term['display_title'] in res['biosource_summary'] - - -def test_biosample_sample_type_w_differentiation(testapp, biosample_1, biosample_cc_w_diff): - res = testapp.patch_json(biosample_1['@id'], {'cell_culture_details': [biosample_cc_w_diff['@id']]}).json['@graph'][0] - assert res['biosample_type'] == 'in vitro differentiated cells' - - -def test_biosample_sample_type_immortalized_wo_differentiation(testapp, biosample_1, biosample_cc_wo_diff): - res = testapp.patch_json(biosample_1['@id'], {'cell_culture_details': [biosample_cc_wo_diff['@id']]}).json['@graph'][0] - assert res['biosample_type'] == 'immortalized cells' - - -def test_biosample_sample_type_bs_stem_cell_line(testapp, biosample_1, human_biosource): - bsres = testapp.patch_json(human_biosource['@id'], {'biosource_type': 'stem cell derived cell line'}).json['@graph'][0] - res = testapp.patch_json(biosample_1['@id'], {'biosource': [bsres['@id']]}).json['@graph'][0] - assert res['biosample_type'] == 'stem cells' - - -def test_biosample_sample_type_bs_multicellular(testapp, biosample_1, human_biosource): - bsres = testapp.patch_json(human_biosource['@id'], {'biosource_type': 'multicellular organism'}).json['@graph'][0] - res = testapp.patch_json(biosample_1['@id'], {'biosource': [bsres['@id']]}).json['@graph'][0] - assert res['biosample_type'] == 'whole organisms' - - -def test_biosample_sample_type_bs_tissue(testapp, biosample_1, human_biosource): - bty = 'tissue' - bsres = testapp.patch_json(human_biosource['@id'], {'biosource_type': bty}).json['@graph'][0] - res = testapp.patch_json(biosample_1['@id'], {'biosource': [bsres['@id']]}).json['@graph'][0] - assert res['biosample_type'] == bty - - -def test_biosample_sample_type_bs_lines_and_to_pluralize(testapp, biosample_1, human_biosource): - types = { - "primary cell": "primary cells", - "primary cell line": "primary cells", - "immortalized cell line": "immortalized cells", - "stem cell": "stem cells", - "induced pluripotent stem cell": "induced pluripotent stem cells" - } - for bty, bsty in types.items(): - bsres = testapp.patch_json(human_biosource['@id'], {'biosource_type': bty}).json['@graph'][0] - res = testapp.patch_json(biosample_1['@id'], {'biosource':
[bsres['@id']]}).json['@graph'][0] - assert res['biosample_type'] == bsty - - -def test_biosample_sample_type_bs_multiple_same_type(testapp, biosample_1, human_biosource, GM12878_biosource): - res = testapp.patch_json(biosample_1['@id'], {'biosource': [human_biosource['@id'], GM12878_biosource['@id']]}).json['@graph'][0] - assert res['biosample_type'] == 'immortalized cells' - - -def test_biosample_sample_type_bs_multiple_diff_types(testapp, biosample_1, human_biosource, lung_biosource): - res = testapp.patch_json(biosample_1['@id'], {'biosource': [human_biosource['@id'], lung_biosource['@id']]}).json['@graph'][0] - assert res['biosample_type'] == 'mixed sample' - - -def test_biosample_modifications_summaries(biosample_w_mod): - assert biosample_w_mod['modifications_summary'] == 'Crispr for RAD21 gene' - assert biosample_w_mod['modifications_summary_short'] == 'RAD21 Crispr' - - -def test_biosample_modifications_summaries_no_mods(biosample_1): - assert biosample_1.get('modifications_summary') == 'None' - assert biosample_1.get('modifications_summary_short') == 'None' - - -def test_biosample_treatments_summary(biosample_w_treatment): - assert biosample_w_treatment.get('treatments_summary') == 'shRNA treatment' - - -def test_biosample_treatments_summary_no_treatment(biosample_1): - assert biosample_1.get('treatments_summary') == 'None' - - -def test_biosample_category_undifferentiated_stem_cells(testapp, biosample_1, human_biosource): - scl = testapp.patch_json(human_biosource['@id'], {'biosource_type': 'stem cell derived cell line'}).json['@graph'][0] - bios = testapp.patch_json(biosample_1['@id'], {'biosource': [scl['@id']]}).json['@graph'][0] - assert 'Human stem cell' in bios.get('biosample_category') - - -def test_biosample_category_differentiated_stem_cells(testapp, biosample_1, human_biosource, biosample_cc_w_diff): - scl = testapp.patch_json(human_biosource['@id'], {'biosource_type': 'stem cell derived cell line'}).json['@graph'][0] - bios = testapp.patch_json(biosample_1['@id'], {'biosource': [scl['@id']], 'cell_culture_details': [biosample_cc_w_diff['@id']]}).json['@graph'][0] - cats = bios.get('biosample_category') - assert 'Human stem cell' not in cats - assert 'In vitro Differentiation' in cats - - -def test_biosample_biosource_category_two_biosource(testapp, biosample_1, human_biosource, lung_biosource): - res = testapp.patch_json(biosample_1['@id'], {'biosource': [human_biosource['@id'], lung_biosource['@id']]}).json['@graph'][0] - cat = res.get('biosample_category') - assert len(cat) == 1 - assert cat[0] == 'Mixed samples' - - -# setting up fixtures for testing tissue and organ calcprop -@pytest.fixture -def brain_term(testapp, uberon_ont, cns_term, ectoderm_term): - item = { - "is_slim_for": "organ", - "term_id": "brain_tid", - "term_name": "brain", - "source_ontologies": [uberon_ont['@id']], - "slim_terms": [cns_term['@id'], ectoderm_term['@id']] - } - return testapp.post_json('/ontology_term', item).json['@graph'][0] - - -@pytest.fixture -def cns_term(testapp, uberon_ont, ectoderm_term): - item = { - "is_slim_for": "system", - "term_id": "cns_tid", - "term_name": "central nervous system", - "source_ontologies": [uberon_ont['@id']], - "slim_terms": [ectoderm_term['@id']] - } - return testapp.post_json('/ontology_term', item).json['@graph'][0] - - -@pytest.fixture -def ectoderm_term(testapp, uberon_ont): - item = { - "is_slim_for": "developmental", - "term_id": "ectoderm_tid", - "term_name": "ectoderm", - "source_ontologies": [uberon_ont['@id']], - } - return 
testapp.post_json('/ontology_term', item).json['@graph'][0] - - -@pytest.fixture -def primary_cell_term(testapp, ontology): - item = { - "is_slim_for": "cell", - "term_id": "pcell_id", - "term_name": "primary cell", - "source_ontologies": [ontology['@id']], - } - return testapp.post_json('/ontology_term', item).json['@graph'][0] - - -@pytest.fixture -def cortical_neuron_term(testapp, uberon_ont, brain_term, cns_term, - ectoderm_term, primary_cell_term): - item = { - "term_id": "cort_neuron_id", - "term_name": "cortical neuron", - "source_ontologies": [uberon_ont['@id']], - "slim_terms": [brain_term['@id'], cns_term['@id'], ectoderm_term['@id'], primary_cell_term['@id']] - } - return testapp.post_json('/ontology_term', item).json['@graph'][0] - - -@pytest.fixture -def bcc_diff_to_cortical(testapp, lab, award, cortical_neuron_term): - item = { - "culture_start_date": "2018-01-01", - "differentiation_state": "Stem cell differentiated to cortical neuron", - "tissue": cortical_neuron_term['@id'], - "in_vitro_differentiated": "Yes", - 'award': award['@id'], - 'lab': lab['@id'] - } - return testapp.post_json('/biosample_cell_culture', item).json['@graph'][0] - - -@pytest.fixture -def diff_cortical_neuron_bs(testapp, F123_biosource, bcc_diff_to_cortical, lab, award): - item = { - "description": "Differentiated cortical neuron", - "biosource": [F123_biosource['@id']], - "cell_culture_details": [bcc_diff_to_cortical['@id']], - "award": award['@id'], - "lab": lab['@id'] - } - return testapp.post_json('/biosample', item).json['@graph'][0] - - -@pytest.fixture -def brain_biosource(testapp, brain_term, lab, award): - item = { - "description": "Brain tissue", - "biosource_type": "tissue", - "tissue": brain_term['@id'], - "lab": lab['@id'], - "award": award['@id'] - } - return testapp.post_json('/biosource', item).json['@graph'][0] - - -@pytest.fixture -def brain_biosample(testapp, brain_biosource, lab, award): - item = { - "description": "Brain Tissue Biosample", - "biosource": [brain_biosource['@id']], - "award": award['@id'], - "lab": lab['@id'] - } - return testapp.post_json('/biosample', item).json['@graph'][0] - - -@pytest.fixture -def mixed_biosample(testapp, brain_biosource, lung_biosource, lab, award): - item = { - "description": "Mixed Tissue Biosample", - "biosource": [brain_biosource['@id'], lung_biosource['@id']], - "award": award['@id'], - "lab": lab['@id'] - } - return testapp.post_json('/biosample', item).json['@graph'][0] - - -def test_get_tissue_organ_info_none_present(biosample_1): - assert 'tissue_organ_info' not in biosample_1 - - -def test_get_tissue_organ_info_tissue_in_cell_culture(diff_cortical_neuron_bs, cortical_neuron_term): - org_sys = sorted(['brain', 'central nervous system', 'ectoderm']) - assert 'tissue_organ_info' in diff_cortical_neuron_bs - assert diff_cortical_neuron_bs['tissue_organ_info']['tissue_source'] == cortical_neuron_term.get('display_title') - assert sorted(diff_cortical_neuron_bs['tissue_organ_info']['organ_system']) == org_sys - - -def test_get_tissue_organ_info_tissue_in_biosource(brain_biosample, brain_term): - org_sys = sorted(['central nervous system', 'ectoderm']) - assert 'tissue_organ_info' in brain_biosample - assert brain_biosample['tissue_organ_info']['tissue_source'] == brain_term.get('display_title') - assert sorted(brain_biosample['tissue_organ_info']['organ_system']) == org_sys - - -def test_get_tissue_organ_info_tissue_mixed_biosample(mixed_biosample): - org_sys = sorted(['central nervous system', 'ectoderm']) - assert 'tissue_organ_info' in 
mixed_biosample - assert mixed_biosample['tissue_organ_info']['tissue_source'] == 'mixed tissue' - assert sorted(mixed_biosample['tissue_organ_info']['organ_system']) == org_sys - - -def test_get_tissue_organ_info_none_if_only_cell_slim_terms(testapp, F123_biosource, lab, award): - item = { - "description": "F123 Biosample", - "biosource": [F123_biosource['@id']], - "award": award['@id'], - "lab": lab['@id'] - } - f123_biosample = testapp.post_json('/biosample', item).json['@graph'][0] - assert 'tissue_organ_info' not in f123_biosample diff --git a/src/encoded/tests/test_types_biosource.py b/src/encoded/tests/test_types_biosource.py deleted file mode 100644 index f5f958414f..0000000000 --- a/src/encoded/tests/test_types_biosource.py +++ /dev/null @@ -1,331 +0,0 @@ -import pytest - - -pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema] - - -@pytest.fixture -def other_mod(testapp, lab, award): - data = { - "lab": lab['@id'], - "award": award['@id'], - "modification_type": "Stable Transfection", - "description": "second modification" - } - return testapp.post_json('/modification', data).json['@graph'][0] - - -@pytest.fixture -def GM12878_mod_biosource(testapp, lab, award, gm12878_oterm, basic_modification): - item = { - "accession": "4DNSROOOAAC1", - "biosource_type": "primary cell line", - "cell_line": gm12878_oterm['@id'], - 'award': award['@id'], - 'lab': lab['@id'], - 'modifications': [basic_modification['@id']] - } - return testapp.post_json('/biosource', item).json['@graph'][0] - - -@pytest.fixture -def GM12878_twomod_biosource(testapp, lab, award, gm12878_oterm, basic_modification, other_mod): - item = { - "accession": "4DNSROOOAAC2", - "biosource_type": "primary cell line", - "cell_line": gm12878_oterm['@id'], - 'award': award['@id'], - 'lab': lab['@id'], - 'modifications': [basic_modification['@id'], other_mod['@id']] - } - return testapp.post_json('/biosource', item).json['@graph'][0] - - -@pytest.fixture -def cell_lines(GM12878_biosource, F123_biosource, GM12878_mod_biosource, GM12878_twomod_biosource): - return [F123_biosource, GM12878_biosource, GM12878_mod_biosource, GM12878_twomod_biosource] - - -@pytest.fixture -def whole_biosource(testapp, human_individual, lab, award): - item = { - "biosource_type": "multicellular organism", - "individual": human_individual['@id'], - 'award': award['@id'], - 'lab': lab['@id'] - } - return testapp.post_json('/biosource', item).json['@graph'][0] - - -@pytest.fixture -def biosources(cell_lines, lung_biosource, whole_biosource): - bs = cell_lines - bs.extend([lung_biosource, whole_biosource]) - return bs - - -@pytest.fixture -def human_biosource_data(testapp, lab, award, human_individual): - return { - 'award': award['@id'], - 'lab': lab['@id'], - 'individual': human_individual['@id'] - } - - -@pytest.fixture -def mouse_SC_biosrc(testapp, human_biosource_data, mouse_individual): - mouse_SC_biosrc_data = human_biosource_data.copy() - mouse_SC_biosrc_data['biosource_type'] = 'stem cell derived cell line' - mouse_SC_biosrc_data['individual'] = mouse_individual['@id'] - return testapp.post_json('/biosource', mouse_SC_biosrc_data).json['@graph'][0] - - -@pytest.fixture -def primary_cell_biosource(testapp, human_biosource_data): - pc_biosrc_data = human_biosource_data.copy() - pc_biosrc_data['biosource_type'] = 'primary cell' - return testapp.post_json('/biosource', pc_biosrc_data).json['@graph'][0] - - -@pytest.fixture -def hum_SC_biosrc(testapp, human_biosource_data): - hum_SC_biosrc_data = human_biosource_data.copy() - 
hum_SC_biosrc_data['biosource_type'] = 'stem cell derived cell line' - return testapp.post_json('/biosource', hum_SC_biosrc_data).json['@graph'][0] - - -@pytest.fixture -def thous_genomes_biosources(testapp, human_biosource_data, thousandgen_oterms, b_lymphocyte_oterm): - bsources = [] - human_biosource_data['tissue'] = b_lymphocyte_oterm['@id'] - human_biosource_data['biosource_type'] = 'immortalized cell line' - for ot in thousandgen_oterms: - bs_data = human_biosource_data.copy() - bs_data['cell_line'] = ot['@id'] - bsources.append(testapp.post_json('/biosource', bs_data).json['@graph'][0]) - return bsources - - -def test_calculated_organism_wo_ind(testapp, human_biosource_data): - human_biosource_data['biosource_type'] = 'primary cell' - del human_biosource_data['individual'] - res = testapp.post_json('/biosource', human_biosource_data, status=201).json['@graph'][0] - assert 'organism' not in res - - -def test_calculated_organism_wo_ind_w_override(testapp, human_biosource_data, human): - human_biosource_data['biosource_type'] = 'primary cell' - human_biosource_data['override_organism_name'] = 'human' - del human_biosource_data['individual'] - res = testapp.post_json('/biosource', human_biosource_data, status=201).json['@graph'][0] - assert res.get('organism') == human.get('uuid') - - -def test_calculated_organism_w_ind(testapp, human_biosource_data, human): - human_biosource_data['biosource_type'] = 'primary cell' - res = testapp.post_json('/biosource', human_biosource_data, status=201).json['@graph'][0] - assert res.get('organism') == human.get('uuid') - - -def test_calculated_biosource_category_multicellular(lung_biosource, whole_biosource): - assert 'Multicellular Tissue' in lung_biosource.get('biosource_category') - assert 'Multicellular Tissue' in whole_biosource.get('biosource_category') - - -def test_calculated_biosource_category_primary_cell(primary_cell_biosource): - assert 'Primary Cells' in primary_cell_biosource.get('biosource_category') - - -def test_calculated_biosource_category_1000_gen(thous_genomes_biosources, GM12878_biosource): - assert 'GM12878' in GM12878_biosource.get('biosource_category') - thous_genomes_biosources.append(GM12878_biosource) - for bs in thous_genomes_biosources: - assert '1000 genomes/Hap Map' in bs.get('biosource_category') - - -def test_calculated_biosource_category_tiers(cell_lines): - bs1 = cell_lines.pop(0) - assert 'Tier 2' in bs1.get('biosource_category') - for bs in cell_lines: - assert 'GM12878' in bs.get('biosource_category') - - -def test_calculated_biosource_category_stem_cells(mouse_SC_biosrc, hum_SC_biosrc): - assert 'Human stem cell' in hum_SC_biosrc.get('biosource_category') - assert 'Mouse stem cell' not in hum_SC_biosrc.get('biosource_category') - assert 'Mouse stem cell' in mouse_SC_biosrc.get('biosource_category') - assert 'Human stem cell' not in mouse_SC_biosrc.get('biosource_category') - - -def test_calculated_biosource_name(testapp, biosources, mod_w_change_and_target, lung_oterm): - for biosource in biosources: - biotype = biosource['biosource_type'] - name = biosource['biosource_name'] - if biotype == 'immortalized cell line': - assert name == 'GM12878' - elif biotype == 'stem cell': - assert name == 'F123-CASTx129' - elif biotype == 'primary cell line' and biosource['accession'] == "4DNSROOOAAC1": - # a not-real biosource type is used here to test that modifications are added to the name - assert name == 'GM12878 with Crispr' - res = testapp.patch_json(biosource['@id'], {'modifications':
[mod_w_change_and_target['@id']]}) - assert res.json['@graph'][0]['biosource_name'] == 'GM12878 with RAD21 deletion' - elif biotype == 'primary cell line' and biosource['accession'] == "4DNSROOOAAC2": - assert name == 'GM12878 with Crispr, Stable Transfection' - elif biotype == 'tissue': - assert name == lung_oterm.get('preferred_name') - elif biotype == 'multicellular organism': - assert name == 'whole human' - - -def test_calculated_biosource_name_override(testapp, GM12878_mod_biosource): - bs_w_or = testapp.patch_json(GM12878_mod_biosource['@id'], {'override_biosource_name': 'neat modified cell line'}).json['@graph'][0] - assert bs_w_or['biosource_name'] == 'neat modified cell line' - assert bs_w_or['display_title'] == 'neat modified cell line - {}'.format(bs_w_or.get('accession')) - - -def test_validate_biosource_tissue_no_tissue(testapp, award, lab, gm12878_oterm): - biosource = {'award': award['@id'], - 'lab': lab['@id'], - 'biosource_type': 'immortalized cell line', - 'cell_line': 'GM12878'} - res = testapp.post_json('/biosource', biosource, status=201) - assert not res.json.get('errors') - - -def test_validate_biosource_tissue_invalid(testapp, award, lab, lung_oterm, ontology): - testapp.patch_json(lung_oterm['@id'], {'source_ontologies': [ontology['@id']]}, status=200) - biosource = {'award': award['@id'], - 'lab': lab['@id'], - 'biosource_type': 'tissue', - 'tissue': lung_oterm['@id']} - res = testapp.post_json('/biosource', biosource, status=422) - errors = res.json['errors'] - assert 'not found in UBERON' in errors[0]['description'] - - -def test_validate_biosource_tissue_valid_atid(testapp, award, lab, lung_oterm): - biosource = {'award': award['@id'], - 'lab': lab['@id'], - 'biosource_type': 'tissue', - 'tissue': lung_oterm['@id']} - res = testapp.post_json('/biosource', biosource, status=201) - assert not res.json.get('errors') - - -def test_validate_biosource_tissue_valid_uuid(testapp, award, lab, lung_oterm): - biosource = {'award': award['@id'], - 'lab': lab['@id'], - 'biosource_type': 'tissue', - 'tissue': lung_oterm['uuid']} - res = testapp.post_json('/biosource', biosource, status=201) - assert not res.json.get('errors') - - -def test_validate_biosource_tissue_on_valid_patch(testapp, award, lab, lung_oterm): - biosource = {'award': award['@id'], - 'lab': lab['@id'], - 'biosource_type': 'tissue', - 'tissue': lung_oterm['uuid']} - res = testapp.post_json('/biosource', biosource, status=201) - assert not res.json.get('errors') - new_oterm = {'term_name': 'finger', - 'term_id': 'UBERON:0000009', - 'source_ontologies': lung_oterm['source_ontologies']} - ot = testapp.post_json('/ontology_term', new_oterm, status=201) - pid = '/' + res.json['@graph'][0].get('uuid') - res2 = testapp.patch_json(pid, {'tissue': ot.json['@graph'][0]['uuid']}) - assert not res2.json.get('errors') - - -def test_validate_biosource_tissue_on_invalid_patch(testapp, award, lab, lung_oterm, ontology): - biosource = {'award': award['@id'], - 'lab': lab['@id'], - 'biosource_type': 'tissue', - 'tissue': lung_oterm['uuid']} - res = testapp.post_json('/biosource', biosource, status=201) - assert not res.json.get('errors') - new_oterm = {'term_name': 'finger', - 'term_id': 'UBERON:0000009', - 'source_ontologies': [ontology['uuid']]} - ot = testapp.post_json('/ontology_term', new_oterm, status=201) - pid = '/' + res.json['@graph'][0].get('uuid') - res2 = testapp.patch_json(pid, {'tissue': ot.json['@graph'][0]['uuid']}, status=422) - errors = res2.json['errors'] - assert 'not found in UBERON' in 
errors[0]['description'] - - -def test_validate_biosource_cell_line_no_cell_line(testapp, award, lab): - biosource = {'award': award['@id'], - 'lab': lab['@id'], - 'biosource_type': 'tissue' - } - res = testapp.post_json('/biosource', biosource, status=201) - assert not res.json.get('errors') - - -def test_validate_biosource_cell_line_invalid_ont(testapp, award, lab, gm12878_oterm, lung_oterm): - testapp.patch_json(gm12878_oterm['@id'], {'slim_terms': [lung_oterm['@id']]}, status=200) - biosource = {'award': award['@id'], - 'lab': lab['@id'], - 'biosource_type': 'immortalized cell line', - 'cell_line': gm12878_oterm['@id']} - res = testapp.post_json('/biosource', biosource, status=422) - errors = res.json['errors'] - assert errors[0]['name'] == 'Biosource: invalid cell_line term' - assert 'not a known valid cell line' in errors[0]['description'] - - -def test_validate_biosource_cell_line_valid_atid(testapp, award, lab, gm12878_oterm): - biosource = {'award': award['@id'], - 'lab': lab['@id'], - 'biosource_type': 'immortalized cell line', - 'cell_line': gm12878_oterm['@id']} - res = testapp.post_json('/biosource', biosource, status=201) - assert not res.json.get('errors') - - -def test_validate_biosource_cell_line_valid_uuid(testapp, award, lab, gm12878_oterm): - biosource = {'award': award['@id'], - 'lab': lab['@id'], - 'biosource_type': 'immortalized cell line', - 'cell_line': gm12878_oterm['uuid']} - res = testapp.post_json('/biosource', biosource, status=201) - assert not res.json.get('errors') - - -def test_validate_biosource_cell_line_on_valid_patch(testapp, award, lab, gm12878_oterm): - biosource = {'award': award['@id'], - 'lab': lab['@id'], - 'biosource_type': 'immortalized cell line', - 'cell_line': gm12878_oterm['uuid']} - res = testapp.post_json('/biosource', biosource, status=201) - assert not res.json.get('errors') - new_oterm = {'term_name': 'bigcell', - 'term_id': 'test:1', - 'source_ontologies': gm12878_oterm['source_ontologies'], - 'slim_terms': gm12878_oterm['slim_terms']} - ot = testapp.post_json('/ontology_term', new_oterm, status=201) - pid = '/' + res.json['@graph'][0].get('uuid') - res2 = testapp.patch_json(pid, {'cell_line': ot.json['@graph'][0]['uuid']}) - assert not res2.json.get('errors') - - -def test_validate_biosource_cell_line_on_invalid_patch(testapp, award, lab, gm12878_oterm): - biosource = {'award': award['@id'], - 'lab': lab['@id'], - 'biosource_type': 'immortalized cell line', - 'cell_line': gm12878_oterm['uuid']} - res = testapp.post_json('/biosource', biosource, status=201) - assert not res.json.get('errors') - new_oterm = {'term_name': 'bigcell', - 'term_id': 'test:1', - 'source_ontologies': gm12878_oterm['source_ontologies']} - ot = testapp.post_json('/ontology_term', new_oterm, status=201) - pid = '/' + res.json['@graph'][0].get('uuid') - res2 = testapp.patch_json(pid, {'cell_line': ot.json['@graph'][0]['uuid']}, status=422) - errors = res2.json['errors'] - assert errors[0]['name'] == 'Biosource: invalid cell_line term' - assert 'not a known valid cell line' in errors[0]['description'] diff --git a/src/encoded/tests/test_types_experiment.py b/src/encoded/tests/test_types_experiment.py deleted file mode 100644 index c4fb6a8563..0000000000 --- a/src/encoded/tests/test_types_experiment.py +++ /dev/null @@ -1,984 +0,0 @@ -""" - -Tests for both experiment.py and experiment_set.py - -""" - -import pytest -from snovault import TYPES -# from snovault.storage import UUID -from uuid import uuid4 -from ..types.experiment import ExperimentHiC - - -pytestmark = 
[pytest.mark.setone, pytest.mark.working] - - -@pytest.fixture -def custom_experiment_set_data(lab, award): - return { - 'lab': lab['@id'], - 'award': award['@id'], - 'description': 'test experiment set', - 'experimentset_type': 'custom', - 'status': 'in review by lab' - } - - -@pytest.fixture -def custom_experiment_set(testapp, custom_experiment_set_data): - return testapp.post_json('/experiment_set', custom_experiment_set_data).json['@graph'][0] - - -@pytest.fixture -def replicate_experiment_set_data(lab, award): - return { - 'lab': lab['@id'], - 'award': award['@id'], - 'description': 'test replicate set', - 'experimentset_type': 'replicate', - 'status': 'in review by lab' - } - - -@pytest.fixture -def replicate_experiment_set(testapp, replicate_experiment_set_data): - return testapp.post_json('/experiment_set_replicate', replicate_experiment_set_data).json['@graph'][0] - - -@pytest.fixture -def sop_map_data(protocol, lab, award): - return { - "sop_name": "in situ Hi-C SOP map", - "sop_version": 1, - 'lab': lab['@id'], - 'award': award['@id'], - "associated_item_type": "ExperimentHiC", - "id_values": ["in situ Hi-C"], - "notes": "This is just a dummy insert not linked to true SOP protocol", - "description": "Fields with specified defaults in the SOP for in situ Hi-C experiments as per ??", - "sop_protocol": protocol['@id'], - "fields_with_default": [ - {"field_name": "digestion_enzyme", "field_value": "MboI"}, - ] - } - - -@pytest.fixture -def sop_map_data_2(lab, award): - return { - "sop_name": "Second in situ hic map", - "sop_version": 2, - 'lab': lab['@id'], - 'award': award['@id'], - "associated_item_type": "ExperimentHiC", - "id_values": ["in situ Hi-C"], - "notes": "This is a dummy second version of map", - "description": "Second", - } - - -def test_experiment_update_experiment_relation(testapp, base_experiment, experiment): - relation = [{'relationship_type': 'controlled by', - 'experiment': experiment['@id']}] - res = testapp.patch_json(base_experiment['@id'], {'experiment_relation': relation}) - assert res.json['@graph'][0]['experiment_relation'] == relation - - # patching an experiment should also update the related experiment - exp_res = testapp.get(experiment['@id']) - exp_res_id = exp_res.json['experiment_relation'][0]['experiment']['@id'] - assert exp_res_id == base_experiment['@id'] - - -def test_experiment_update_hic_sop_mapping_added_on_submit(testapp, experiment_data, sop_map_data): - res_sop = testapp.post_json('/sop_map', sop_map_data, status=201) - res_exp = testapp.post_json('/experiment_hi_c', experiment_data) - assert 'sop_mapping' in res_exp.json['@graph'][0] - assert res_exp.json['@graph'][0]['sop_mapping']['has_sop'] == "Yes" - assert res_exp.json['@graph'][0]['sop_mapping']['sop_map'] == res_sop.json['@graph'][0]['@id'] - - -def test_experiment_update_hic_sop_mapping_has_map_is_no(testapp, experiment_data, exp_types): - experiment_data['experiment_type'] = exp_types['dnase']['@id'] - res_exp = testapp.post_json('/experiment_hi_c', experiment_data) - assert 'sop_mapping' in res_exp.json['@graph'][0] - assert res_exp.json['@graph'][0]['sop_mapping']['has_sop'] == "No" - - -def test_experiment_update_hic_sop_mapping_has_sop2no_when_only_sopmap_deleted( - testapp, experiment_data, sop_map_data): - sop_map_data['status'] = 'deleted' - testapp.post_json('/sop_map', sop_map_data, status=201) - res_exp = testapp.post_json('/experiment_hi_c', experiment_data) - assert 'sop_mapping' in res_exp.json['@graph'][0] - assert res_exp.json['@graph'][0]['sop_mapping']['has_sop'] == "No"
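Taken together, the sop_mapping tests around this point encode a selection rule: on submit an experiment is matched to the highest-versioned, non-deleted sop_map whose id_values include its experiment type, and has_sop is 'No' when no such map exists. A minimal sketch of that rule, with the function name assumed and field names taken from the fixtures above:

    def build_sop_mapping(experiment_type, sop_maps):
        # Keep only non-deleted maps that apply to this experiment type,
        # then prefer the highest sop_version among them.
        candidates = [m for m in sop_maps
                      if m.get('status') != 'deleted'
                      and experiment_type in m.get('id_values', [])]
        if not candidates:
            return {'has_sop': 'No'}
        best = max(candidates, key=lambda m: m['sop_version'])
        return {'has_sop': 'Yes', 'sop_map': best['@id']}

Under this rule version 2 shadows version 1, and deleting version 2 falls back to version 1, exactly the cases the next two tests exercise.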
- - -def test_experiment_update_hic_sop_mapping_to_v2_when_2_versions( - testapp, experiment_data, sop_map_data, sop_map_data_2): - testapp.post_json('/sop_map', sop_map_data, status=201) - res2chk = testapp.post_json('/sop_map', sop_map_data_2, status=201) - res_exp = testapp.post_json('/experiment_hi_c', experiment_data) - assert 'sop_mapping' in res_exp.json['@graph'][0] - assert res_exp.json['@graph'][0]['sop_mapping']['has_sop'] == "Yes" - assert res_exp.json['@graph'][0]['sop_mapping']['sop_map'] == res2chk.json['@graph'][0]['@id'] - - -def test_experiment_update_hic_sop_mapping_to_v1_when_v2_deleted( - testapp, experiment_data, sop_map_data, sop_map_data_2): - res2chk = testapp.post_json('/sop_map', sop_map_data, status=201) - sop_map_data_2['status'] = 'deleted' - testapp.post_json('/sop_map', sop_map_data_2, status=201) - res_exp = testapp.post_json('/experiment_hi_c', experiment_data) - assert 'sop_mapping' in res_exp.json['@graph'][0] - assert res_exp.json['@graph'][0]['sop_mapping']['has_sop'] == "Yes" - assert res_exp.json['@graph'][0]['sop_mapping']['sop_map'] == res2chk.json['@graph'][0]['@id'] - - -def test_experiment_update_hic_sop_map_not_added_when_already_present(testapp, experiment_data): - experiment_data['sop_mapping'] = {} - experiment_data['sop_mapping']['has_sop'] = 'No' - res = testapp.post_json('/experiment_hi_c', experiment_data) - assert 'sop_mapping' in res.json['@graph'][0] - assert res.json['@graph'][0]['sop_mapping']['has_sop'] == "No" - assert 'sop_map' not in res.json['@graph'][0]['sop_mapping'] - - -def test_calculated_experiment_summary(testapp, experiment, mboI): - summary = 'in situ Hi-C on GM12878 with MboI' - res = testapp.patch_json(experiment['@id'], {'digestion_enzyme': mboI['@id']}, status=200) - assert res.json['@graph'][0]['experiment_summary'] == summary - assert summary in res.json['@graph'][0]['display_title'] - - -def test_experiment_summary_repliseq(repliseq_4): - assert repliseq_4.get('experiment_summary') == '2-stage Repli-seq on GM12878 S-phase early' - - -# test for experiment_set_replicate _update function -def test_experiment_set_replicate_update_adds_experiments_in_set(testapp, experiment, replicate_experiment_set): - assert not replicate_experiment_set['experiments_in_set'] - res = testapp.patch_json( - replicate_experiment_set['@id'], - {'replicate_exps': - [{'replicate_exp': experiment['@id'], 'bio_rep_no': 1, 'tec_rep_no': 1}]}, - status=200) - assert experiment['@id'] in res.json['@graph'][0]['experiments_in_set'] - - -# test for default_embedding practice with embedded list -# this test should change if any of the reference embeds below are altered -def test_experiment_set_default_embedded_list(registry, exp_types): - exp_data = { - 'experiment_type': exp_types['microc']['uuid'], - 'status': 'in review by lab' - } - # create experimentHiC obj; _update (and by extension, add_default_embeds) - # are called automatically - test_exp = ExperimentHiC.create(registry, None, exp_data) - # trigger the reified embedded property (defined in snovault/resources.py) - embedded = test_exp.embedded - embedded_list = test_exp.embedded_list - type_info_embedded = registry[TYPES]['experiment_hi_c'].embedded_list - assert type_info_embedded == embedded_list - if 'produced_in_pub.*' in embedded_list: - assert 'produced_in_pub.*' in embedded - assert 'produced_in_pub.award.@id' in embedded - assert 'produced_in_pub.award.@type' in embedded - assert 'produced_in_pub.award.principals_allowed.*' in embedded - assert 'produced_in_pub.award.display_title' in embedded - assert 'produced_in_pub.award.uuid' in embedded - assert 'experiment_sets.accession' in embedded_list - assert 'experiment_sets.@id' in embedded - assert 'experiment_sets.@type' in embedded - assert 'experiment_sets.principals_allowed.*' in embedded - assert 'experiment_sets.display_title' in embedded - assert 'experiment_sets.uuid' in embedded
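test_experiment_set_default_embedded_list checks the invariant that add_default_embeds expands every embedded path so the linked item also carries a minimal set of identifying fields (@id, @type, display_title, uuid, principals_allowed.*). A toy expansion illustrating that invariant, not snovault's actual algorithm:

    MINIMAL_FIELDS = ['@id', '@type', 'display_title', 'uuid', 'principals_allowed.*']

    def add_default_embeds(embedded_list):
        # Each embedded path also gets the minimal identifying fields of the
        # item it links to, which is what the asserts above spell out for
        # 'produced_in_pub.award' and 'experiment_sets'.
        expanded = set(embedded_list)
        for path in embedded_list:
            base = path[:-2] if path.endswith('.*') else path.rsplit('.', 1)[0]
            expanded.update('{}.{}'.format(base, field) for field in MINIMAL_FIELDS)
        return sorted(expanded)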
- - -# tests for the experiment_sets calculated properties -def test_calculated_experiment_sets_for_custom_experiment_set(testapp, experiment, custom_experiment_set): - assert len(experiment['experiment_sets']) == 0 - testapp.patch_json(custom_experiment_set['@id'], {'experiments_in_set': [experiment['@id']]}, status=200) - expt_res = testapp.get(experiment['@id']) - assert custom_experiment_set['uuid'] == expt_res.json['experiment_sets'][0]['uuid'] - - -def test_calculated_experiment_sets_for_replicate_experiment_set(testapp, experiment, replicate_experiment_set): - assert len(experiment['experiment_sets']) == 0 - testapp.patch_json( - replicate_experiment_set['@id'], - {'replicate_exps': - [{'replicate_exp': experiment['@id'], 'bio_rep_no': 1, 'tec_rep_no': 1}]}, - status=200) - expt_res = testapp.get(experiment['@id']) - assert replicate_experiment_set['uuid'] == expt_res.json['experiment_sets'][0]['uuid'] - - -@pytest.fixture -def pub1_data(lab, award): - # encode paper published 2012-09-06 - return { - 'award': award['@id'], - 'lab': lab['@id'], - 'ID': "PMID:22955616" - } - - -@pytest.fixture -def pub2_data(lab, award): - # Sanborn et al paper published 2015-11-24 - return { - 'award': award['@id'], - 'lab': lab['@id'], - 'ID': "PMID:26499245" - } - - -def test_calculated_produced_in_pub_for_rep_experiment_set(testapp, replicate_experiment_set, pub1_data): - # post single rep_exp_set to single pub - pub1_data['exp_sets_prod_in_pub'] = [replicate_experiment_set['@id']] - pub1res = testapp.post_json('/publication', pub1_data, status=201) - expsetres = testapp.get(replicate_experiment_set['@id']) - assert 'produced_in_pub' in expsetres - assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' in expsetres.json['produced_in_pub'].values() - - -def test_calculated_produced_in_pub_for_cust_experiment_set(testapp, custom_experiment_set, pub1_data): - # post single cust_exp_set to single pub - pub1_data['exp_sets_prod_in_pub'] = [custom_experiment_set['@id']] - pub1res = testapp.post_json('/publication', pub1_data, status=201) - expsetres = testapp.get(custom_experiment_set['@id']) - assert 'produced_in_pub' in expsetres - assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' in expsetres.json['produced_in_pub'].values() - - -def test_calculated_produced_in_pub_for_two_experiment_set_to_one_pub( - testapp, replicate_experiment_set, custom_experiment_set, pub1_data): - # post two exp_set to single pub - pub1_data['exp_sets_prod_in_pub'] = [replicate_experiment_set['@id'], custom_experiment_set['@id']] - pub1res = testapp.post_json('/publication', pub1_data, status=201) - responses = [testapp.get(replicate_experiment_set['@id']), - testapp.get(custom_experiment_set['@id'])] - for response in responses: - assert 'produced_in_pub' in response - assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' == response.json['produced_in_pub']['@id'] - - -def test_calculated_produced_in_pub_for_two_experiment_set_two_pubs( - testapp, replicate_experiment_set, custom_experiment_set, pub1_data, pub2_data): - # post different exp_set to each pub -
pub1_data['exp_sets_prod_in_pub'] = [replicate_experiment_set['@id']] - pub2_data['exp_sets_prod_in_pub'] = [custom_experiment_set['@id']] - pub1res = testapp.post_json('/publication', pub1_data, status=201) - pub2res = testapp.post_json('/publication', pub2_data, status=201) - responses = [testapp.get(replicate_experiment_set['@id']), - testapp.get(custom_experiment_set['@id'])] - for response in responses: - assert 'produced_in_pub' in response - assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' == responses[0].json['produced_in_pub']['@id'] - assert '/publications/' + pub2res.json['@graph'][0]['uuid'] + '/' == responses[1].json['produced_in_pub']['@id'] - - -def test_calculated_produced_in_pub_for_one_experiment_set_two_pubs( - testapp, replicate_experiment_set, pub1_data, pub2_data): - # post one exp_set to two pubs - this one should pick up only the most recent pub - pub1_data['exp_sets_prod_in_pub'] = [replicate_experiment_set['@id']] - pub2_data['exp_sets_prod_in_pub'] = [replicate_experiment_set['@id']] - pub1res = testapp.post_json('/publication', pub1_data, status=201) - pub2res = testapp.post_json('/publication', pub2_data, status=201) - response = testapp.get(replicate_experiment_set['@id']) - assert 'produced_in_pub' in response - assert not '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' == response.json['produced_in_pub']['@id'] - assert '/publications/' + pub2res.json['@graph'][0]['uuid'] + '/' == response.json['produced_in_pub']['@id'] - - -def test_calculated_publications_in_experiment_set_no_data( - testapp, replicate_experiment_set, custom_experiment_set, pub1_data): - testapp.post_json('/publication', pub1_data, status=201) - assert not replicate_experiment_set['publications_of_set'] - assert not custom_experiment_set['publications_of_set'] - - -def test_calculated_publications_in_rep_experiment_set_2_fields( - testapp, replicate_experiment_set, pub1_data): - # post single rep_exp_set to single pub both fields - pub1_data['exp_sets_prod_in_pub'] = [replicate_experiment_set['@id']] - pub1_data['exp_sets_used_in_pub'] = [replicate_experiment_set['@id']] - pub1res = testapp.post_json('/publication', pub1_data, status=201) - response = testapp.get(replicate_experiment_set['@id']) - assert 'publications_of_set' in response - assert len(response.json['publications_of_set']) == 1 - assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' in response.json['publications_of_set'][0].values() - - -def test_calculated_publications_in_cust_experiment_set_used_in_field( - testapp, custom_experiment_set, pub1_data): - # post only used in publication one pub one exp set - pub1_data['exp_sets_used_in_pub'] = [custom_experiment_set['@id']] - pub1res = testapp.post_json('/publication', pub1_data, status=201) - response = testapp.get(custom_experiment_set['@id']) - assert 'publications_of_set' in response - assert len(response.json['publications_of_set']) == 1 - assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' in response.json['publications_of_set'][0].values() - - -def test_calculated_publications_in_rep_experiment_set_two_pubs_both_fields( - testapp, replicate_experiment_set, pub1_data, pub2_data): - # post same experiment set to two pubs in either field - pub1_data['exp_sets_prod_in_pub'] = [replicate_experiment_set['@id']] - pub2_data['exp_sets_used_in_pub'] = [replicate_experiment_set['@id']] - pub1res = testapp.post_json('/publication', pub1_data, status=201) - pub2res = testapp.post_json('/publication', pub2_data, status=201) - response = testapp.get(replicate_experiment_set['@id']) - assert 'publications_of_set' in response - assert len(response.json['publications_of_set']) == 2 - publications = response.json['publications_of_set'] - combined_pub_vals = [p['@id'] for p in publications] - assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' in combined_pub_vals - assert '/publications/' + pub2res.json['@graph'][0]['uuid'] + '/' in combined_pub_vals
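These tests jointly encode the publication tie-break: publications_of_set accumulates every publication that links a set through either field, while produced_in_pub reports only the most recent producing publication. A sketch of that rule, assuming a sortable date_published field (the pub fixtures above note the real publication dates):

    def publication_calc_props(producing_pubs, using_pubs):
        # publications_of_set is the union of both link fields;
        # produced_in_pub keeps only the newest producing publication
        # (date_published is an assumed field name for this sketch).
        props = {'publications_of_set':
                 sorted({p['@id'] for p in producing_pubs + using_pubs})}
        if producing_pubs:
            newest = max(producing_pubs, key=lambda p: p['date_published'])
            props['produced_in_pub'] = newest['@id']
        return props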
- - -def test_calculated_publications_in_rep_experiment_set_two_pubs_in_used( - testapp, replicate_experiment_set, pub1_data, pub2_data): - # post same experiment set to two pubs in used in pub field - pub1_data['exp_sets_used_in_pub'] = [replicate_experiment_set['@id']] - pub2_data['exp_sets_used_in_pub'] = [replicate_experiment_set['@id']] - pub1res = testapp.post_json('/publication', pub1_data, status=201) - pub2res = testapp.post_json('/publication', pub2_data, status=201) - response = testapp.get(replicate_experiment_set['@id']) - assert 'publications_of_set' in response - assert len(response.json['publications_of_set']) == 2 - publications = response.json['publications_of_set'] - combined_pub_vals = list(publications[0].values()) + list(publications[1].values()) - assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' in combined_pub_vals - assert '/publications/' + pub2res.json['@graph'][0]['uuid'] + '/' in combined_pub_vals - - -# experiment pub calculated properties tests - - -@pytest.fixture -def repset_w_exp1(testapp, replicate_experiment_set_data, experiment): - repset = replicate_experiment_set_data - repset['replicate_exps'] = [{'replicate_exp': experiment['@id'], 'bio_rep_no': 1, 'tec_rep_no': 1}] - return testapp.post_json('/experiment_set_replicate', repset).json['@graph'][0] - - -@pytest.fixture -def experiment2(testapp, experiment_data, exp_types): - experiment_data['experiment_type'] = exp_types['capc']['@id'] - return testapp.post_json('/experiment_capture_c', experiment_data).json['@graph'][0] - - -@pytest.fixture -def custset_w_exp1(testapp, custom_experiment_set_data, experiment): - custset = custom_experiment_set_data - custset['experiments_in_set'] = [experiment['@id']] - return testapp.post_json('/experiment_set', custset).json['@graph'][0] - - -@pytest.fixture -def custset_w_exp2(testapp, custom_experiment_set_data, experiment2): - custset = custom_experiment_set_data - custset['experiments_in_set'] = [experiment2['@id']] - return testapp.post_json('/experiment_set', custset).json['@graph'][0] - - -def test_calculated_expt_produced_in_pub_for_rep_experiment_set( - testapp, repset_w_exp1, pub1_data): - # post single rep_exp_set to single pub - pub1_data['exp_sets_prod_in_pub'] = [repset_w_exp1['@id']] - pub1res = testapp.post_json('/publication', pub1_data, status=201) - expres = testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp']) - assert 'produced_in_pub' in expres - assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' == expres.json['produced_in_pub']['@id'] - - -def test_calculated_expt_produced_in_pub_for_expt_w_ref( - testapp, experiment_data, replicate_experiment_set_data, pub2_data, publication): - experiment_data['references'] = [publication['@id']] - # just check experiment by itself first - expt = testapp.post_json('/experiment_hi_c', experiment_data, status=201).json['@graph'][0] - assert 'produced_in_pub' in expt - assert
publication['@id'] == expt['produced_in_pub'] - # post repset with this experiment - replicate_experiment_set_data['replicate_exps'] = [{'bio_rep_no': 1, 'tec_rep_no': 1, 'replicate_exp': expt['@id']}] - repset = testapp.post_json('/experiment_set_replicate', replicate_experiment_set_data, status=201).json['@graph'][0] - # post single rep_exp_set to single pub - pub2_data['exp_sets_prod_in_pub'] = [repset['@id']] - testapp.post_json('/publication', pub2_data, status=201) - expinset = testapp.get(repset['replicate_exps'][0]['replicate_exp']).json - assert 'produced_in_pub' in expinset - assert publication['@id'] == expinset['produced_in_pub']['@id'] - - -def test_calculated_expt_produced_in_pub_for_cust_experiment_set( - testapp, custset_w_exp1, pub1_data): - # post single cust_exp_set to single pub - pub1_data['exp_sets_prod_in_pub'] = [custset_w_exp1['@id']] - pub1res = testapp.post_json('/publication', pub1_data, status=201) - expres = testapp.get(custset_w_exp1['experiments_in_set'][0]) - assert 'produced_in_pub' not in expres.json.keys() - - -def test_calculated_expt_produced_in_pub_for_one_expt_in_two_expset_one_pub( - testapp, repset_w_exp1, custset_w_exp1, pub1_data): - # post two exp_set with same experiment (repset and custset) to single pub - pub1_data['exp_sets_prod_in_pub'] = [repset_w_exp1['@id'], custset_w_exp1['@id']] - pub1res = testapp.post_json('/publication', pub1_data, status=201) - # both responses will get the same experiment - responses = [testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp']), - testapp.get(custset_w_exp1['experiments_in_set'][0])] - for response in responses: - assert 'produced_in_pub' in response - assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' == response.json['produced_in_pub']['@id'] - - -def test_calculated_expt_produced_in_pub_for_two_exp_two_expset_two_pubs( - testapp, repset_w_exp1, custset_w_exp2, pub1_data, pub2_data): - # post 2 exp_set (one repset, one custom) each with diff expt to each pub - # only expt in repset should get the pub of repset - pub1_data['exp_sets_prod_in_pub'] = [repset_w_exp1['@id']] - pub2_data['exp_sets_prod_in_pub'] = [custset_w_exp2['@id']] - pub1res = testapp.post_json('/publication', pub1_data, status=201) - testapp.post_json('/publication', pub2_data, status=201) - responses = [testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp']), - testapp.get(custset_w_exp2['experiments_in_set'][0])] - assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' == responses[0].json['produced_in_pub']['@id'] - assert 'produced_in_pub' not in responses[1].json - - -def test_calculated_expt_produced_in_pub_for_one_expt_one_expset_two_pubs( - testapp, repset_w_exp1, pub1_data, pub2_data): - # post one exp_set to two pubs - this one should pick up only the most recent pub - pub1_data['exp_sets_prod_in_pub'] = [repset_w_exp1['@id']] - pub2_data['exp_sets_prod_in_pub'] = [repset_w_exp1['@id']] - pub1res = testapp.post_json('/publication', pub1_data, status=201) - pub2res = testapp.post_json('/publication', pub2_data, status=201) - response = testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp']) - assert 'produced_in_pub' in response - assert not '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' == response.json['produced_in_pub']['@id'] - assert '/publications/' + pub2res.json['@graph'][0]['uuid'] + '/' == response.json['produced_in_pub']['@id'] - - -def test_calculated_publications_in_experiment_no_data( - testapp, repset_w_exp1, custset_w_exp2, pub1_data): - 
pub1res = testapp.post_json('/publication', pub1_data, status=201) - responses = [testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp']), - testapp.get(custset_w_exp2['experiments_in_set'][0])] - for response in responses: - assert response.json['publications_of_exp'] == [] - - -def test_calculated_publications_in_expt_w_repset_in_both_fields( - testapp, repset_w_exp1, pub1_data): - # post single rep_exp_set to single pub both fields - pub1_data['exp_sets_prod_in_pub'] = [repset_w_exp1['@id']] - pub1_data['exp_sets_used_in_pub'] = [repset_w_exp1['@id']] - pub1res = testapp.post_json('/publication', pub1_data, status=201) - response = testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp']) - assert 'publications_of_exp' in response - assert len(response.json['publications_of_exp']) == 1 - assert pub1res.json['@graph'][0]['uuid'] == response.json['publications_of_exp'][0]['uuid'] - - -def test_calculated_publications_in_expt_w_custset_used_in_field( - testapp, custset_w_exp2, pub1_data): - # post only used in publication one pub one exp set - pub1_data['exp_sets_used_in_pub'] = [custset_w_exp2['@id']] - pub1res = testapp.post_json('/publication', pub1_data, status=201) - response = testapp.get(custset_w_exp2['experiments_in_set'][0]) - assert 'publications_of_exp' in response - assert len(response.json['publications_of_exp']) == 1 - assert pub1res.json['@graph'][0]['uuid'] == response.json['publications_of_exp'][0]['uuid'] - - -def test_calculated_publications_in_expt_w_repset_two_pubs_both_fields( - testapp, repset_w_exp1, pub1_data, pub2_data): - # post same experiment set to two pubs in either field - pub1_data['exp_sets_prod_in_pub'] = [repset_w_exp1['@id']] - pub2_data['exp_sets_used_in_pub'] = [repset_w_exp1['@id']] - pub1res = testapp.post_json('/publication', pub1_data, status=201) - pub2res = testapp.post_json('/publication', pub2_data, status=201) - pubuuids = [pub1res.json['@graph'][0]['uuid']] - pubuuids.append(pub2res.json['@graph'][0]['uuid']) - response = testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp']) - assert 'publications_of_exp' in response - assert len(response.json['publications_of_exp']) == 2 - publications = response.json['publications_of_exp'] - for pub in publications: - assert pub['uuid'] in pubuuids - - -def test_calculated_publications_in_expt_w_repset_two_pubs_in_used( - testapp, repset_w_exp1, pub1_data, pub2_data): - # post same experiment set to two pubs in used in pub field - pub1_data['exp_sets_used_in_pub'] = [repset_w_exp1['@id']] - pub2_data['exp_sets_used_in_pub'] = [repset_w_exp1['@id']] - pub1res = testapp.post_json('/publication', pub1_data, status=201) - pub2res = testapp.post_json('/publication', pub2_data, status=201) - pubuuids = [pub1res.json['@graph'][0]['uuid']] - pubuuids.append(pub2res.json['@graph'][0]['uuid']) - response = testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp']) - assert 'publications_of_exp' in response - assert len(response.json['publications_of_exp']) == 2 - publications = response.json['publications_of_exp'] - for pub in publications: - assert pub['uuid'] in pubuuids - - -def test_calculated_no_of_expts_in_set_w_no_exps(empty_replicate_set): - assert 'number_of_experiments' not in empty_replicate_set - - -def test_calculated_no_of_expts_in_set_w_2_exps(two_experiment_replicate_set): - assert two_experiment_replicate_set['number_of_experiments'] == 2 - - -# tests for category calculated_property -@pytest.fixture -def target_w_prot(testapp, lab, award): - item = { - 
'description': "Protein target", - 'targeted_proteins': ['CTCF (ABCD)'], - 'award': award['@id'], - 'lab': lab['@id'], - } - return testapp.post_json('/target', item).json['@graph'][0] - - -@pytest.fixture -def exp_w_target_info(lab, award, human_biosample, exp_types, - mboI, genomic_region_bio_feature): - return { - 'lab': lab['@id'], - 'award': award['@id'], - 'biosample': human_biosample['@id'], - 'experiment_type': exp_types['capc']['@id'], - 'targeted_regions': [{'target': [genomic_region_bio_feature['@id']]}] - } - - -@pytest.fixture -def expt_w_targ_region(testapp, exp_w_target_info): - return testapp.post_json('/experiment_capture_c', exp_w_target_info).json['@graph'][0] - - -@pytest.fixture -def expt_w_2_targ_regions(testapp, exp_w_target_info, gene_bio_feature): - region = {'target': [gene_bio_feature['@id']]} - exp_w_target_info['targeted_regions'].append(region) - return testapp.post_json('/experiment_capture_c', exp_w_target_info).json['@graph'][0] - - -@pytest.fixture -def expt_w_target_data(lab, award, human_biosample, - prot_bio_feature, exp_types): - return { - 'lab': lab['@id'], - 'award': award['@id'], - 'biosample': human_biosample['@id'], - 'experiment_type': exp_types['chia']['@id'], - 'targeted_factor': [prot_bio_feature['@id']] - } - - -@pytest.fixture -def expt_w_target(testapp, expt_w_target_data): - return testapp.post_json('/experiment_chiapet', expt_w_target_data).json['@graph'][0] - - -@pytest.fixture -def chipseq_expt(testapp, expt_w_target_data, exp_types): - expt_w_target_data['experiment_type'] = exp_types['chipseq']['@id'] - return testapp.post_json('/experiment_seq', expt_w_target_data).json['@graph'][0] - - -@pytest.fixture -def tsaseq_expt(testapp, expt_w_target_data, exp_types): - expt_w_target_data['experiment_type'] = exp_types['tsaseq']['@id'] - return testapp.post_json('/experiment_tsaseq', expt_w_target_data).json['@graph'][0] - - -@pytest.fixture -def repliseq_info(lab, award, human_biosample, exp_types): - return { - 'lab': lab['@id'], - 'award': award['@id'], - 'biosample': human_biosample['@id'], - 'experiment_type': exp_types['repliseq']['@id'], - } - - -@pytest.fixture -def repliseq_1(testapp, repliseq_info): - return testapp.post_json('/experiment_repliseq', repliseq_info).json['@graph'][0] - - -@pytest.fixture -def repliseq_2(testapp, repliseq_info): - repliseq_info['stage_fraction'] = 'early' - return testapp.post_json('/experiment_repliseq', repliseq_info).json['@graph'][0] - - -@pytest.fixture -def repliseq_3(testapp, repliseq_info): - repliseq_info['stage_fraction'] = 'early' - repliseq_info['total_fractions_in_exp'] = 16 - return testapp.post_json('/experiment_repliseq', repliseq_info).json['@graph'][0] - - -@pytest.fixture -def repliseq_4(testapp, repliseq_info): - repliseq_info['stage_fraction'] = 'early' - repliseq_info['total_fractions_in_exp'] = 2 - repliseq_info['cell_cycle_phase'] = 'S' - return testapp.post_json('/experiment_repliseq', repliseq_info).json['@graph'][0] - - -@pytest.fixture -def experiment_atacseq(testapp, repliseq_info, exp_types): - repliseq_info['experiment_type'] = exp_types['atacseq']['@id'] - return testapp.post_json('/experiment_atacseq', repliseq_info).json['@graph'][0] - - -@pytest.fixture -def damid_no_fusion(testapp, repliseq_info, exp_types): - repliseq_info['experiment_type'] = exp_types['dam']['@id'] - return testapp.post_json('/experiment_damid', repliseq_info).json['@graph'][0] - - -@pytest.fixture -def damid_w_fusion(testapp, repliseq_info, prot_bio_feature, exp_types): - 
repliseq_info['experiment_type'] = exp_types['dam']['@id'] - repliseq_info['targeted_factor'] = [prot_bio_feature['@id']] - return testapp.post_json('/experiment_damid', repliseq_info).json['@graph'][0] - - -@pytest.fixture -def damid_w_multifusion(testapp, repliseq_info, prot_bio_feature, gene_bio_feature, exp_types): - repliseq_info['experiment_type'] = exp_types['dam']['@id'] - repliseq_info['targeted_factor'] = [prot_bio_feature['@id'], gene_bio_feature['@id']] - return testapp.post_json('/experiment_damid', repliseq_info).json['@graph'][0] - - -@pytest.fixture -def basic_info(lab, award): - return { - 'lab': lab['@id'], - 'award': award['@id'], - } - - -@pytest.fixture -def list_of_region_biofeatures(testapp, basic_info, region_term): - item = {'description': 'Test Region Biofeature', - 'feature_type': region_term['@id']} - item.update(basic_info) - feats = [] - for i in range(10): - item['preferred_label'] = f'genomic region {i + 1}' - feats.append(testapp.post_json('/bio_feature', item).json['@graph'][0]) - return feats - -@pytest.fixture -def list_of_3_reg_biofeatures(list_of_region_biofeatures): - return list_of_region_biofeatures[:3] - - -@pytest.fixture -def imaging_path_1(testapp, basic_info, genomic_region_bio_feature): - basic_info['target'] = [genomic_region_bio_feature['@id']] - basic_info['labeled_probe'] = 'FITC goat anti rabbit' - return testapp.post_json('/imaging_path', basic_info).json['@graph'][0] - - -@pytest.fixture -def imaging_path_2(testapp, basic_info, genomic_region_bio_feature): - basic_info['target'] = [genomic_region_bio_feature['@id']] - basic_info['labeled_probe'] = 'TRITC horse anti rabbit' - return testapp.post_json('/imaging_path', basic_info).json['@graph'][0] - - -@pytest.fixture -def imaging_path_3(testapp, basic_info, basic_region_bio_feature): - basic_info['target'] = [basic_region_bio_feature['@id']] - basic_info['labeled_probe'] = 'DAPI' - return testapp.post_json('/imaging_path', basic_info).json['@graph'][0] - - -@pytest.fixture -def imaging_path_4(testapp, basic_info, list_of_region_biofeatures): - basic_info['target'] = [bf.get('@id') for bf in list_of_region_biofeatures] - basic_info['labeled_probe'] = 'DAPI' - return testapp.post_json('/imaging_path', basic_info).json['@graph'][0] - - -@pytest.fixture -def imaging_path_5(testapp, basic_info, list_of_3_reg_biofeatures): - basic_info['target'] = [bf.get('@id') for bf in list_of_3_reg_biofeatures] - basic_info['labeled_probe'] = 'DAPI' - return testapp.post_json('/imaging_path', basic_info).json['@graph'][0] - - -@pytest.fixture -def imaging_path_6(testapp, basic_info, prot_bio_feature): - basic_info['target'] = [prot_bio_feature['@id']] - basic_info['labeled_probe'] = 'DAPI' - return testapp.post_json('/imaging_path', basic_info).json['@graph'][0] - - -@pytest.fixture -def microscopy_no_path(testapp, repliseq_info, exp_types): - repliseq_info['experiment_type'] = exp_types['fish']['@id'] - return testapp.post_json('/experiment_mic', repliseq_info).json['@graph'][0] - - -@pytest.fixture -def microscopy_w_path_w_many_targets(testapp, repliseq_info, imaging_path_4, exp_types): - repliseq_info['experiment_type'] = exp_types['fish']['@id'] - img_path = {'path': imaging_path_4['@id'], 'channel': 'ch01'} - repliseq_info['imaging_paths'] = [img_path] - return testapp.post_json('/experiment_mic', repliseq_info).json['@graph'][0] - - -@pytest.fixture -def microscopy_w_path_w_many_targets_and_split_path(testapp, repliseq_info, imaging_path_4, - imaging_path_2, imaging_path_6, exp_types): - 
repliseq_info['experiment_type'] = exp_types['fish']['@id'] - img_path1 = {'path': imaging_path_4['@id'], 'channel': 'ch01'} - img_path2 = {'path': imaging_path_2['@id'], 'channel': 'ch02'} - img_path3 = {'path': imaging_path_6['@id'], 'channel': 'ch03'} - repliseq_info['imaging_paths'] = [img_path1, img_path2, img_path3] - return testapp.post_json('/experiment_mic', repliseq_info).json['@graph'][0] - - -@pytest.fixture -def microscopy_w_path_w_few_targets_and_split_path(testapp, repliseq_info, imaging_path_5, - imaging_path_6, exp_types): - repliseq_info['experiment_type'] = exp_types['fish']['@id'] - img_path1 = {'path': imaging_path_5['@id'], 'channel': 'ch01'} - img_path2 = {'path': imaging_path_6['@id'], 'channel': 'ch02'} - repliseq_info['imaging_paths'] = [img_path1, img_path2] - return testapp.post_json('/experiment_mic', repliseq_info).json['@graph'][0] - - -@pytest.fixture -def microscopy_w_path(testapp, repliseq_info, imaging_path_1, exp_types): - repliseq_info['experiment_type'] = exp_types['fish']['@id'] - img_path = {'path': imaging_path_1['@id'], 'channel': 'ch01'} - repliseq_info['imaging_paths'] = [img_path] - return testapp.post_json('/experiment_mic', repliseq_info).json['@graph'][0] - - -@pytest.fixture -def microscopy_w_multipath(testapp, repliseq_info, imaging_path_1, imaging_path_2, - imaging_path_3, exp_types): - repliseq_info['experiment_type'] = exp_types['fish']['@id'] - img_path1 = {'path': imaging_path_1['@id'], 'channel': 'ch01'} - img_path2 = {'path': imaging_path_2['@id'], 'channel': 'ch02'} - img_path3 = {'path': imaging_path_3['@id'], 'channel': 'ch03'} - repliseq_info['imaging_paths'] = [img_path1, img_path2, img_path3] - return testapp.post_json('/experiment_mic', repliseq_info).json['@graph'][0]
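The experiment_categorizer tests below show how a microscopy experiment's Target value is condensed: many region targets collapse to a count such as '10 genomic regions', and a group split across imaging paths with labels like '15 TADs on chr19' and '22 TADs on chr19' is re-summed, per the microscopy_w_splitpath docstring below. A rough sketch of just the counting part, with the regex, the count threshold, and the function name all assumed for illustration:

    import re

    def summarize_targets(labels):
        # Labels such as '15 TADs on chr19' and '22 TADs on chr19' form a
        # split group: re-sum their leading counts; otherwise fall back to
        # a simple target count once the list gets long.
        matches = [re.match(r'(\d+) (.+)$', lbl) for lbl in labels]
        if labels and all(matches) and len({m.group(2) for m in matches}) == 1:
            return '{} {}'.format(sum(int(m.group(1)) for m in matches),
                                  matches[0].group(2))
        if len(labels) > 5:  # assumed threshold, not the real cutoff
            return '{} genomic regions'.format(len(labels))
        return ', '.join(labels)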
If text is formatted as follows, the split group - will be found and replaced with the sum''' - repliseq_info['experiment_type'] = exp_types['fish']['@id'] - img_path1 = {'path': imaging_path_1['@id'], 'channel': 'ch01'} - img_path3 = {'path': imaging_path_3['@id'], 'channel': 'ch03'} - repliseq_info['imaging_paths'] = [img_path1, img_path3] - testapp.patch_json(basic_region_bio_feature['@id'], - {'preferred_label': '15 TADs on chr19'}).json['@graph'][0] - testapp.patch_json(genomic_region_bio_feature['@id'], - {'preferred_label': '22 TADs on chr19'}).json['@graph'][0] - return testapp.post_json('/experiment_mic', repliseq_info).json['@graph'][0] - - -def test_experiment_atacseq_display_title(experiment_atacseq): - assert experiment_atacseq.get('display_title') == 'ATAC-seq on GM12878 - ' + experiment_atacseq.get('accession') - - -def test_experiment_damid_w_multifusion_display_title(damid_w_multifusion): - assert damid_w_multifusion.get('display_title') == 'DamID-seq with mulitiple DAM fusions on GM12878 - ' + damid_w_multifusion.get('accession') - - -def test_experiment_chiapet_w_target_display_title(expt_w_target): - assert expt_w_target.get('display_title') == 'ChIA-PET against RAD21 protein on GM12878 - ' + expt_w_target.get('accession') - - -def test_experiment_chipseq_w_target_display_title(chipseq_expt): - assert chipseq_expt.get('display_title') == 'ChIP-seq against RAD21 protein on GM12878 - ' + chipseq_expt.get('accession') - - -def test_experiment_tsaseq_display_title(tsaseq_expt): - assert tsaseq_expt.get('display_title') == 'TSA-seq against RAD21 protein on GM12878 - ' + tsaseq_expt.get('accession') - - -def test_experiment_categorizer_4_mic_no_path(testapp, microscopy_no_path): - assert microscopy_no_path['experiment_categorizer']['field'] == 'Default' - assert microscopy_no_path['experiment_categorizer'].get('value') is None - - -def test_experiment_categorizer_4_mic_w_path(testapp, microscopy_w_path, genomic_region_bio_feature): - assert microscopy_w_path['experiment_categorizer']['field'] == 'Target' - assert microscopy_w_path['experiment_categorizer']['value'] == genomic_region_bio_feature['display_title'] - - -def test_experiment_categorizer_4_mic_w_multi_path(testapp, microscopy_w_multipath, genomic_region_bio_feature, basic_region_bio_feature): - vals2chk = [genomic_region_bio_feature['display_title'], basic_region_bio_feature['display_title']] - len2chk = len(vals2chk[0]) + len(vals2chk[1]) + 2 - assert microscopy_w_multipath['experiment_categorizer']['field'] == 'Target' - value = microscopy_w_multipath['experiment_categorizer']['value'] - assert len(value) == len2chk - for v in vals2chk: - assert v in value - - -def test_experiment_categorizer_4_mic_w_path_w_many_targets(testapp, microscopy_w_path_w_many_targets): - assert microscopy_w_path_w_many_targets['experiment_categorizer']['field'] == 'Target' - assert microscopy_w_path_w_many_targets['experiment_categorizer']['value'] == '10 genomic regions' - - -def test_experiment_categorizer_4_mic_w_path_w_many_targets_and_split_path(testapp, microscopy_w_path_w_many_targets_and_split_path, - prot_bio_feature): - assert microscopy_w_path_w_many_targets_and_split_path['experiment_categorizer']['field'] == 'Target' - value = microscopy_w_path_w_many_targets_and_split_path['experiment_categorizer']['value'] - assert '11 genomic regions' in value - assert prot_bio_feature.get('display_title') in value - - -def test_experiment_categorizer_4_mic_w_paths_w_fewer_targets(testapp, microscopy_w_path_w_few_targets_and_split_path, 
-                                                              list_of_3_reg_biofeatures, prot_bio_feature):
-    # import pdb; pdb.set_trace()
-    assert microscopy_w_path_w_few_targets_and_split_path['experiment_categorizer']['field'] == 'Target'
-    value = microscopy_w_path_w_few_targets_and_split_path['experiment_categorizer']['value']
-    for bf in list_of_3_reg_biofeatures:
-        assert bf.get('display_title') in value
-    assert prot_bio_feature.get('display_title') in value
-
-
-def test_experiment_categorizer_4_mic_w_split_path(testapp, microscopy_w_splitpath):
-    '''Sometimes a (group of) target(s) is split into different imaging paths,
-    e.g. due to multiplexing. Sum the split targets and return only one string.'''
-    assert microscopy_w_splitpath['experiment_categorizer']['value'] == '37 TADs on chr19'
-
-
-def test_experiment_categorizer_4_chiapet_no_fusion(testapp, repliseq_info, exp_types):
-    repliseq_info['experiment_type'] = exp_types['chia']['@id']
-    res = testapp.post_json('/experiment_chiapet', repliseq_info).json['@graph'][0]
-    assert res['experiment_categorizer']['field'] == 'Default'
-    assert res['experiment_categorizer']['value'] is None
-
-
-def test_experiment_categorizer_4_damid_no_fusion(testapp, damid_no_fusion):
-    assert damid_no_fusion['experiment_categorizer']['field'] == 'Target'
-    assert damid_no_fusion['experiment_categorizer'].get('value') == 'None (Control)'
-
-
-def test_experiment_categorizer_4_damid_w_fusion(testapp, damid_w_fusion, prot_bio_feature):
-    assert damid_w_fusion['experiment_categorizer']['field'] == 'Target'
-    assert damid_w_fusion['experiment_categorizer']['value'] == prot_bio_feature['display_title']
-
-
-def test_experiment_categorizer_4_repliseq_no_fraction_info(testapp, repliseq_1):
-    assert repliseq_1['experiment_categorizer']['field'] == 'Default'
-    assert repliseq_1['experiment_categorizer'].get('value') is None
-
-
-def test_experiment_categorizer_4_repliseq_only_fraction(testapp, repliseq_2):
-    wanted = 'early of an unspecified number of fractions'
-    assert repliseq_2['experiment_categorizer']['field'] == 'Fraction'
-    assert repliseq_2['experiment_categorizer']['value'] == wanted
-
-
-def test_experiment_categorizer_4_repliseq_fraction_and_total(testapp, repliseq_3):
-    wanted = 'early of 16 fractions'
-    assert repliseq_3['experiment_categorizer']['field'] == 'Fraction'
-    assert repliseq_3['experiment_categorizer']['value'] == wanted
-
-
-def test_experiment_categorizer_w_target(testapp, expt_w_target, prot_bio_feature):
-    assert expt_w_target['experiment_categorizer']['field'] == 'Target'
-    assert expt_w_target['experiment_categorizer']['value'] == prot_bio_feature['display_title']
-
-
-def test_experiment_categorizer_w_enzyme(testapp, experiment, mboI):
-    assert experiment['experiment_categorizer']['field'] == 'Enzyme'
-    assert experiment['experiment_categorizer']['value'] == mboI['display_title']
-
-
-def test_experiment_categorizer_w_target_and_enzyme(testapp, expt_w_target, prot_bio_feature, mboI):
-    # import pdb; pdb.set_trace()
-    res = testapp.patch_json(expt_w_target['@id'], {'digestion_enzyme': mboI['@id']}).json['@graph'][0]
-    assert res['digestion_enzyme'] == mboI['@id']
-    assert res['experiment_categorizer']['field'] == 'Target'
-    assert res['experiment_categorizer']['value'] == prot_bio_feature['display_title']
-
-
-def test_experiment_categorizer_w_no_cat1(testapp, experiment_data, exp_types):
-    del experiment_data['digestion_enzyme']
-    experiment_data['experiment_type'] = exp_types['rnaseq']['@id']
-    expt = testapp.post_json('/experiment_seq', experiment_data).json['@graph'][0]
-    assert expt['experiment_categorizer']['field'] == 'Default'
-    assert expt['experiment_categorizer'].get('value') is None
-
-
-def test_experiment_categorizer_cap_c_no_regions(testapp, experiment_data, mboI, exp_types):
-    experiment_data['experiment_type'] = exp_types['capc']['@id']
-    expt = testapp.post_json('/experiment_capture_c', experiment_data).json['@graph'][0]
-    assert expt['experiment_categorizer']['field'] == 'Enzyme'
-    assert expt['experiment_categorizer']['value'] == mboI['display_title']
-
-
-def test_experiment_categorizer_cap_c_w_region(expt_w_targ_region, genomic_region_bio_feature):
-    assert expt_w_targ_region['experiment_categorizer']['field'] == 'Target'
-    assert expt_w_targ_region['experiment_categorizer']['value'] == genomic_region_bio_feature['display_title']
-
-
-def test_experiment_categorizer_cap_c_w_2regions(
-        expt_w_2_targ_regions, genomic_region_bio_feature, gene_bio_feature):
-    wanted = ', '.join(sorted([genomic_region_bio_feature['display_title'], gene_bio_feature['display_title']]))
-    assert expt_w_2_targ_regions['experiment_categorizer']['field'] == 'Target'
-    assert expt_w_2_targ_regions['experiment_categorizer']['value'] == wanted
-
-
-@pytest.fixture
-def new_exp_type(lab, award):
-    data = {
-        'uuid': str(uuid4()),
-        'title': 'Title',
-        'lab': lab['@id'],
-        'award': award['@id'],
-        'status': 'released',
-        'valid_item_types': ['ExperimentSeq']
-    }
-    return data
-
-
-def test_validate_exp_type_valid(testapp, experiment_data, new_exp_type):
-    exp_type1 = testapp.post_json('/experiment_type', new_exp_type).json['@graph'][0]
-    experiment_data['experiment_type'] = exp_type1['@id']
-    expt = testapp.post_json('/experiment_hi_c', experiment_data, status=422)
-    testapp.patch_json(exp_type1['@id'], {'valid_item_types': ['ExperimentSeq', 'ExperimentHiC']})
-    expt = testapp.post_json('/experiment_hi_c', experiment_data, status=201).json['@graph'][0]
-    assert expt['experiment_type'] == '/experiment-types/title/'
-
-
-def test_validate_experiment_set_duplicate_replicate_experiments(testapp, rep_set_data, experiment):
-    rep_set_data['replicate_exps'] = [{'bio_rep_no': 1, 'tec_rep_no': 1, 'replicate_exp': experiment['@id']},
-                                      {'bio_rep_no': 1, 'tec_rep_no': 2, 'replicate_exp': experiment['@id']}]
-    repset = testapp.post_json('/experiment_set_replicate', rep_set_data, status=422)
-    assert repset.json['errors'][0]['name'] == 'ExperimentSet: non-unique exps'
-    assert 'Duplicate experiment' in repset.json['errors'][0]['description']
diff --git a/src/encoded/tests/test_types_gene.py b/src/encoded/tests/test_types_gene.py
deleted file mode 100644
index 62fa1bc2d5..0000000000
--- a/src/encoded/tests/test_types_gene.py
+++ /dev/null
@@ -1,203 +0,0 @@
-import pytest
-
-from unittest import mock
-from ..types.gene import (
-    fetch_gene_info_from_ncbi,
-    get_gene_info_from_response_text,
-    map_ncbi2schema,
-)
-
-
-pytestmark = [pytest.mark.working, pytest.mark.schema]
-
-
-def test_get_gene_info_from_response_text_good_response():
-    resp = ('\n'
-            '\n'
-            '<pre>\n'
-            'tax_id\tOrg_name\tGeneID\tCurrentID\tStatus\tSymbol\tAliases\t'
-            'description\tother_designations\tmap_location\tchromosome\t'
-            'genomic_nucleotide_accession.version\tstart_position_on_the_genomic_accession\t'
-            'end_position_on_the_genomic_accession\torientation\texon_count\tOMIM\t\n'
-            '9606\tHomo sapiens\t10664\t0\tlive\tCTCF\tMRD21\tCCCTC-binding factor\t'
-            'transcriptional repressor CTCF|11 zinc finger transcriptional repressor|'
-            '11-zinc finger protein|CCCTC-binding factor (zinc finger protein)|'
-            'CTCFL paralog\t16q22.1\t16\tNC_000016.10\t67562407\t67639185\tplus\t13\t604167\t\n'
-            '</pre>')
-    respdict = get_gene_info_from_response_text(resp)
-    assert respdict.get('tax_id') == '9606'
-    assert respdict.get('Aliases') == 'MRD21'
-    assert respdict.get('orientation') == 'plus'
-    assert respdict.get('OMIM') == '604167'
-
-
-def test_get_gene_info_from_response_text_bad_pre():
-    resp = ('\n'
-            '\n'
-            '<pre>\n'
-            'tax_id\tOrg_name\tGeneID\tCurrentID\tStatus\tSymbol\tAliases\t'
-            'description\tother_designations\tmap_location\tchromosome\t'
-            'genomic_nucleotide_accession.version\tstart_position_on_the_genomic_accession\t'
-            'end_position_on_the_genomic_accession\torientation\texon_count\tOMIM\t\n'
-            '9606\tHomo sapiens\t10664\t0\tlive\tCTCF\tMRD21\tCCCTC-binding factor\t'
-            'transcriptional repressor CTCF|11 zinc finger transcriptional repressor|'
-            '11-zinc finger protein|CCCTC-binding factor (zinc finger protein)|'
-            'CTCFL paralog\t16q22.1\t16\tNC_000016.10\t67562407\t67639185\tplus\t13\t604167\t')
-    respdict = get_gene_info_from_response_text(resp)
-    assert not respdict
-
-
-def test_get_gene_info_from_response_text_error_from_ncbi():
-    resp = ('\n'
-            '\n'
-            '<pre>\n'
-            'tax_id\tOrg_name\tGeneID\tCurrentID\tStatus\tSymbol\tAliases\t'
-            'description\tother_designations\tmap_location\tchromosome\t'
-            'genomic_nucleotide_accession.version\tstart_position_on_the_genomic_accession\t'
-            'end_position_on_the_genomic_accession\torientation\texon_count\tOMIM\t\n'
-            ' Error occurred: cannot get document summary\n'
-            '</pre>')
-    respdict = get_gene_info_from_response_text(resp)
-    assert not respdict
-
-
-def test_get_gene_info_from_response_text_only_one_line():
-    resp = ('\n'
-            '\n'
-            '<pre>\n'
-            'tax_id\tOrg_name\tGeneID\tCurrentID\tStatus\tSymbol\tAliases\t'
-            'description\tother_designations\tmap_location\tchromosome\t'
-            'genomic_nucleotide_accession.version\tstart_position_on_the_genomic_accession\t'
-            'end_position_on_the_genomic_accession\torientation\texon_count\tOMIM\t'
-            ' Error occurred: cannot get document summary\n'
-            '</pre>')
-    respdict = get_gene_info_from_response_text(resp)
-    assert not respdict
-
-
-def test_get_gene_info_from_response_text_multiple_value_lines():
-    resp = ('\n'
-            '\n'
-            '<pre>\n'
-            'tax_id\tOrg_name\tGeneID\tCurrentID\tStatus\tSymbol\tAliases\t'
-            'description\tother_designations\tmap_location\tchromosome\t'
-            'genomic_nucleotide_accession.version\tstart_position_on_the_genomic_accession\t'
-            'end_position_on_the_genomic_accession\torientation\texon_count\tOMIM\t\n'
-            '9606\tHomo sapiens\t10664\t0\tlive\tCTCF\tMRD21\tCCCTC-binding factor\t'
-            'transcriptional repressor CTCF|11 zinc finger transcriptional repressor|'
-            '11-zinc finger protein|CCCTC-binding factor (zinc finger protein)|'
-            'CTCFL paralog\t16q22.1\t16\tNC_000016.10\t67562407\t67639185\tplus\t13\t604167\t\n'
-            ' Error occurred: cannot get document summary\n'
-            '</pre>')
-    respdict = get_gene_info_from_response_text(resp)
-    assert not respdict
-
-
-def test_fetch_gene_info_from_ncbi():
-    geneid = '5885' # human rad21
-    # as of 2019-01-28
-    syns = ['CDLS4', 'HR21', 'HRAD21', 'MCD1', 'MGS', 'NXP1', 'SCC1', 'hHR21']
-    rad21 = {'Symbol': 'RAD21', 'tax_id': '9606', 'Status': 'live',
-             'description': 'RAD21 cohesin complex component',
-             'url': 'https://www.ncbi.nlm.nih.gov/gene/5885'}
-    gene_info = fetch_gene_info_from_ncbi(geneid)
-    for f, v in rad21.items():
-        assert gene_info.get(f) == v
-    aliases = gene_info.get('Aliases')
-    for a in aliases:
-        assert a in syns
-
-
-class MockedResponse(object):
-    def __init__(self, status, text):
-        self.status_code = status
-        self.text = text
-
-
-def test_fetch_gene_info_from_ncbi_429_response():
-    """ mocking a bad ncbi response - because this sleeps it's slow"""
-    geneid = '5885' # human rad21
-    with mock.patch('encoded.types.gene.requests.get', side_effect=[MockedResponse(429, 'response')] * 5):
-        result = fetch_gene_info_from_ncbi(geneid)
-    assert not result
-
-
-def test_fetch_gene_info_from_ncbi_200_bogus_response():
-    """ mocking a bad but 200 ncbi response"""
-    geneid = '5885' # human rad21
-    with mock.patch('encoded.types.gene.requests.get', return_value=MockedResponse(200, 'response')):
-        result = fetch_gene_info_from_ncbi(geneid)
-    assert not result
-
-
-@pytest.fixture
-def rad21_ncbi():
-    return {'Symbol': 'RAD21', 'tax_id': '9606', 'Status': 'live',
-            'description': 'RAD21 cohesin complex component',
-            'url': 'https://www.ncbi.nlm.nih.gov/gene/5885',
-            'chromosome': '8', 'genomic_nucleotide_accession.version': 'NC_000008.11',
-            'Aliases': ['CDLS4', 'HR21', 'HRAD21', 'MCD1', 'MGS', 'NXP1', 'SCC1', 'hHR21']}
-
-
-def test_map_ncbi2schema_all_present_plus_extra(rad21_ncbi):
-    info = map_ncbi2schema(rad21_ncbi)
-    assert len(info) == 6
-    assert info.get('official_symbol') == rad21_ncbi.get('Symbol')
-    assert info.get('organism') == rad21_ncbi.get('tax_id')
-    assert info.get('fullname') == rad21_ncbi.get('description')
-    assert info.get('url') == rad21_ncbi.get('url')
-    assert len(info.get('synonyms')) == 8
-    assert info.get('ncbi_entrez_status') == rad21_ncbi.get('Status')
-    assert 'chromosome' not in info
-    assert 'genomic_nucleotide_accession.version' not in info
-
-
-def test_map_ncbi2schema_none_there():
-    fake = {'A': 'RAD21', 'B': '9606', 'C': 'live',
-            'D': 'RAD21 cohesin complex component'}
-
-    info = map_ncbi2schema(fake)
-    assert not info
-
-
-def test_update_with_good_gene_id_post(testapp, human, rad21_ncbi, lab, award):
-    geneid = '5885' # human rad21
-    # import pdb; pdb.set_trace()
-    gene = testapp.post_json('/gene', {'geneid': geneid, 'lab': lab['@id'], 'award': award['@id']}).json['@graph'][0]
-    assert gene.get('official_symbol') == rad21_ncbi.get('Symbol')
-    assert gene.get('organism') == human.get('@id')
-    assert gene.get('fullname') == rad21_ncbi.get('description')
-    assert gene.get('url') == rad21_ncbi.get('url')
-    assert len(gene.get('synonyms')) == 8
-    assert gene.get('ncbi_entrez_status') == rad21_ncbi.get('Status')
-    assert gene.get('preferred_symbol') == gene.get('official_symbol')
-
-
-def test_update_post_with_preferred_symbol(testapp, human, rad21_ncbi, lab, award):
-    geneid = '5885' # human rad21
-    gene = testapp.post_json('/gene', {'geneid': geneid, 'preferred_symbol': 'George', 'lab': lab['@id'], 'award': award['@id']}).json['@graph'][0]
-    assert gene.get('official_symbol') == rad21_ncbi.get('Symbol')
-    assert gene.get('preferred_symbol') == 'George'
-
-
-def test_update_patch_with_preferred_symbol(testapp, human, rad21_ncbi, lab, award):
-    geneid = '5885' # human rad21
-    gene = testapp.post_json('/gene', {'geneid': geneid, 'lab': lab['@id'], 'award': award['@id']}).json['@graph'][0]
-    assert gene.get('official_symbol') == rad21_ncbi.get('Symbol')
-    assert gene.get('preferred_symbol') == gene.get('official_symbol')
-    upd = testapp.patch_json(gene['@id'], {'preferred_symbol': 'George'}).json['@graph'][0]
-    assert upd.get('official_symbol') == rad21_ncbi.get('Symbol')
-    assert upd.get('preferred_symbol') == 'George'
-
-
-def test_update_post_with_bogus_geneid(testapp, lab, award):
-    geneid = '999999999'
-    missing_fields = ['official_symbol', 'preferred_symbol', 'ncbi_entrez_status',
-                      'fullname', 'organism', 'url']
-    gene = testapp.post_json('/gene', {'geneid': geneid, 'lab': lab['@id'], 'award': award['@id']}).json['@graph'][0]
-    assert gene.get('geneid') == geneid
-    assert 'lab' in gene
-    assert 'award' in gene
-    for mf in missing_fields:
-        assert mf not in gene
-
-
-def test_invalid_geneid(testapp, lab, award):
-    geneid = '99999999999999'
-    testapp.post_json('/gene', {'geneid': geneid, 'lab': lab['@id'], 'award': award['@id']}, status=422)
diff --git a/src/encoded/tests/test_types_imaging.py b/src/encoded/tests/test_types_imaging.py
deleted file mode 100644
index 0d1497db0d..0000000000
--- a/src/encoded/tests/test_types_imaging.py
+++ /dev/null
@@ -1,78 +0,0 @@
-import pytest
-
-
-pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
-
-
-@pytest.fixture
-def img_path_blank(testapp, lab, award):
-    item = {'award': award['@id'], 'lab': lab['@id']}
-    return testapp.post_json('/imaging_path', item).json['@graph'][0]
-
-
-@pytest.fixture
-def p_antibody(testapp, lab, award):
-    item = {'award': award['@id'],
-            'lab': lab['@id'],
-            'antibody_name': 'RAD21 antibody',
-            'antibody_product_no': 'ab12043'}
-    return testapp.post_json('/antibody', item).json['@graph'][0]
-
-
-@pytest.fixture
-def s_antibody(testapp, lab, award):
-    item = {'award': award['@id'],
-            'lab': lab['@id'],
-            'antibody_name': 'anti-mouse antibody',
-            'antibody_product_no': '9876'}
-    return testapp.post_json('/antibody', item).json['@graph'][0]
-
-
-def test_imgpath_displaytitle_target_probe(testapp, img_path_blank, prot_bio_feature):
-    res = testapp.patch_json(img_path_blank['@id'], {'target': [prot_bio_feature['@id']]}).json['@graph'][0]
-    assert res['display_title'] == 'RAD21 protein'
-    res = testapp.patch_json(img_path_blank['@id'], {'labeled_probe': 'imaging probe'}).json['@graph'][0]
-    assert res['display_title'] == 'RAD21 protein targeted by imaging probe'
-
-
-def test_imgpath_displaytitle(testapp, img_path_blank, prot_bio_feature):
-    assert img_path_blank['display_title'] == 'not enough information'
-    res = testapp.patch_json(img_path_blank['@id'], {'target': [prot_bio_feature['@id']],
-                                                     'labels': ['GFP', 'RFP']}).json['@graph'][0]
-    assert res['display_title'] == 'RAD21 protein targeted by GFP,RFP'
-    res = testapp.patch_json(img_path_blank['@id'], {'labeled_probe': 'imaging probe'}).json['@graph'][0]
-    assert res['display_title'] == 'RAD21 protein targeted by GFP,RFP-labeled imaging probe'
-    res = testapp.patch_json(img_path_blank['@id'], {'other_probes': ['intermediate probe 1', 'other probe 2']}).json['@graph'][0]
-    assert res['display_title'] == 'RAD21 protein targeted by intermediate probe 1, other probe 2 (with GFP,RFP-labeled imaging probe)'
-    res = testapp.patch_json(img_path_blank['@id'], {'override_display_title': 'Custom title'}).json['@graph'][0]
-    assert res['display_title'] == 'Custom title'
-
-
-def test_imgpath_displaytitle_antibodies(testapp, img_path_blank, prot_bio_feature, p_antibody, s_antibody):
-    res = testapp.patch_json(img_path_blank['@id'], {'target': [prot_bio_feature['@id']],
-                                                     'primary_antibodies': [p_antibody['@id']],
-                                                     'secondary_antibody': s_antibody['@id'],
-                                                     'labels': ['AF 647']}).json['@graph'][0]
-    assert res['display_title'] == 'RAD21 protein targeted by RAD21 antibody (with AF 647-labeled anti-mouse antibody)'
-    res = testapp.patch_json(img_path_blank['@id'], {'other_probes': ['other probe'],
-                                                     'labeled_probe': 'imaging probe'}).json['@graph'][0]
-    assert res['display_title'] == 'RAD21 protein targeted by other probe, RAD21 antibody (with AF 647-labeled imaging probe, anti-mouse antibody)'
-
-
-def test_imgpath_displaytitle_duplicate_label_on_secondary_ab(testapp, img_path_blank, prot_bio_feature, s_antibody):
-    labeled_sec_ab = testapp.patch_json(s_antibody['@id'], {'antibody_name': 'anti-mouse AF 647'}).json['@graph'][0]
-    res = testapp.patch_json(img_path_blank['@id'], {'target': [prot_bio_feature['@id']],
-                                                     'secondary_antibody': labeled_sec_ab['@id'],
-                                                     'labels': ['AF 647']}).json['@graph'][0]
-    assert res['display_title'] == 'RAD21 protein targeted by anti-mouse AF 647'
-
-
-def test_imgpath_displaytitle_labels_only(testapp, img_path_blank):
-    res = testapp.patch_json(img_path_blank['@id'], {'labels': ['GFP', 'RFP']}).json['@graph'][0]
-    assert res['display_title'] == 'GFP,RFP'
-
-
-def test_imgpath_displaytitle_labeled_probe_only(testapp, img_path_blank):
-    res = testapp.patch_json(img_path_blank['@id'], {'labels': ['GFP'],
-                                                     'labeled_probe': 'imaging probe'}).json['@graph'][0]
-    assert res['display_title'] == 'GFP-labeled imaging probe'
diff --git a/src/encoded/tests/test_types_individual.py b/src/encoded/tests/test_types_individual.py
deleted file mode 100644
index b9f444d7a1..0000000000
--- a/src/encoded/tests/test_types_individual.py
+++ /dev/null
@@ -1,100 +0,0 @@
-import pytest
-
-
-pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
-
-
-@pytest.fixture
-def mouse_child(testapp, mouse, lab, award):
-    return {
-        'uuid': '4731449b-f283-4fdf-ad8a-b69cf5a7e68a',
-        'award': award['@id'],
-        'lab': lab['@id'],
-        'organism': mouse['@id'],
-        "sex": "female",
-    }
-
-
-@pytest.fixture
-def mouse_individual_2(testapp, mouse, lab, award):
-    item = {
-        'uuid': 'd89c5c5b-a427-4efa-b6da-44239d92f2e7',
-        "age": 99,
-        "age_units": "day",
-        'award': award['@id'],
-        'lab': lab['@id'],
-        'organism': mouse['@id'],
-        "mouse_strain": "Balb-c",
-        "mouse_life_stage": "adult",
-        "sex": "male",
-    }
-    return testapp.post_json('/individual_mouse', item).json['@graph'][0]
-
-
-def test_validate_individual_relation_valid_post(testapp, award, lab, mouse_individual, mouse_child):
-    mouse_child['individual_relation'] = [
-        {'relationship_type': 'derived from', 'individual': mouse_individual['@id']}]
-    res = testapp.post_json('/individual_mouse', mouse_child, status=201)
-    assert not res.json.get('errors')
-
-
-def test_validate_individual_relation_species(testapp, award, lab, mouse_child, human_individual):
-    mouse_child['individual_relation'] = [{'relationship_type': 'derived from', 'individual': human_individual['@id']}]
-    res = testapp.post_json('/individual_mouse', mouse_child, status=422)
-    errors = res.json['errors']
-    assert errors[0]['name'] == 'Individual relation: different species'
-
-
-def test_validate_individual_relation_valid_patch(testapp, award, lab, mouse_child,
-                                                  mouse_individual, mouse_individual_2):
-    res = testapp.post_json('/individual_mouse', mouse_child, status=201)
-    assert not res.json.get('errors')
-    patch_body = {
-        'individual_relation': [
-            {'relationship_type': 'derived from', 'individual': mouse_individual['@id']},
-            {'relationship_type': 'derived from', 'individual': mouse_individual_2['@id']}
-        ]
-    }
-    res2 = testapp.patch_json(res.json['@graph'][0]['@id'], patch_body, status=200)
-    assert not res2.json.get('errors')
-
-
-def test_validate_individual_relation_valid_patch_contains_uuid(testapp, award, lab, mouse_child,
-                                                                mouse_individual, mouse_individual_2):
-    child_mouse = testapp.post_json('/individual_mouse', mouse_child, status=201).json['@graph'][0]
-    patch_body = {
-        'uuid': child_mouse.get('uuid'),
-        'individual_relation': [
-            {'relationship_type': 'derived from', 'individual': mouse_individual['@id']},
-            {'relationship_type': 'derived from', 'individual': mouse_individual_2['@id']}
-        ]
-    }
-    res2 = testapp.patch_json(child_mouse['@id'], patch_body, status=200)
-    assert not res2.json.get('errors')
-
-
-def test_validate_individual_relation_self(testapp, award, lab, mouse_child):
-    res = testapp.post_json('/individual_mouse', mouse_child, status=201)
-    assert not res.json.get('errors')
-    patch_body = [{'relationship_type': 'derived from', 'individual': res.json['@graph'][0]['@id']}]
-    res2 = testapp.patch_json(res.json['@graph'][0]['@id'], {'individual_relation': patch_body}, status=422)
-    errors = res2.json['errors']
-    assert errors[0]['name'] == 'Individual relation: self-relation'
-
-
-def test_validate_individual_relation_same(testapp, award, lab, mouse_individual, mouse_individual_2, mouse_child):
-    mouse_child['individual_relation'] = [
-        {'relationship_type': 'derived from (maternal strain)', 'individual': mouse_individual['@id']},
-        {'relationship_type': 'derived from (maternal strain)', 'individual': mouse_individual_2['@id']}]
-    res = testapp.post_json('/individual_mouse', mouse_child, status=422)
-    errors = res.json['errors']
-    assert errors[0]['name'] == 'Individual relation: too many of the same type'
-
-
-def test_validate_individual_relation_duplicate(testapp, award, lab, mouse_individual, mouse_child):
-    mouse_child['individual_relation'] = [
-        {'relationship_type': 'derived from', 'individual': mouse_individual['@id']},
-        {'relationship_type': 'derived from (maternal strain)', 'individual': mouse_individual['@id']}]
-    res = testapp.post_json('/individual_mouse', mouse_child, status=422)
-    errors = res.json['errors']
-    assert errors[0]['name'] == 'Individual relation: multiple relations with same parent'
diff --git a/src/encoded/tests/test_types_init_collections.py b/src/encoded/tests/test_types_init_collections.py
deleted file mode 100644
index 6a5b2316a6..0000000000
--- a/src/encoded/tests/test_types_init_collections.py
+++ /dev/null
@@ -1,255 +0,0 @@
-import pytest
-
-from dcicutils.misc_utils import utc_today_str
-from ..types.image import Image
-# from ..util import utc_today_str
-
-
-pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
-
-
-@pytest.fixture
-def genomic_region_w_onlyendloc(testapp, lab, award):
-    item = {
-        "genome_assembly": "dm6",
-        "end_coordinate": 3,
-        'award': award['@id'],
-        'lab': lab['@id']
-    }
-    return testapp.post_json('/genomic_region', item).json['@graph'][0]
-
-
-@pytest.fixture
-def dt4genomic_regions(genomic_region_w_onlyendloc, some_genomic_region, basic_genomic_region,
-                       vague_genomic_region, vague_genomic_region_w_desc):
-    return {
-        'dm6': genomic_region_w_onlyendloc,
-        'GRCh38:1:17-544': some_genomic_region,
-        'GRCh38': basic_genomic_region,
-        'GRCm38:5': vague_genomic_region,
-        'gene X enhancer': vague_genomic_region_w_desc
-    }
-
-
-@pytest.fixture
-def targets(target_w_desc, target_w_region, target_w_genes):
-    return {'target_w_desc': target_w_desc,
-            'target_w_region': target_w_region,
-            'target_w_genes': target_w_genes
-            }
-
-
-@pytest.mark.skip # targets are not used
-def test_calculated_target_summaries(testapp, targets):
-    for name in targets:
-        summary = targets[name]['target_summary']
-        if name == 'target_w_genes':
-            assert summary == 'Gene:eeny,meeny'
-        if name == 'target_w_regions' in targets:
-            assert summary == 'GRCh38:X:1-3'
-        if name == 'target_w_desc':
-            assert summary == 'no target'
-
-
-def test_document_display_title_w_attachment(testapp, protocol_data, attachment):
-    protocol_data['attachment'] = attachment
-    del(protocol_data['protocol_type'])
-    res = testapp.post_json('/document', protocol_data).json['@graph'][0]
-    assert res.get('display_title') == 'red-dot.png'
-
-
-def test_document_display_title_wo_attachment(testapp, protocol_data):
-    del(protocol_data['protocol_type'])
-    res = testapp.post_json('/document', protocol_data).json['@graph'][0]
-    assert res.get('display_title') == 'Document from ' + utc_today_str()
-
-
-def test_organism_display_title_standard_scientific_name(testapp, human_data):
-    res = testapp.post_json('/organism', human_data).json['@graph'][0]
-    assert res.get('display_title') == 'H. sapiens'
-
-
-def test_organism_display_title_three_part_scientific_name(testapp, human_data):
-    human_data['scientific_name'] = 'Drosophila pseudoobscura pseudoobscura'
-    res = testapp.post_json('/organism', human_data).json['@graph'][0]
-    assert res.get('display_title') == 'D. pseudoobscura pseudoobscura'
-
-
-def test_organism_display_title_one_part_scientific_name(testapp, human_data):
-    human_data['scientific_name'] = 'george'
-    res = testapp.post_json('/organism', human_data).json['@graph'][0]
-    assert res.get('display_title') == 'george'
-
-
-def test_organism_display_title_no_scientific_name(testapp, human_data):
-    del(human_data['scientific_name'])
-    res = testapp.post_json('/organism', human_data).json['@graph'][0]
-    assert res.get('display_title') == 'human'
-
-
-@pytest.fixture
-def google_analytics_tracking_data():
-    return {
-        "status": "released",
-        "tracking_type": "google_analytics",
-        "google_analytics": {
-            "reports": {
-                "views_by_experiment_set": [
-                    {
-                        "ga:productCategoryLevel2": "ExperimentSetReplicate",
-                        "ga:productName": "4DNESKSPBI9A",
-                        "ga:productListClicks": 1,
-                        "ga:productListViews": 21,
-                        "ga:productSku": "4DNESKSPBI9A",
-                        "ga:productDetailViews": 4,
-                        "ga:productBrand": "Chuck Murry, UW"
-                    }
-                ],
-                "fields_faceted": [
-                    {
-                        "ga:users": 12,
-                        "ga:totalEvents": 19,
-                        "ga:sessions": 13,
-                        "ga:dimension3": "experiments_in_set.experiment_type.display_title"
-                    },
-                    {
-                        "ga:users": 13,
-                        "ga:totalEvents": 16,
-                        "ga:sessions": 15,
-                        "ga:dimension3": "experiments_in_set.biosample.biosource.organism.name"
-                    }
-                ],
-                "views_by_file": [
-                    {
-                        "ga:productCategoryLevel2": "FileProcessed",
-                        "ga:productName": "4DNFIC2XS1Y3.mcool",
-                        "ga:productListClicks": 0,
-                        "ga:productListViews": 0,
-                        "ga:productSku": "4DNFIC2XS1Y3",
-                        "ga:productDetailViews": 1,
-                        "ga:productBrand": "Erez Lieberman Aiden, BCM"
-                    }
-                ]
-            },
-            "for_date": "2019-05-09",
-            "date_increment": "daily"}
-    }
-
-
-@pytest.fixture
-def google_analytics(testapp, google_analytics_tracking_data):
-    return testapp.post_json('/tracking_item', google_analytics_tracking_data).json['@graph'][0]
-
-
-@pytest.fixture
-def download_tracking_item_data():
-    return {
-        "status": "released",
-        "tracking_type": "download_tracking",
-        "download_tracking": {
-            "geo_country": "NL",
-            "geo_city": "Utrecht, Provincie Utrecht",
-            "request_path": "/files-processed/4DNFI6BTR1IC/@@download/4DNFI6BTR1IC.pairs.gz.px2",
-            "user_uuid": "anonymous",
-            "user_agent": "Wget/1.17.1 (linux-gnu)",
-            "remote_ip": "192.87.138.11",
-            "file_format": "pairs_px2",
-            "filename": "4DNFI6BTR1IC.pairs.gz.px2",
-            "experiment_type": "in situ Hi-C"
-        }
-    }
-
-
-@pytest.fixture
-def download_tracking(testapp, download_tracking_item_data):
-    return testapp.post_json('/tracking_item', download_tracking_item_data).json['@graph'][0]
-
-
-@pytest.fixture
-def jupyterhub_session_tracking_data():
-    return {
-        "status": "in review by lab",
-        "tracking_type": "jupyterhub_session",
-        "jupyterhub_session": {
-            "date_initialized": "2019-05-09T05:11:56.389876+00:00",
-            "date_culled": "2019-05-09T06:21:54.726782+00:00",
-            "user_uuid": "e0beacd7-225f-4fa8-81fb-a1856603e204"
-        },
-        "uuid": "ff4575d4-67b4-458f-8b1c-b3fcb3690ce9",
-    }
-
-
-@pytest.fixture
-def jupyterhub_session(testapp, jupyterhub_session_tracking_data):
-    return testapp.post_json('/tracking_item', jupyterhub_session_tracking_data).json['@graph'][0]
-
-
-def test_tracking_item_display_title_google_analytic(google_analytics):
-    assert google_analytics.get('display_title') == 'Google Analytics for 2019-05-09'
-
-
-def test_tracking_item_display_title_download(download_tracking):
-    assert download_tracking.get('display_title') == 'Download Tracking Item from ' + utc_today_str()
-
-
-def test_tracking_item_display_title_other(jupyterhub_session):
-    assert jupyterhub_session.get('display_title') == 'Tracking Item from ' + utc_today_str()
-
-
-@pytest.fixture
-def vendor_data(lab, award):
-    return {"title": "WorTHington Biochemical", 'lab': lab['@id'], 'award': award['@id']}
-
-
-def test_vendor_update_name_no_caps(testapp, vendor_data):
-    res = testapp.post_json('/vendor', vendor_data, status=201)
-    assert res.json['@graph'][0]['name'] == "worthington-biochemical"
-
-
-def test_vendor_update_name_no_punctuation_or_space(testapp, vendor_data):
-    vendor_data['title'] = "Eeny, = Meeny! # -miny?"
-    res = testapp.post_json('/vendor', vendor_data, status=201)
-    assert res.json['@graph'][0]['name'] == "eeny-meeny-miny"
-
-
-def test_vendor_name_updates_on_patch(testapp, vendor_data):
-    res = testapp.post_json('/vendor', vendor_data, status=201)
-    assert res.json['@graph'][0]['name'] == "worthington-biochemical"
-    res = testapp.patch_json(res.json['@graph'][0]['@id'], {'title': 'WaHoo'}, status=200)
-    assert res.json['@graph'][0]['name'] == "wahoo"
-
-
-@pytest.fixture
-def vendor_data_alias(lab, award):
-    return {
-        'title': 'Wrong Alias Biochemical',
-        'lab': lab['@id'],
-        'award': award['@id'],
-        'aliases': ['my_lab:this_is_correct_one',
-                    'my_lab:this/is_wrong',
-                    'my_lab:this\\is_wrong_too']}
-
-
-def test_vendor_alias_wrong_format(testapp, vendor_data_alias):
-    res = testapp.post_json('/vendor', vendor_data_alias, status=422)
-    response = res.json
-    print(res.json)
-    assert response['status'] == 'error'
-    assert response['code'] == 422
-    problematic_aliases = 0
-    for an_error in response['errors']:
-        if an_error['name'].startswith('Schema: aliases'):
-            problematic_aliases += 1
-    assert problematic_aliases == 2
-
-
-def test_genomic_region_display_title(testapp, dt4genomic_regions):
-    for dt, region in dt4genomic_regions.items():
-        assert region.get('display_title') == dt
-
-
-def test_image_unique_key(registry, image_data):
-    uuid = "0afb6080-1c08-11e4-8c21-0800200c9a44"
-    image = Image.create(registry, uuid, image_data)
-    keys = image.unique_keys(image.properties)
-    assert 'red-dot.png' in keys['image:filename']
diff --git a/src/encoded/tests/test_types_microscope_configuration.py b/src/encoded/tests/test_types_microscope_configuration.py
deleted file mode 100644
index 1b738c454a..0000000000
--- a/src/encoded/tests/test_types_microscope_configuration.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import pytest
-from ..schema_formats import is_uuid
-
-
-pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
-
-
-@pytest.fixture
-def tier1_microscope_configuration(testapp):
-    item = {
-        'uuid': 'e700e61c-9da5-465f-9b4f-189852897df5',
-        'microscope': {
-            'Tier': 1,
-            'ValidationTier': 1,
-            'Name': 'Test Mic. Conf.'
-        }
-    }
-    return testapp.post_json('/microscope-configurations', item).json['@graph'][0]
-
-
-def test_get_tier1_microscope(testapp, tier1_microscope_configuration):
-    assert tier1_microscope_configuration['microscope']['Tier'] == 1
-    assert tier1_microscope_configuration['microscope']['ValidationTier'] == 1
-    assert is_uuid(tier1_microscope_configuration['microscope']['ID'])
-
-
-def test_tier1_microscope_display_title(testapp, tier1_microscope_configuration):
-    assert tier1_microscope_configuration['display_title'] == 'Test Mic. Conf.'
-    tier1_microscope_configuration['microscope']['Name'] = 'Test Mic. Conf. Updated'
-    res = testapp.patch_json(tier1_microscope_configuration['@id'], {
-        'microscope': tier1_microscope_configuration['microscope']}, status=200)
-    assert res.json['@graph'][0].get(
-        'display_title') == 'Test Mic. Conf. Updated'
diff --git a/src/encoded/tests/test_types_modification.py b/src/encoded/tests/test_types_modification.py
deleted file mode 100644
index 960e60f912..0000000000
--- a/src/encoded/tests/test_types_modification.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import pytest
-
-
-pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
-
-
-@pytest.fixture
-def modifications(basic_modification, mod_w_genomic_change, mod_w_target,
-                  mod_w_change_and_target):
-    return {
-        'basic_mod': basic_modification,
-        'mod_w_gen_chg': mod_w_genomic_change,
-        'mod_w_target': mod_w_target,
-        'mod_w_both': mod_w_change_and_target
-    }
-
-
-def test_calculated_modification_name(testapp, modifications):
-    for name in modifications:
-        modname = modifications[name]['modification_name']
-        short = modifications[name]['modification_name_short']
-        # assert modifications[name]['modification_name_short'] == modname
-        if name == 'basic_mod':
-            assert modname == 'Crispr' and short == 'Crispr'
-        elif name == 'mod_w_gen_chg':
-            assert modname == 'Crispr deletion' and short == 'deletion'
-        elif name == 'mod_w_target':
-            assert modname == 'Crispr for RAD21 gene'
-            assert short == 'RAD21 Crispr'
-        elif name == 'mod_w_both':
-            assert modname == 'Crispr deletion for RAD21 gene'
-            assert short == 'RAD21 deletion'
-
-
-def test_calculated_modification_name_w_override(testapp, mod_w_change_and_target):
-    assert mod_w_change_and_target.get('modification_name') == 'Crispr deletion for RAD21 gene'
-    assert mod_w_change_and_target.get('modification_name_short') == 'RAD21 deletion'
-    item_patch = {'override_modification_name': 'RAD21 is gone!'}
-    res = testapp.patch_json(mod_w_change_and_target['@id'], item_patch, status=200).json['@graph'][0]
-    assert res.get('modification_name') == 'RAD21 is gone!'
-    assert res.get('modification_name_short') == 'RAD21 is gone!'
diff --git a/src/encoded/tests/test_types_ontology_term.py b/src/encoded/tests/test_types_ontology_term.py
deleted file mode 100644
index 9be6a576cf..0000000000
--- a/src/encoded/tests/test_types_ontology_term.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import pytest
-
-
-pytestmark = [pytest.mark.setone, pytest.mark.working]
-
-"""
-FF-701
-Multiple ontologies get imported to form each of the three ontologies that we are currently using
-(and I can foresee us wanting to add additional ontology sources). The same name can be used for
-terms with different identifiers (we may not currently have such cases but I can check) so you might
-have the term ‘lung’ from UBERON and ‘lung’ from EFO and in theory these 2 terms could be meaning
-different things. For the most part this shouldn’t be a problem but without pre-screening for name
-uniqueness and somehow (likely through human inspection) figuring out which name should be used and
-which changed during the generate_ontology processing step then names aren’t guaranteed unique.
-
-Ideally I’m hoping that the sort of check that I’m asking for can be set up in such a way so that
-there can be some additional validation built in.
-
-For instance checking to see that if someone is entering a new tissue term then that term is not
-really referring to a cell line.
-
-This can be determined by info that will be stored with the term in the system.
-
-So basically what I’m looking for is validation that can get the json from the request and also get
-info on existing terms that are in the system and do some checks prior to post or patch of the item
-and also change the json to use uuid rather than the info included in the post (eg. preferred_name)
-as an identifying property if the term validates.
-"""
-
-
-def test_store_ontology_term_by_uuid(testapp, oterm):
-    oterm.pop('term_name') # this will create preferred_name in _update
-    oterm.pop('preferred_name')
-    res = testapp.post_json('/ontology_term', oterm)
-    assert res.json['@graph'][0]['uuid'] == oterm['uuid']
-    assert res.json['@graph'][0]['term_id'] == oterm['term_id']
-    assert res.json['@graph'][0].get('preferred_name', None) is None
-
-
-def test_store_ontology_term_by_term_id(testapp, oterm):
-    oterm.pop('term_name') # this will create preferred_name in _update
-    oterm.pop('preferred_name')
-    oterm.pop('uuid')
-    res = testapp.post_json('/ontology_term', oterm)
-    assert res.json['@graph'][0]['term_id'] == oterm['term_id']
-    assert res.json['@graph'][0].get('preferred_name', None) is None
-    testapp.get('/ontology_term/' + oterm['term_id'])
-
-
-def test_store_ontology_no_required_keys(testapp, oterm):
-    oterm.pop('term_name')
-    oterm.pop('uuid')
-    oterm.pop('term_id')
-    testapp.post_json('/ontology_term', oterm, status=422)
-
-
-def test_linkto_ontology_term_by_term_id(testapp, lab, award, oterm):
-    item = {
-        "accession": "4DNSROOOAAQ1",
-        "biosource_type": "immortalized cell line",
-        'award': award['@id'],
-        'lab': lab['@id'],
-        'tissue': oterm['term_id']
-    }
-
-    res = testapp.post_json('/ontology_term', oterm).json['@graph'][0]
-    res_biosource = testapp.post_json('/biosource', item).json['@graph'][0]
-    assert res['@id'] == res_biosource['tissue']
diff --git a/src/encoded/tests/test_types_protocol.py b/src/encoded/tests/test_types_protocol.py
deleted file mode 100644
index 9c02b3934c..0000000000
--- a/src/encoded/tests/test_types_protocol.py
+++ /dev/null
@@ -1,66 +0,0 @@
-import pytest
-from dcicutils.misc_utils import utc_today_str
-
-
-pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
-
-
-@pytest.fixture
-def protocol_w_attach(testapp, protocol_data, attachment):
-    protocol_data['attachment'] = attachment
-    return testapp.post_json('/protocol', protocol_data).json['@graph'][0]
-
-
-def test_protocol_display_title_w_attachment(testapp, protocol_data, attachment):
-    res = testapp.post_json('/protocol', protocol_data).json['@graph'][0]
-    assert res.get('display_title').startswith('Experimental protocol')
-    patched = testapp.patch_json(res['@id'], {'attachment': attachment}).json['@graph'][0]
-    assert patched.get('display_title') == 'red-dot.png'
-
-
-def test_protocol_display_title_w_title(testapp, protocol_data, attachment):
-    protocol_data['attachment'] = attachment
-    res = testapp.post_json('/protocol', protocol_data).json['@graph'][0]
-    assert res.get('display_title') == 'red-dot.png'
-    patched = testapp.patch_json(res['@id'], {'title': 'The best method'}).json['@graph'][0]
-    assert patched.get('display_title') == 'The best method'
-
-
-def test_protocol_display_title_wo_attachment(testapp, protocol_data):
-    protocol = testapp.post_json('/protocol', protocol_data).json['@graph'][0]
-    assert protocol['display_title'] == 'Experimental protocol from ' + utc_today_str()
-
-
-def test_protocol_other_display_title_wo_attachment(testapp, protocol_data):
-    protocol_data['protocol_type'] = 'Other'
-    protocol = testapp.post_json('/protocol', protocol_data).json['@graph'][0]
-    assert protocol['display_title'] == 'Protocol from ' + utc_today_str()
-
-
-def test_protocol_experiment_type(testapp, protocol_data, exp_types):
-    hic_exptype = exp_types.get('hic')
-    protocol = testapp.post_json('/protocol', protocol_data).json['@graph'][0]
-    assert 'experiment_type' not in protocol
-    testapp.patch_json(hic_exptype['@id'], {'other_protocols': [protocol['@id']]})
-    res = testapp.get(protocol['@id'])
-    assert res.json.get('experiment_type', {}).get('@id') == hic_exptype['@id']
-
-
-def test_protocol_experiment_type_sop(testapp, protocol_data, exp_types):
-    hic_exptype = exp_types.get('hic')
-    protocol = testapp.post_json('/protocol', protocol_data).json['@graph'][0]
-    assert 'experiment_type' not in protocol
-    testapp.patch_json(hic_exptype['@id'], {'sop': protocol['@id']})
-    res = testapp.get(protocol['@id'])
-    assert res.json.get('experiment_type', {}).get('@id') == hic_exptype['@id']
-
-
-def test_protocol_experiment_type_multiple(testapp, protocol_data, exp_types):
-    hic_exptype = exp_types.get('hic')
-    dam_exptype = exp_types.get('dam')
-    protocol = testapp.post_json('/protocol', protocol_data).json['@graph'][0]
-    assert 'experiment_type' not in protocol
-    testapp.patch_json(hic_exptype['@id'], {'sop': protocol['@id']})
-    testapp.patch_json(dam_exptype['@id'], {'other_protocols': [protocol['@id']]})
-    res = testapp.get(protocol['@id'])
-    assert res.json.get('experiment_type', {}).get('@id') == hic_exptype['@id']
diff --git a/src/encoded/tests/test_types_publication.py b/src/encoded/tests/test_types_publication.py
deleted file mode 100644
index 6b3bbe592a..0000000000
--- a/src/encoded/tests/test_types_publication.py
+++ /dev/null
@@ -1,157 +0,0 @@
-import pytest
-
-from ..types.publication import find_best_date
-
-
-pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
-
-
-@pytest.fixture
-def publication_PMID(testapp, lab, award):
-    item = {
-        'uuid': '8312fc0c-b241-4cb2-9b01-143891055000',
-        'award': award['@id'],
-        'lab': lab['@id'],
-        'ID': "PMID:26673895",
-    }
-    return testapp.post_json('/publication', item, status=201).json['@graph'][0]
-
-
-@pytest.fixture
-def publication_doi_pubmed(testapp, lab, award):
-    item = {
-        'uuid': '8312fc0c-b241-4cb2-9b01-143891055001',
-        'award': award['@id'],
-        'lab': lab['@id'],
-        'ID': "doi:10.1093/nar/gkv1046",
-    }
-    return testapp.post_json('/publication', item, status=201).json['@graph'][0]
-
-
-@pytest.fixture
-def publication_doi_biorxiv(testapp, lab, award):
-    item = {
-        'uuid': '8312fc0c-b241-4cb2-9b01-143891055002',
-        'award': award['@id'],
-        'lab': lab['@id'],
-        'ID': "doi:10.1101/2021.10.14.464435"
-    }
-    return testapp.post_json('/publication', item, status=201).json['@graph'][0]
-
-
-# data from test/datafixtures
-def test_update_publication_PMID(testapp, publication_PMID):
-    assert publication_PMID['title'][:50] == 'A deep proteomics perspective on CRM1-mediated nuc'
-    assert publication_PMID['abstract'][:50] == 'CRM1 is a highly conserved, RanGTPase-driven expor'
-    assert publication_PMID['authors'][:4] == ['Kirli K', 'Karaca S', 'Dehne HJ', 'Samwer M']
-    assert publication_PMID['url'] == 'https://www.ncbi.nlm.nih.gov/pubmed/26673895'
-    assert publication_PMID['date_published'] == '2015-12-17'
-    assert publication_PMID['journal'] == 'eLife'
-
-
-def test_update_publication_doi_pubmed(testapp, publication_doi_pubmed):
-    assert publication_doi_pubmed['title'][:50] == 'FlyBase: establishing a Gene Group resource for Dr'
-    assert publication_doi_pubmed['abstract'][:50] == 'Many publications describe sets of genes or gene p'
-    assert publication_doi_pubmed['authors'][:4] == ['Attrill H', 'Falls K', 'Goodman JL', 'Millburn GH']
-    assert publication_doi_pubmed['url'] == 'https://www.ncbi.nlm.nih.gov/pubmed/26467478'
-    assert publication_doi_pubmed['date_published'] == '2016-01-04'
-    assert publication_doi_pubmed['journal'] == 'Nucleic acids research'
-
-
-def test_update_publication_doi_biorxiv(testapp, publication_doi_biorxiv):
-    assert publication_doi_biorxiv['title'][:50] == 'The 4D Nucleome Data Portal: a resource for search'
-    assert publication_doi_biorxiv['abstract'][:50] == 'The 4D Nucleome (4DN) Network aims to elucidate th'
-    assert publication_doi_biorxiv['authors'][:4] == ['Reiff SB', 'Schroeder AJ', 'Kirli K', 'Cosolo A']
-    assert publication_doi_biorxiv['url'] == 'http://biorxiv.org/lookup/doi/10.1101/2021.10.14.464435'
-    assert publication_doi_biorxiv['date_published'] == '2021-10-15'
-    assert publication_doi_biorxiv['journal'] == 'bioRxiv'
-    assert publication_doi_biorxiv['version'] == '1'
-
-
-def test_update_publication_date_published(testapp, publication_PMID):
-    assert publication_PMID['date_published'] == '2015-12-17'
-    # make sure we can overwrite date_published
-    res = testapp.patch_json(publication_PMID['@id'], {'date_published': '01-01-1990'})
-    assert res.json['@graph'][0]['date_published'] == '01-01-1990'
-    # now make sure it reverts when we delete it
-    res2 = testapp.patch_json(publication_PMID['@id'] + '?delete_fields=date_published', {})
-    assert res2.json['@graph'][0]['date_published'] == '2015-12-17'
-
-
-def test_find_best_date_full_dp():
-    date_info = {'DP': '2018 Jan 7', 'DEP': '20170122', 'DA': '20170111'}
-    date = find_best_date(date_info)
-    assert date == '2018-01-07'
-
-
-def test_find_best_date_dp_missing_day():
-    date_info = {'DP': '2018 Nov', 'DEP': '20170122', 'DA': '20170111'}
-    date = find_best_date(date_info)
-    assert date == '2018-11'
-
-
-def test_find_best_date_dp_missing_mnth_day():
-    date_info = {'DP': '2018', 'DEP': '20170122', 'DA': '20170111'}
-    date = find_best_date(date_info)
-    assert date == '2018'
-
-
-def test_find_best_date_dp_misformat_yr():
-    date_info = {'DP': '201', 'DEP': '20170122', 'DA': '20170111'}
-    date = find_best_date(date_info)
-    assert date == '2017-01-22'
-
-
-def test_find_best_date_dp_unknown_month():
-    date_info = {'DP': '2018 22', 'DEP': '20170122', 'DA': '20170111'}
-    date = find_best_date(date_info)
-    assert date == '2018'
-
-
-def test_find_best_date_dp_misformated_day():
-    date_info = {'DP': '2018 Jan 222', 'DEP': '20170122', 'DA': '20170111'}
-    date = find_best_date(date_info)
-    assert date == '2018-01'
-
-
-def test_find_best_date_no_dp():
-    date_info = {'DEP': '20170122', 'DA': '20170111'}
-    date = find_best_date(date_info)
-    assert date == '2017-01-22'
-
-
-def test_find_best_date_misformatted_dp_w_da():
-    date_info = {'DEP': '2017012', 'DA': '20170111'}
-    date = find_best_date(date_info)
-    assert date == '2017-01-11'
-
-
-def test_find_best_date_misformatted_dp_only():
-    date_info = {'DEP': '2017012'}
-    date = find_best_date(date_info)
-    assert date is None
-
-
-def test_find_best_date_da_only():
-    date_info = {'DA': '20161104'}
-    date = find_best_date(date_info)
-    assert date == '2016-11-04'
-
-
-def test_publication_display_title(testapp, publication_PMID):
-    print(publication_PMID)
-    assert publication_PMID['display_title'] == 'Kirli K et al. (2015) PMID:26673895'
-
-
-def test_publication_unique_ID(testapp, publication_doi_pubmed, publication_doi_biorxiv):
-    # POST again with same ID and expect a ValidationError
-    new_pub = {fld: publication_doi_pubmed[fld] for fld in ['ID', 'lab', 'award']}
-    res = testapp.post_json('/publication', new_pub, status=422)
-    expected_val_err = "%s already exists with ID '%s'" % (publication_doi_pubmed['uuid'], new_pub['ID'])
-    assert res.json['errors'][0]['name'] == 'Publication: non-unique ID'
-    assert expected_val_err in res.json['errors'][0]['description']
-
-    # also test PATCH of an existing publication with another pub's ID
-    res = testapp.patch_json(publication_doi_biorxiv['@id'], {'ID': new_pub['ID']}, status=422)
-    assert res.json['errors'][0]['name'] == 'Publication: non-unique ID'
-    assert expected_val_err in res.json['errors'][0]['description']
diff --git a/src/encoded/tests/test_types_quality_metric.py b/src/encoded/tests/test_types_quality_metric.py
deleted file mode 100644
index eee1607884..0000000000
--- a/src/encoded/tests/test_types_quality_metric.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import pytest
-
-
-pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
-
-
-@pytest.fixture
-def qc_bamcheck_data1(lab, award):
-    return {
-        "uuid": "af8e47c1-35bd-46fd-8a2e-e5d7b89560aa",
-        'lab': lab['@id'],
-        'award': award['@id'],
-        "number_of_lines": 1234567,
-        "quickcheck": "OK"
-    }
-
-
-@pytest.fixture
-def qc_bamcheck_data2(lab, award):
-    return {
-        "uuid": "af8e47c1-35bd-46fd-8a2e-e5d7b89560ab",
-        'lab': lab['@id'],
-        'award': award['@id'],
-        "number_of_lines": 1234568,
-        "quickcheck": " not OK"
-    }
-
-
-def test_overall_quality_pass(testapp, qc_bamcheck_data1):
-    res = testapp.post_json('/quality_metric_bamcheck', qc_bamcheck_data1, status=201)
-    assert res.json['@graph'][0]['overall_quality_status'] == "PASS"
-
-
-def test_overall_quality_fail(testapp, qc_bamcheck_data2):
-    res = testapp.post_json('/quality_metric_bamcheck', qc_bamcheck_data2, status=201)
-    assert res.json['@graph'][0]['overall_quality_status'] == "FAIL"
diff --git a/src/encoded/tests/test_types_tracking_item.py b/src/encoded/tests/test_types_tracking_item.py
deleted file mode 100644
index 1695b3e0fc..0000000000
--- a/src/encoded/tests/test_types_tracking_item.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import pytest
-
-# Code that uses this is commented-out below.
-# from ..types import TrackingItem
-
-
-pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
-
-
-@pytest.fixture
-def tracking_item():
-    return {"tracking_type": "other", "other_tracking": {"extra_field": "extra_value"}}
-
-
-def test_insert_and_get_tracking_item(testapp, tracking_item):
-    res = testapp.post_json('/tracking-items', tracking_item, status=201)
-    assert res.json['@graph'][0]['tracking_type'] == tracking_item['tracking_type']
-    res_uuid = res.json['@graph'][0]['uuid']
-    get_res = testapp.get('/tracking-items/' + res_uuid).follow()
-    assert get_res.json['other_tracking']['extra_field'] == tracking_item['other_tracking']['extra_field']
-    assert get_res.json.get('date_created')
-
-
-# def test_tracking_item_create_and_commit(testapp, dummy_request):
-#     test_body = {
-#         "tracking_type": "other",
-#         "other_tracking": {"key1": "val1"},
-#         "submitted_by": "4dndcic@gmail.com"
-#     }
-#     res = TrackingItem.create_and_commit(dummy_request, test_body)
-#     assert res['status'] == 'success'
-#     res_path = res['@graph'][0]
-#     app_res = testapp.get(res_path)
-#     assert app_res.json['tracking_type'] == test_body['tracking_type']
-#     assert app_res.json['other_tracking']['key1'] == test_body['other_tracking']['key1']
-#     # should not have date created in this case (no validators run)
-#     assert 'date_created' not in app_res.json
-#     # however status is added automatically when using create_and_commit fxn
-#     assert app_res.json['status'] == 'in review by lab'
diff --git a/src/encoded/tests/test_types_treatment.py b/src/encoded/tests/test_types_treatment.py
deleted file mode 100644
index ed29f56977..0000000000
--- a/src/encoded/tests/test_types_treatment.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import pytest
-
-
-pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
-
-
-@pytest.fixture
-def drug_treatment(testapp, lab, award):
-    item = {
-        'award': award['@id'],
-        'lab': lab['@id'],
-        'treatment_type': 'Chemical',
-        'chemical': 'Drug',
-    }
-    return testapp.post_json('/treatment_agent', item).json['@graph'][0]
-
-
-@pytest.fixture
-def viral_treatment(testapp, lab, award):
-    item = {
-        'award': award['@id'],
-        'lab': lab['@id'],
-        'treatment_type': 'Biological',
-        'biological_agent': 'Virus',
-    }
-    return testapp.post_json('/treatment_agent', item).json['@graph'][0]
-
-
-@pytest.fixture
-def heatshock_treatment(testapp, lab, award):
-    item = {
-        'award': award['@id'],
-        'lab': lab['@id'],
-        'treatment_type': 'Heat Shock',
-    }
-    return testapp.post_json('/treatment_agent', item).json['@graph'][0]
-
-
-def test_calculated_agent_treatment_display_title(testapp, heatshock_treatment):
-    assert heatshock_treatment['display_title'] == 'Heat Shock'
-    res = testapp.patch_json(
-        heatshock_treatment['@id'],
-        {'duration': 3.5, 'duration_units': 'hour'})
-    assert res.json['@graph'][0]['display_title'] == 'Heat Shock (3.5h)'
-    res = testapp.patch_json(heatshock_treatment['@id'], {'temperature': 42})
-    assert res.json['@graph'][0]['display_title'] == 'Heat Shock (3.5h at 42°C)'
-
-
-def test_calculated_chemical_treatment_display_title(testapp, drug_treatment):
-    assert drug_treatment['display_title'] == 'Drug treatment'
-    res = testapp.patch_json(
-        drug_treatment['@id'],
-        {'duration': 3.5, 'duration_units': 'hour'})
-    assert res.json['@graph'][0]['display_title'] == 'Drug treatment (3.5h)'
-    res = testapp.patch_json(
-        drug_treatment['@id'],
-        {'concentration': 3.5, 'concentration_units': 'M'})
-    assert res.json['@graph'][0]['display_title'] == 'Drug treatment (3.5 M, 3.5h)'
-    res = testapp.patch_json(drug_treatment['@id'], {'temperature': 3.5})
-    assert res.json['@graph'][0]['display_title'] == 'Drug treatment (3.5 M, 3.5h at 3.5°C)'
-
-
-def test_calculated_chemical_treatment_display_title_temp_only(testapp, drug_treatment):
-    assert drug_treatment['display_title'] == 'Drug treatment'
-    res = testapp.patch_json(drug_treatment['@id'], {'temperature': 37})
-    assert res.json['@graph'][0]['display_title'] == 'Drug treatment (at 37°C)'
-
-
-def test_calculated_chemical_treatment_washout_display_title(testapp, drug_treatment):
-    assert drug_treatment['display_title'] == 'Drug treatment'
-    res = testapp.patch_json(
-        drug_treatment['@id'],
-        {'duration': 3.5, 'duration_units': 'hour', 'concentration': 0, 'concentration_units': 'M'}
-    )
-    assert res.json['@graph'][0]['display_title'] == 'Drug washout (3.5h)'
-
-
-def test_calculated_biological_treatment_display_title(testapp, viral_treatment):
-    assert viral_treatment['display_title'] == 'Virus treatment'
-    res = testapp.patch_json(viral_treatment['@id'], {
-        'duration': 3.5, 'duration_units': 'hour',
-        'concentration': 2, 'concentration_units': 'MOI'
-    })
-    assert res.json['@graph'][0]['display_title'] == 'Virus treatment (2 MOI, 3.5h)'
-
-
-def test_calculated_rnai_treatment_display_title(testapp, rnai, gene_bio_feature):
-    assert rnai['display_title'] == 'shRNA treatment'
-    res = testapp.patch_json(rnai['@id'], {'target': [gene_bio_feature['@id']]})
-    assert res.json['@graph'][0]['display_title'] == 'shRNA of RAD21 gene'
diff --git a/src/encoded/tests/test_types_user.py b/src/encoded/tests/test_types_user.py
deleted file mode 100644
index a1bf5579ce..0000000000
--- a/src/encoded/tests/test_types_user.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import pytest
-
-from ..types.user import User
-
-
-pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
-
-
-@pytest.fixture
-def user_w_lab(testapp, lab):
-    item = {
-        'first_name': 'User',
-        'last_name': 'McGee',
-        'email': 'user@mcgee.org',
-        'status': 'current',
-        'lab': lab['@id']
-    }
-    # User @@object view has keys omitted.
-    res = testapp.post_json('/user', item)
-    return testapp.get(res.location).json
-
-
-def test_user_subscriptions(testapp, submitter, admin, user_w_lab, lab):
-    # submitter has submits_for but no lab
-    assert 'submits_for' in submitter
-    assert len(submitter['subscriptions']) == 1
-    assert submitter['subscriptions'][0]['title'] == 'My submissions'
-    # subscription url contains user and lab uuids
-    assert submitter['uuid'] in submitter['subscriptions'][0]['url']
-    # ensure that submissions are the same after another update
-    subscriptions_before_update = submitter['subscriptions'].copy()
-    submitter.update()
-    assert submitter['subscriptions'] == subscriptions_before_update
-
-    # user_w_lab has lab but no submits_for
-    assert 'lab' in user_w_lab
-    assert len(user_w_lab['subscriptions']) == 1
-    assert user_w_lab['subscriptions'][0]['title'] == 'Submissions for my lab'
-    # subscription url contains just lab uuid
-    assert user_w_lab['uuid'] not in user_w_lab['subscriptions'][0]['url']
-    assert lab['uuid'] in user_w_lab['subscriptions'][0]['url']
-
-    # admin has no submits_for and no lab, thus should have no subscriptions
-    assert 'lab' not in admin
-    assert admin.get('submits_for', []) == []
-    assert len(admin['subscriptions']) == 0
-
-
-def test_subscriptions_dont_duplicate_on_update(registry, lab):
-    # run _update on a new user with already formed subscriptions.
- # Ensure they're not duplicated in the process - user_data = { - 'first_name': 'User', - 'last_name': 'McUser', - 'email': 'user@mcuser.org', - 'status': 'current', - 'lab': lab['uuid'], - 'subscriptions': [{'url': '?lab.uuid=' + lab['uuid'] + '&sort=-date_created', - 'title': 'Submissions for my lab'}] - } - test_user = User.create(registry, None, user_data) - assert len(test_user.properties['subscriptions']) == 1 - assert test_user.properties['subscriptions'] == user_data['subscriptions'] diff --git a/src/encoded/tests/test_types_workflow.py b/src/encoded/tests/test_types_workflow.py deleted file mode 100644 index dea6b9382e..0000000000 --- a/src/encoded/tests/test_types_workflow.py +++ /dev/null @@ -1,96 +0,0 @@ -import json -import pytest - -from dcicutils.ff_utils import patch_metadata, purge_metadata -from ..types.workflow import _wfoutput_bucket_for_env - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -@pytest.fixture() -def input_json(workflow): - # use workflow that should always be in webdev - item = { - "app_name": "hi-c-processing-bam", - "parameters": { - }, - "output_bucket": "elasticbeanstalk-fourfront-webdev-wfoutput", - "tag": "0.2.5", - "config": { - "log_bucket": "tibanna-output" - }, - "workflow_uuid": "023bfb3e-9a8b-42b9-a9d4-216079526f68", - "input_files": [ - { - "object_key": [ - "4DNFI9H51IRL.bam", - "4DNFIP16HHGH.bam" - ], - "bucket_name": "elasticbeanstalk-fourfront-webdev-wfoutput", - "workflow_argument_name": "input_bams", - "uuid": [ - "68f38e45-8c66-41e2-99ab-b0b2fcd20d45", - "7420a20a-aa77-4ea8-b0b0-32a8e80c9bcb" - ] - }, - { - "object_key":"4DNFI823LSII.chrom.sizes", - "bucket_name": "elasticbeanstalk-fourfront-webprod-files", - "workflow_argument_name": "chromsize", - "uuid": "4a6d10ee-2edb-4402-a98f-0edb1d58f5e9" - } - ], - "metadata_only": True, - "output_files": [ - {"workflow_argument_name": "annotated_bam", - "uuid": "68f38e45-8c66-41e2-99ab-b0b2fcd20d45" - }, - {"workflow_argument_name": "filtered_pairs", - "uuid": "7054061b-e87d-4ca4-9693-d186348f5206" - } - ] - } - - return item - - -@pytest.fixture() -def workflow(testapp, software, award, lab): - # ensure we always use uuid that's on fourfront-webdev - workflow_uuid = '023bfb3e-9a8b-42b9-a9d4-216079526f68' - return workflow_uuid - - -@pytest.mark.broken -@pytest.mark.skip -def test_pseudo_run(testapp, input_json): - # this test can be problematic; uncomment the following line to disable it - # assert False - - res = testapp.post_json('/WorkflowRun/pseudo-run', input_json) - assert(res) - - # cleanup - output = json.loads(res.json['output']) - patch_metadata({'status':'deleted'}, output['ff_meta']['uuid'], ff_env='fourfront-webdev') - purge_metadata(output['ff_meta']['uuid'], ff_env='fourfront-webdev') - - -@pytest.mark.skip # no longer should be used -def test_workflow_for_env(): - - # These tests will want to become more abstract sometime, but for transition they test that - # we're getting obvious values we expect. 
-kmp 1-Apr-2020 - - # Fourfront prod environments - assert _wfoutput_bucket_for_env('data') == 'elasticbeanstalk-fourfront-webprod-wfoutput' - assert _wfoutput_bucket_for_env('staging') == 'elasticbeanstalk-fourfront-webprod-wfoutput' - assert _wfoutput_bucket_for_env('fourfront-production-blue') == 'elasticbeanstalk-fourfront-webprod-wfoutput' - assert _wfoutput_bucket_for_env('fourfront-production-green') == 'elasticbeanstalk-fourfront-webprod-wfoutput' - - # Other (non-prod) Fourfront environments - assert _wfoutput_bucket_for_env('fourfront-mastertest') == 'elasticbeanstalk-fourfront-mastertest-wfoutput' - assert _wfoutput_bucket_for_env('fourfront-webdev') == 'elasticbeanstalk-fourfront-webdev-wfoutput' - assert _wfoutput_bucket_for_env('fourfront-hotseat') == 'elasticbeanstalk-fourfront-hotseat-wfoutput' - diff --git a/src/encoded/tests/test_upgrade_antibody.py b/src/encoded/tests/test_upgrade_antibody.py deleted file mode 100644 index 092aa3cadc..0000000000 --- a/src/encoded/tests/test_upgrade_antibody.py +++ /dev/null @@ -1,25 +0,0 @@ -import pytest - -from snovault import UPGRADER - - -pytestmark = pytest.mark.working - - -@pytest.fixture -def antibody_1(targ_w_alias): - return{ - 'schema_version': '1', - 'antibody_target': targ_w_alias.get('aliases')[0] - } - - -def test_antibody_1_2( - registry, targ_w_alias, biofeat_w_alias, antibody_1): - ''' need to use registry to check items ''' - upgrader = registry[UPGRADER] - value = upgrader.upgrade('antibody', antibody_1, registry=registry, - current_version='1', target_version='2') - assert value['schema_version'] == '2' - assert value['antibody_target'][0] == biofeat_w_alias['uuid'] - assert targ_w_alias['aliases'][0] in value['notes'] diff --git a/src/encoded/tests/test_upgrade_biosample.py b/src/encoded/tests/test_upgrade_biosample.py deleted file mode 100644 index 54142a3f5d..0000000000 --- a/src/encoded/tests/test_upgrade_biosample.py +++ /dev/null @@ -1,21 +0,0 @@ -import pytest -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -@pytest.fixture -def biosample_1(biosample_cc_wo_diff, GM12878_biosource, award, lab): - return{ - "schema_version": '1', - "award": award['@id'], - "lab": lab['@id'], - "cell_culture_details": biosample_cc_wo_diff['@id'], - "biosource": [GM12878_biosource['@id']] - } - - -def test_biosample_1_2( - app, biosample_1, biosample_cc_wo_diff): - migrator = app.registry['upgrader'] - value = migrator.upgrade('biosample', biosample_1, current_version='1', target_version='2') - assert value['schema_version'] == '2' - assert value['cell_culture_details'][0] == biosample_cc_wo_diff['@id'] diff --git a/src/encoded/tests/test_upgrade_biosample_cell_culture.py b/src/encoded/tests/test_upgrade_biosample_cell_culture.py deleted file mode 100644 index b9fc50c00e..0000000000 --- a/src/encoded/tests/test_upgrade_biosample_cell_culture.py +++ /dev/null @@ -1,21 +0,0 @@ -import pytest -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -@pytest.fixture -def biosample_cell_culture_1(de_term, award, lab): - return{ - "schema_version": '1', - "award": award['@id'], - "lab": lab['@id'], - "differentiation_tissue": de_term['@id'] - } - - -def test_biosample_cell_culture_1_2( - app, biosample_cell_culture_1, de_term): - migrator = app.registry['upgrader'] - value = migrator.upgrade('biosample_cell_culture', biosample_cell_culture_1, current_version='1', target_version='2') - assert value['schema_version'] == '2' - assert 'differentiation_tissue' not in value - assert value['tissue'] == de_term['@id'] diff --git 
a/src/encoded/tests/test_upgrade_biosource.py b/src/encoded/tests/test_upgrade_biosource.py deleted file mode 100644 index 4670200e4a..0000000000 --- a/src/encoded/tests/test_upgrade_biosource.py +++ /dev/null @@ -1,45 +0,0 @@ -import pytest - -from snovault import UPGRADER - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -@pytest.fixture -def biosource_1(award, lab): - return{ - "schema_version": '1', - "award": award['@id'], - "lab": lab['@id'], - "biosource_type": "immortalized cell line", - "cell_line": "GM12878", - "cell_line_termid": "EFO:0000001" - } - - -@pytest.fixture -def biosource_2(biosource_1): - item = biosource_1.copy() - item['cell_line'] = 'blah' - return item - - -def test_biosource_convert_cell_line_to_link_to_ontology_term( - registry, biosource_1, gm12878_oterm): - upgrader = registry[UPGRADER] - value = upgrader.upgrade('biosource', biosource_1, registry=registry, - current_version='1', target_version='2') - assert value['schema_version'] == '2' - assert value['cell_line'] == gm12878_oterm['uuid'] - assert 'cell_line_termid' not in value - - -def test_biosource_convert_cell_line_w_no_ontology_term( - registry, biosource_2): - upgrader = registry[UPGRADER] - value = upgrader.upgrade('biosource', biosource_2, registry=registry, - current_version='1', target_version='2') - assert value['schema_version'] == '2' - assert 'cell_line' not in value - assert 'cell_line_termid' not in value diff --git a/src/encoded/tests/test_upgrade_data_release_update.py b/src/encoded/tests/test_upgrade_data_release_update.py deleted file mode 100644 index 866f0c1443..0000000000 --- a/src/encoded/tests/test_upgrade_data_release_update.py +++ /dev/null @@ -1,90 +0,0 @@ -import pytest -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -@pytest.fixture -def data_release_update_1(award, lab): - return{ - "schema_version": '1', - "award": award['@id'], - "lab": lab['@id'], - "summary": "Upgrader test.", - "update_tag": "UPGRADERTEST", - "submitted_by": "4dndcic@gmail.com", - "severity": 1, - "is_internal": False, - "parameters": [ - "tags=4DN Joint Analysis 2018" - ], - "comments": "Test upgrader", - "foursight_uuid": "2018-02-12T16:54:38.526810+00:00", - "end_date": "2018-02-14", - "start_date": "2018-02-13", - "update_items": [ - { - "primary_id": "431106bc-8535-4448-903e-854af460b112", - "secondary_id": "431106bc-8535-4448-903e-854af460b112" - } - ] - } - - -@pytest.fixture -def data_release_update_2(award, lab): - return{ - "schema_version": '2', - "award": award['@id'], - "lab": lab['@id'], - "summary": "Upgrader test.", - "update_tag": "UPGRADERTEST", - "submitted_by": "4dndcic@gmail.com", - "severity": 1, - "is_internal": False, - "parameters": [ - "tags=4DN Joint Analysis 2018" - ], - "comments": "Test upgrader 2 to 3", - "foursight_uuid": "2018-02-12T16:54:38.526810+00:00", - "end_date": "2018-02-14", - "start_date": "2018-02-13", - "update_items": [ - { - "primary_id": "431106bc-8535-4448-903e-854af460b112", - "secondary_ids": ["431106bc-8535-4448-903e-854af460b112"] - } - ] - } - - -def test_data_release_updates_secondary_id_to_secondary_ids( - app, data_release_update_1): - migrator = app.registry['upgrader'] - value = migrator.upgrade('data_release_update', data_release_update_1, current_version='1', target_version='2') - assert value['schema_version'] == '2' - update_items = value['update_items'] - assert len(update_items) == 1 - assert 'primary_id' in update_items[0] - assert 'secondary_ids' in update_items[0] - assert 'secondary_id' not in update_items[0] - 
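# The 1 -> 2 upgrade renames the singular 'secondary_id' field to a
# 'secondary_ids' list, which is what the asserts on either side of this
# comment verify.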
assert isinstance(update_items[0]['secondary_ids'], list) - assert len(update_items[0]['secondary_ids']) == 1 - - -def test_data_release_updates_secondary_ids_to_objects( - app, data_release_update_2): - """ - Needed because secondary IDs got the 'additional_info' field and are now - an array of objects - """ - migrator = app.registry['upgrader'] - value = migrator.upgrade('data_release_update', data_release_update_2, current_version='2', target_version='3') - assert value['schema_version'] == '3' - update_items = value['update_items'] - assert len(update_items) == 1 - assert 'primary_id' in update_items[0] - assert 'secondary_ids' in update_items[0] - assert isinstance(update_items[0]['secondary_ids'], list) - assert len(update_items[0]['secondary_ids']) == 1 - assert isinstance(update_items[0]['secondary_ids'][0], dict) - assert 'secondary_id' in update_items[0]['secondary_ids'][0] - assert 'additional_info' in update_items[0]['secondary_ids'][0] diff --git a/src/encoded/tests/test_upgrade_experiment.py b/src/encoded/tests/test_upgrade_experiment.py deleted file mode 100644 index dcaa5cc2a4..0000000000 --- a/src/encoded/tests/test_upgrade_experiment.py +++ /dev/null @@ -1,259 +0,0 @@ -import pytest - -from snovault import UPGRADER - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -@pytest.fixture -def experiment_repliseq_1(award, lab): - return{ - "schema_version": '1', - "award": award['@id'], - "lab": lab['@id'], - "experiment_type": "repliseq", - "total_fractions_in_exp": 2 - } - - -@pytest.fixture -def experiment_chiapet_1(award, lab): - return{ - "schema_version": '1', - "award": award['@id'], - "lab": lab['@id'], - "experiment_type": "CHIA-pet", - "antibody": "ENCAB1234567" - } - - -@pytest.fixture -def experiment_seq_1(award, lab): - return{ - "schema_version": '1', - "award": award['@id'], - "lab": lab['@id'], - "experiment_type": "CHIP-seq", - "antibody": "ENCAB1234567" - } - - -@pytest.fixture -def experiment_repliseq_2(award, lab): - return{ - "schema_version": '2', - "award": award['@id'], - "lab": lab['@id'], - "experiment_type": "Repli-seq", - "antibody": "ENCAB1234567", - "antibody_lot_id": "1234", - "total_fractions_in_exp": 16 - } - - -@pytest.fixture -def experiment_damid_1(award, lab): - return{ - "schema_version": '1', - "award": award['@id'], - "lab": lab['@id'], - "experiment_type": "DAM-ID seq", - "index_pcr_cycles": 5, - "fusion": 'LaminB' - } - - -@pytest.fixture -def experiment_mic_1(award, lab): - return{ - "schema_version": '1', - "award": award['@id'], - "lab": lab['@id'], - "experiment_type": "DNA-FiSH", - } - - -@pytest.fixture -def experiment_n(targ_w_alias): - return{ - 'targeted_factor': targ_w_alias.get('aliases')[0] - } - - -@pytest.fixture -def experiment_dilution_hic_1(award, lab): - return { - "schema_version": '1', - "award": award['@id'], - "lab": lab['@id'], - "experiment_type": "dilution Hi-C", - } - - -@pytest.fixture -def experiment_capc_w2targs(targ_w_alias, targ_gr_w_alias, file_fastq): - return{ - 'schema_version': '1', - 'targeted_regions': [ - {'target': targ_w_alias.get('aliases')[0], - 'oligo_file': file_fastq['@id']}, - {'target': targ_gr_w_alias.get('aliases')[0]} - ] - } - - -def test_experiment_convert_targeted_factor_to_biofeat( - registry, targ_w_alias, biofeat_w_alias, experiment_n): - ''' need to use registry to check items ''' - upgrader = registry[UPGRADER] - upgrade_info = [ - ('experiment_seq', '4', '5'), - ('experiment_chiapet', '4', '5'), - ('experiment_damid', '3', '4'), - ('experiment_tsaseq', '2', '3') - ] 
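# Each tuple is (item type, current schema_version, target version); the
# loop below applies the same targeted_factor -> bio_feature conversion
# across all four experiment item types and checks the converted uuid.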
- for upg in upgrade_info: - test_expt = experiment_n.copy() - test_expt['schema_version'] = upg[1] - value = upgrader.upgrade(upg[0], test_expt, registry=registry, - current_version=upg[1], target_version=upg[2]) - assert value['schema_version'] == upg[2] - assert value['targeted_factor'][0] == biofeat_w_alias['uuid'] - assert targ_w_alias['aliases'][0] in value['notes'] - - -def test_experiment_capture_c_target_to_biofeat( - registry, targ_w_alias, biofeat_w_alias, targ_gr_w_alias, - gr_biofeat_w_alias, experiment_capc_w2targs -): - ''' need to use registry to check items ''' - upgrader = registry[UPGRADER] - value = upgrader.upgrade('experiment_capture_c', experiment_capc_w2targs, registry=registry, - current_version='2', target_version='3') - assert value['schema_version'] == '3' - aliases2chk = [targ_w_alias.get('aliases')[0], targ_gr_w_alias.get('aliases')[0]] - uuids2chk = [biofeat_w_alias['uuid'], gr_biofeat_w_alias['uuid']] - trs = value['targeted_regions'] - for a2c in aliases2chk: - assert a2c in value.get('notes') - for tr in trs: - for t in tr.get('target'): - assert t in uuids2chk - - -@pytest.fixture -def experiment_hic_new_type_1(award, lab): - return { - "schema_version": '1', - "award": award['@id'], - "lab": lab['@id'], - "experiment_type": "special new Hi-C", - } - - -def test_experiment_damid_upgrade_pcr_cycles(app, experiment_damid_1): - migrator = app.registry['upgrader'] - value = migrator.upgrade('experiment_damid', experiment_damid_1, current_version='1', target_version='2') - assert value['schema_version'] == '2' - assert value['pcr_cycles'] == 5 - assert 'index_pcr_cycles' not in value - assert 'fusion' not in value - assert 'LaminB' in value['notes'] - - -def test_experiment_damid_update_type(registry, experiment_damid_1, exp_types): - upgrader = registry['upgrader'] - value = upgrader.upgrade('experiment_damid', experiment_damid_1, registry=registry, - current_version='1', target_version='3') - assert value['schema_version'] == '3' - assert value['experiment_type'] == exp_types['dam']['uuid'] - - -def test_experiment_repliseq_2stage_update_type(registry, experiment_repliseq_1, exp_types): - upgrader = registry['upgrader'] - value = upgrader.upgrade('experiment_repliseq', experiment_repliseq_1, registry=registry, - current_version='1', target_version='4') - assert value['schema_version'] == '4' - assert value['experiment_type'] == exp_types['repliseq']['uuid'] - - -def test_experiment_repliseq_multi_update_type(registry, experiment_repliseq_2, exp_types): - migrator = registry['upgrader'] - value = migrator.upgrade('experiment_repliseq', experiment_repliseq_2, registry=registry, - current_version='2', target_version='4') - assert value['schema_version'] == '4' - assert value['experiment_type'] == exp_types['multi']['uuid'] - - -def test_experiment_chiapet_update_type(registry, experiment_chiapet_1, exp_types): - migrator = registry['upgrader'] - value = migrator.upgrade('experiment_chiapet', experiment_chiapet_1, registry=registry, - current_version='1', target_version='4') - assert value['schema_version'] == '4' - assert value['experiment_type'] == exp_types['chia']['uuid'] - - -def test_experiment_seq_update_type(registry, experiment_seq_1, exp_types): - upgrader = registry['upgrader'] - value = upgrader.upgrade('experiment_seq', experiment_seq_1, registry=registry, - current_version='1', target_version='4') - assert value['schema_version'] == '4' - assert value['experiment_type'] == exp_types['chipseq']['uuid'] - - -def 
test_experiment_mic_update_type(registry, experiment_mic_1, exp_types): - migrator = registry['upgrader'] - value = migrator.upgrade('experiment_mic', experiment_mic_1, registry=registry, - current_version='1', target_version='3') - assert value['schema_version'] == '3' - assert value['experiment_type'] == exp_types['fish']['uuid'] - - -def test_dilution_hic_update_type(registry, experiment_dilution_hic_1, exp_types): - ''' there is a capitalization difference between string type and item title''' - migrator = registry['upgrader'] - value = migrator.upgrade('experiment_hi_c', experiment_dilution_hic_1, registry=registry, - current_version='1', target_version='2') - assert value['schema_version'] == '2' - assert value['experiment_type'] == exp_types['dilution']['uuid'] - - -def test_expt_w_unknown_experiment_type(registry, experiment_hic_new_type_1, exp_types): - migrator = registry['upgrader'] - value = migrator.upgrade('experiment_hi_c', experiment_hic_new_type_1, registry=registry, - current_version='1', target_version='2') - assert value['schema_version'] == '2' - assert 'special new Hi-C ITEM NOT FOUND' in value['notes'] - assert value['experiment_type'] is None - - -def test_experiment_repliseq_update_antibody(app, experiment_repliseq_2): - ab = experiment_repliseq_2['antibody'] - migrator = app.registry['upgrader'] - value = migrator.upgrade('experiment_repliseq', experiment_repliseq_2, - current_version='2', target_version='3') - assert value['schema_version'] == '3' - assert not value.get('antibody') - assert ab in value['notes'] - assert value['antibody_lot_id'] == "1234" - - -def test_experiment_chiapet_update_antibody(app, experiment_chiapet_1): - ab = experiment_chiapet_1['antibody'] - migrator = app.registry['upgrader'] - value = migrator.upgrade('experiment_chiapet', experiment_chiapet_1, - current_version='1', target_version='2') - assert value['schema_version'] == '2' - assert not value.get('antibody') - assert ab in value['notes'] - - -def test_experiment_seq_update_antibody(app, experiment_seq_1): - ab = experiment_seq_1['antibody'] - migrator = app.registry['upgrader'] - value = migrator.upgrade('experiment_seq', experiment_seq_1, - current_version='1', target_version='2') - assert value['schema_version'] == '2' - assert not value.get('antibody') - assert ab in value['notes'] diff --git a/src/encoded/tests/test_upgrade_experiment_set.py b/src/encoded/tests/test_upgrade_experiment_set.py deleted file mode 100644 index b75820ee02..0000000000 --- a/src/encoded/tests/test_upgrade_experiment_set.py +++ /dev/null @@ -1,42 +0,0 @@ -import pytest - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -@pytest.fixture -def experiment_set_1(award, lab): - return{ - "schema_version": '1', - "award": award['@id'], - "lab": lab['@id'], - "date_released": "2017-01-01" - } - - -@pytest.fixture -def experiment_set_replicate_1(award, lab): - return{ - "schema_version": '1', - "award": award['@id'], - "lab": lab['@id'], - "date_released": "2017-01-01" - } - - -def test_experiment_set_convert_date_released_to_public_release( - app, experiment_set_1): - migrator = app.registry['upgrader'] - value = migrator.upgrade('experiment_set', experiment_set_1, current_version='1', target_version='2') - assert value['schema_version'] == '2' - assert 'date_released' not in value - assert value['public_release'] == "2017-01-01" - - -def test_experiment_set_replicate_convert_date_released_to_public_release( - app, experiment_set_replicate_1): - migrator = app.registry['upgrader'] - value = 
migrator.upgrade('experiment_set_replicate', experiment_set_replicate_1, current_version='1', target_version='2') - assert value['schema_version'] == '2' - assert 'date_released' not in value - assert value['public_release'] == "2017-01-01" diff --git a/src/encoded/tests/test_upgrade_file.py b/src/encoded/tests/test_upgrade_file.py deleted file mode 100644 index 79c2d0d673..0000000000 --- a/src/encoded/tests/test_upgrade_file.py +++ /dev/null @@ -1,141 +0,0 @@ -import pytest - -from snovault import UPGRADER - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -@pytest.fixture -def file_1(award, lab): - return{ - "schema_version": '1', - "award": award['@id'], - "lab": lab['@id'], - } - - -@pytest.fixture -def file_w_extra_1(award, lab): - return{ - "schema_version": '1', - "award": award['@id'], - "lab": lab['@id'], - "file_format": "pairs", - "extra_files": [{"file_format": "pairs_px2"}] - } - - -@pytest.fixture -def file_wo_underscore_fields(): - return { - "dataset_type": "in situ Hi-C", - "assay_info": "Dpn II", - "replicate_identifiers": ["Biorep 1, Techrep 1"], - "biosource_name": "H1-hESC", - "experiment_bucket": "processed files", - "project_lab": "Some Lab" - } - - -@pytest.fixture -def file_w_underscore_fields(file_wo_underscore_fields): - fieldmap = { - "dataset_type": "override_experiment_type", - "assay_info": "override_assay_info", - "replicate_identifiers": "override_replicate_info", - "biosource_name": "override_biosource_name", - "experiment_bucket": "override_experiment_bucket", - "project_lab": "override_lab_name" - } - mod = {v: file_wo_underscore_fields.get(f) for f, v in fieldmap.items()} - mod['override_replicate_info'] = mod['override_replicate_info'][0] - return mod - - -def test_upgrade_vistrack_meta_one_repid(registry, file_wo_underscore_fields, file_w_underscore_fields): - file_wo_underscore_fields['schema_version'] = '1' - file_w_underscore_fields['schema_version'] = '2' - del file_wo_underscore_fields['experiment_bucket'] - del file_w_underscore_fields['override_experiment_bucket'] - upgrader = registry[UPGRADER] - value = upgrader.upgrade('file_vistrack', file_wo_underscore_fields, registry=registry, - current_version='1', target_version='2') - for f, v in file_w_underscore_fields.items(): - assert f in value - assert value.get(f) == v - - -def test_upgrade_vistrack_meta_multi_repids(registry, file_wo_underscore_fields, file_w_underscore_fields): - file_wo_underscore_fields['schema_version'] = '1' - file_w_underscore_fields['schema_version'] = '2' - file_wo_underscore_fields['replicate_identifiers'] = ['Biorep 1, Techrep 1', 'Biorep 2, Techrep 1'] - file_w_underscore_fields['override_replicate_info'] = 'merged replicates' - upgrader = registry[UPGRADER] - value = upgrader.upgrade('file_vistrack', file_wo_underscore_fields, registry=registry, - current_version='1', target_version='2') - for f, v in file_w_underscore_fields.items(): - assert f in value - assert value.get(f) == v - - -def test_upgrade_file_processed_meta_multi_repids(registry, file_wo_underscore_fields, file_w_underscore_fields): - file_wo_underscore_fields['schema_version'] = '2' - file_w_underscore_fields['schema_version'] = '3' - file_wo_underscore_fields['replicate_identifiers'] = ['Biorep 1, Techrep 1', 'Biorep 2, Techrep 1'] - file_w_underscore_fields['override_replicate_info'] = 'merged replicates' - upgrader = registry[UPGRADER] - value = upgrader.upgrade('file_processed', file_wo_underscore_fields, registry=registry, - current_version='2', target_version='3') - for f, v in 
file_w_underscore_fields.items(): - assert f in value - assert value.get(f) == v - - -def test_upgrade_file_format_known( - registry, file_1, file_formats): - type2format = { - 'file_fastq': 'fastq', - 'file_processed': 'pairs', - 'file_reference': 'chromsizes', - 'file_microscopy': 'tiff', - 'file_calibration': 'zip' - } - upgrader = registry[UPGRADER] - for ftype, ff in type2format.items(): - file_1['file_format'] = ff - value = upgrader.upgrade(ftype, file_1, registry=registry, - current_version='1', target_version='2') - assert value['schema_version'] == '2' - assert value['file_format'] == file_formats[ff].get('uuid') - - -def test_upgrade_file_format_w_unknown_format( - registry, file_1, file_formats): - upgrader = registry[UPGRADER] - file_1['file_format'] = 'hic' - value = upgrader.upgrade('file_processed', file_1, registry=registry, - current_version='1', target_version='2') - assert value['schema_version'] == '2' - assert value['file_format'] == file_formats['other'].get('uuid') - assert ' FILE FORMAT: hic' in value['notes'] - - -def test_upgrade_extrafile_formats_good_formats( - registry, file_w_extra_1, file_formats): - upgrader = registry[UPGRADER] - value = upgrader.upgrade('file_processed', file_w_extra_1, registry=registry, - current_version='1', target_version='2') - assert value['schema_version'] == '2' - assert value['extra_files'][0]['file_format'] == file_formats['pairs_px2'].get('uuid') - - -def test_upgrade_extrafile_format_w_unknown_format( - registry, file_w_extra_1, file_formats): - upgrader = registry[UPGRADER] - file_w_extra_1['extra_files'] = [{'file_format': 'hic'}] - value = upgrader.upgrade('file_processed', file_w_extra_1, registry=registry, - current_version='1', target_version='2') - assert value['schema_version'] == '2' - assert value['extra_files'][0]['file_format'] == file_formats['other'].get('uuid') - assert ' EXTRA FILE FORMAT: 0-hic' in value['notes'] diff --git a/src/encoded/tests/test_upgrade_imaging_path.py b/src/encoded/tests/test_upgrade_imaging_path.py deleted file mode 100644 index a87efd101f..0000000000 --- a/src/encoded/tests/test_upgrade_imaging_path.py +++ /dev/null @@ -1,38 +0,0 @@ -import pytest - -from snovault import UPGRADER - - -pytestmark = pytest.mark.working - - -@pytest.fixture -def imaging_path_1_w_3targs(targ_w_alias, targ_gr_w_alias, targ_agr_w_alias): - ''' one target will not have a corresponding biofeature ''' - return{ - 'target': [ - targ_w_alias.get('aliases')[0], - targ_gr_w_alias.get('aliases')[0], - targ_agr_w_alias.get('aliases')[0] - ] - } - - -def test_imaging_path_1_2( - registry, targ_w_alias, biofeat_w_alias, targ_gr_w_alias, - gr_biofeat_w_alias, targ_agr_w_alias, imaging_path_1_w_3targs -): - ''' need to use registry to check items ''' - upgrader = registry[UPGRADER] - value = upgrader.upgrade('imaging_path', imaging_path_1_w_3targs, registry=registry, - current_version='1', target_version='2') - assert value['schema_version'] == '2' - aliases2chk = [targ_w_alias.get('aliases')[0], targ_gr_w_alias.get('aliases')[0], targ_agr_w_alias.get('aliases')[0]] - uuids2chk = [biofeat_w_alias['uuid'], gr_biofeat_w_alias['uuid']] - trs = value['target'] - for a2c in aliases2chk: - assert a2c in value.get('notes') - for tr in trs: - assert tr in uuids2chk - # check that the one target without the corresponding biofeature has special note - assert 'UPDATE NEEDED {};'.format(targ_agr_w_alias.get('aliases')[0]) in value['notes'] diff --git a/src/encoded/tests/test_upgrade_modification.py 
b/src/encoded/tests/test_upgrade_modification.py deleted file mode 100644 index 22ff84a4a7..0000000000 --- a/src/encoded/tests/test_upgrade_modification.py +++ /dev/null @@ -1,25 +0,0 @@ -import pytest - -from snovault import UPGRADER - - -pytestmark = pytest.mark.working - - -@pytest.fixture -def modification_1(targ_w_alias): - return{ - 'schema_version': '1', - 'target_of_mod': targ_w_alias.get('aliases')[0] - } - - -def test_modification_1_2( - registry, targ_w_alias, biofeat_w_alias, modification_1): - ''' need to use registry to check items ''' - upgrader = registry[UPGRADER] - value = upgrader.upgrade('modification', modification_1, registry=registry, - current_version='1', target_version='2') - assert value['schema_version'] == '2' - assert value['target_of_mod'][0] == biofeat_w_alias['uuid'] - assert targ_w_alias['aliases'][0] in value['notes'] diff --git a/src/encoded/tests/test_upgrade_ontology_term.py b/src/encoded/tests/test_upgrade_ontology_term.py deleted file mode 100644 index b85f4419b5..0000000000 --- a/src/encoded/tests/test_upgrade_ontology_term.py +++ /dev/null @@ -1,22 +0,0 @@ -import pytest - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -@pytest.fixture -def ontology_term_1(so_ont, award, lab): - return{ - "schema_version": '1', - "term_id": 'SO:0001111', - "term_name": 'so_term', - "source_ontology": so_ont['@id'] - } - - -def test_ontology_term_1_2( - app, ontology_term_1, so_ont): - migrator = app.registry['upgrader'] - value = migrator.upgrade('ontology_term', ontology_term_1, current_version='1', target_version='2') - assert value['schema_version'] == '2' - assert value['source_ontologies'][0] == so_ont['@id'] diff --git a/src/encoded/tests/test_upgrade_publication.py b/src/encoded/tests/test_upgrade_publication.py deleted file mode 100644 index 87e7e605a4..0000000000 --- a/src/encoded/tests/test_upgrade_publication.py +++ /dev/null @@ -1,24 +0,0 @@ -import pytest - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -@pytest.fixture -def publication_1(award, lab): - return{ - "schema_version": '1', - "award": award['@id'], - "lab": lab['@id'], - "authors": "Black JC, White AL, Red A" - } - - -def test_publication_convert_author_string_to_list( - app, publication_1): - migrator = app.registry['upgrader'] - value = migrator.upgrade('publication', publication_1, current_version='1', target_version='2') - assert value['schema_version'] == '2' - authors = ["Black JC", "White AL", "Red A"] - for author in authors: - assert author in value['authors'] diff --git a/src/encoded/tests/test_upgrade_target.py b/src/encoded/tests/test_upgrade_target.py deleted file mode 100644 index 48d13bfd2b..0000000000 --- a/src/encoded/tests/test_upgrade_target.py +++ /dev/null @@ -1,22 +0,0 @@ -import pytest - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -@pytest.fixture -def target_1(basic_genomic_region, award, lab): - return{ - "schema_version": '1', - "award": award['@id'], - "lab": lab['@id'], - "targeted_region": basic_genomic_region['@id'] - } - - -def test_target_convert_targeted_region_to_targeted_genome_regions( - app, target_1, basic_genomic_region): - migrator = app.registry['upgrader'] - value = migrator.upgrade('target', target_1, current_version='1', target_version='2') - assert value['schema_version'] == '2' - assert value['targeted_genome_regions'][0] == basic_genomic_region['@id'] diff --git a/src/encoded/tests/test_upgrade_tracking_item.py b/src/encoded/tests/test_upgrade_tracking_item.py deleted file mode 100644 index 
7d4477dfed..0000000000 --- a/src/encoded/tests/test_upgrade_tracking_item.py +++ /dev/null @@ -1,37 +0,0 @@ -import pytest - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -@pytest.fixture -def tracking_item_1(): - return { - "tracking_type": "download_tracking", - "uuid": "b068f9d3-c026-4ef6-8769-104d745b9ca0", - "download_tracking": { - "range_query": True, - "experiment_type": "in situ Hi-C", - "remote_ip": "86.154.184.239", - "request_path": "/files-processed/4DNFIBBKG9KD/@@download/4DNFIBBKG9KD.hic", - "user_uuid": "anonymous", - "filename": "4DNFIBBKG9KD.hic", - "file_format": "hic", - "geo_country": "GB", - "geo_city": "Summertown, Oxfordshire", - "user_agent": - "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/72.0.3626.121" - " Chrome/72.0.3626.121 Safari/537.36", - "is_visualization": False - }, - "status": "released", - "schema_version": "1" - } - - -def test_tracking_item_delete_is_visualization(app, tracking_item_1): - migrator = app.registry['upgrader'] - value = migrator.upgrade('tracking_item', tracking_item_1, current_version='1', target_version='2') - dt = value['download_tracking'] - assert 'range_query' in dt - assert 'is_visualization' not in dt diff --git a/src/encoded/tests/test_upgrade_treatment.py b/src/encoded/tests/test_upgrade_treatment.py deleted file mode 100644 index ef992fbc78..0000000000 --- a/src/encoded/tests/test_upgrade_treatment.py +++ /dev/null @@ -1,24 +0,0 @@ -import pytest - -from snovault import UPGRADER - - -pytestmark = pytest.mark.working - - -@pytest.fixture -def treatment_1(targ_w_alias): - return{ - 'schema_version': '1', - 'target': targ_w_alias.get('aliases')[0] - } - - -def test_treatment_rna1_1_2(registry, targ_w_alias, biofeat_w_alias, treatment_1): - """need to use registry to check items """ - upgrader = registry[UPGRADER] - value = upgrader.upgrade('treatment_rnai', treatment_1, registry=registry, - current_version='1', target_version='2') - assert value['schema_version'] == '2' - assert value['target'][0] == biofeat_w_alias['uuid'] - assert targ_w_alias['aliases'][0] in value['notes'] diff --git a/src/encoded/tests/test_upgrade_workflow.py b/src/encoded/tests/test_upgrade_workflow.py deleted file mode 100644 index 8d92a58a2a..0000000000 --- a/src/encoded/tests/test_upgrade_workflow.py +++ /dev/null @@ -1,670 +0,0 @@ -import pytest - -from snovault import UPGRADER - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -@pytest.fixture -def workflow_2(software, award, lab): - return{ - "schema_version": '2', - "award": award['@id'], - "lab": lab['@id'], - "title": "some workflow", - "name": "some workflow", - "workflow_type": "Other", - "steps": [ - { - "meta": { - "software_used": software['@id'] - } - } - ] - } - - -def test_workflow_convert_software_used_to_list_2( - app, workflow_2, software): - migrator = app.registry['upgrader'] - value = migrator.upgrade('workflow', workflow_2, current_version='2', target_version='3') - assert value['schema_version'] == '3' - assert value['steps'][0]['meta']['software_used'] == [software['@id']] - - -@pytest.fixture -def workflow_3(software, award, lab): - return { - "arguments": [ - { - "argument_format": "chromsizes", - "argument_type": "Input file", - "workflow_argument_name": "chrsizes" - }, - { - "argument_format": "pairs", - "argument_type": "Input file", - "workflow_argument_name": "input_pairs" - }, - { - "argument_format": "pairs", - "argument_type": "Input file", - "workflow_argument_name": "input_pairs_index" - }, - { - 
"argument_type": "parameter", - "workflow_argument_name": "ncores" - }, - { - "argument_type": "parameter", - "workflow_argument_name": "binsize" - }, - { - "argument_type": "parameter", - "workflow_argument_name": "min_res" - }, - { - "argument_type": "parameter", - "workflow_argument_name": "normalization_type" - }, - { - "argument_format": "pairs_px2", - "argument_type": "Output processed file", - "workflow_argument_name": "output_pairs_index" - }, - { - "argument_format": "pairs", - "argument_type": "Output processed file", - "workflow_argument_name": "output_pairs" - }, - { - "argument_format": "cool", - "argument_type": "Output processed file", - "workflow_argument_name": "out_cool" - }, - { - "argument_format": "hic", - "argument_type": "Output processed file", - "workflow_argument_name": "output_hic" - }, - { - "argument_format": "mcool", - "argument_type": "Output processed file", - "workflow_argument_name": "out_mcool" - } - ], - "category": "merging + matrix generation", - "data_types": [ - "Hi-C" - ], - "description": "Hi-C processing part B revision 15", - "award": award['@id'], - "lab": lab['@id'], - "name": "hi-c-processing-partb/15", - "schema_version": "3", - "steps": [ - { - "inputs": [ - { - "meta": { - "argument_format": "pairs", - "argument_type": "Input file" - }, - "name": "input_pairs", - "source": [ - { - "name": "input_pairs" - } - ] - }, - { - "meta": { - "argument_type": "Input file" - }, - "name": "input_pairs_index", - "source": [ - { - "name": "input_pairs_index" - } - ] - } - ], - "meta": { - "analysis_step_types": [ - "file merging" - ], - "software_used": [ - "/software/02d636b9-d82d-4da9-950c-2ca994a23547/" - ] - }, - "name": "merge_pairs", - "outputs": [ - { - "meta": { - "argument_format": "pairs_px2", - "argument_type": "Output processed file" - }, - "name": "output_pairs_index", - "target": [ - { - "name": "output_pairs_index" - }, - { - "name": "pairs_index", - "step": "cooler" - } - ] - }, - { - "meta": { - "argument_format": "pairs", - "argument_type": "Output processed file" - }, - "name": "output_pairs", - "target": [ - { - "name": "output_pairs" - }, - { - "name": "input_pairs", - "step": "pairs2hic" - }, - { - "name": "pairs", - "step": "cooler" - } - ] - } - ] - }, - { - "inputs": [ - { - "meta": { - "argument_type": "parameter" - }, - "name": "ncores", - "source": [ - { - "name": "ncores" - } - ] - }, - { - "meta": { - "argument_type": "parameter" - }, - "name": "binsize", - "source": [ - { - "name": "binsize" - } - ] - }, - { - "meta": {}, - "name": "pairs_index", - "source": [ - { - "name": "output_pairs_index", - "step": "merge_pairs" - } - ] - }, - { - "meta": {}, - "name": "pairs", - "source": [ - { - "name": "output_pairs", - "step": "merge_pairs" - } - ] - } - ], - "meta": { - "analysis_step_types": [ - "aggregation" - ], - "software_used": [ - "/software/02d636b9-d8dd-4da9-950c-2ca994b23555/" - ] - }, - "name": "cooler", - "outputs": [ - { - "meta": { - "argument_format": "cool", - "argument_type": "Output processed file" - }, - "name": "out_cool", - "target": [ - { - "name": "out_cool" - } - ] - } - ] - }, - { - "inputs": [ - { - "meta": { - "argument_format": "chromsizes", - "argument_type": "Input file" - }, - "name": "chrsizes", - "source": [ - { - "name": "chrsizes" - } - ] - }, - { - "meta": { - "argument_type": "parameter" - }, - "name": "min_res", - "source": [ - { - "name": "min_res" - } - ] - }, - { - "meta": {}, - "name": "input_pairs", - "source": [ - { - "name": "output_pairs", - "step": "merge_pairs" - } - ] - } - ], - 
"meta": { - "analysis_step_types": [ - "aggregation", - "normalization" - ], - "software_used": [ - "/software/02d636b9-d8dd-4da9-950c-2ca994b23576/" - ] - }, - "name": "pairs2hic", - "outputs": [ - { - "meta": { - "argument_format": "hic", - "argument_type": "Output processed file" - }, - "name": "output_hic", - "target": [ - { - "name": "output_hic" - }, - { - "name": "input_hic", - "step": "hic2mcool" - } - ] - } - ] - }, - { - "inputs": [ - { - "meta": { - "argument_type": "parameter" - }, - "name": "normalization_type", - "source": [ - { - "name": "normalization_type" - } - ] - }, - { - "meta": {}, - "name": "input_hic", - "source": [ - { - "name": "output_hic", - "step": "pairs2hic" - } - ] - } - ], - "meta": { - "analysis_step_types": [ - "file format conversion" - ], - "software_used": [ - "/software/02d636b9-d8dd-4da9-950c-2ca994b23555/" - ] - }, - "name": "hic2mcool", - "outputs": [ - { - "meta": { - "argument_format": "mcool", - "argument_type": "Output processed file" - }, - "name": "out_mcool", - "target": [ - { - "name": "out_mcool" - } - ] - } - ] - } - ], - "title": "Hi-C processing part B revision 15", - "workflow_type": "Hi-C data analysis" - } - - -def test_workflow_upgrade_3_4( - app, workflow_3, software): - migrator = app.registry['upgrader'] - value = migrator.upgrade('workflow', workflow_3, current_version='3', target_version='4') - assert value['schema_version'] == '4' - assert value['steps'][0]['inputs'][0]['source'][0].get('type') is None - assert value['steps'][0]['outputs'][0]['target'][0].get('type') is None - - assert value['steps'][0]['inputs'][0]['meta'].get('file_format') == 'pairs' - assert value['steps'][0]['inputs'][1]['meta'].get('file_format') == 'pairs' - assert value['steps'][0]['inputs'][0]['meta'].get('cardinality') == 'array' # 'input_pairs' arg of 'merge_pairs' should get this auto-assigned - - assert value['steps'][0]['inputs'][0]['meta'].get('type') == 'data file' - assert value['steps'][0]['inputs'][1]['meta'].get('type') == 'reference file' # 'input_pairs_index' has 'index' in name ==> 'reference file' upgrade. 
- - assert value['steps'][0]['inputs'][0]['meta'].get('global') is True - assert value['steps'][0]['inputs'][1]['meta'].get('global') is True - - assert value['steps'][0]['outputs'][0]['meta'].get('file_format') == 'pairs_px2' - assert value['steps'][0]['outputs'][1]['meta'].get('file_format') == 'pairs' - - assert value['steps'][0]['outputs'][0]['meta'].get('global') is True - assert value['steps'][0]['outputs'][1]['meta'].get('global') is True - - assert value['steps'][0]['outputs'][0]['meta'].get('type') == 'data file' # We don't transform outputs to reference files - assert value['steps'][0]['outputs'][1]['meta'].get('type') == 'data file' - - -@pytest.fixture -def workflow_4(): - return { - 'lab': '/labs/encode-lab/', - 'steps': [ - { - 'meta': { - 'analysis_step_types': ['file merging'], - 'software_used': ['/software/02d636b9-d82d-4da9-950c-2ca994a23547/'] - }, - 'name': 'merge_pairs', - 'outputs': [ - { - 'meta': { - 'cardinality': 'single', - 'type': 'data file', - 'global': True, - 'file_format': 'pairs_px2' - }, - 'target': [{'name': 'output_pairs_index'}, {'name': 'pairs_index', 'step': 'cooler'}], - 'name': 'output_pairs_index' - }, - { - 'meta': {'cardinality': 'single', 'type': 'data file', 'global': True, 'file_format': 'pairs'}, - 'target': [{'name': 'output_pairs'}, - {'name': 'input_pairs', 'step': 'pairs2hic'}, - {'name': 'pairs', 'step': 'cooler'}], - 'name': 'output_pairs' - } - ], - 'inputs': [ - { - 'meta': {'cardinality': 'array', 'type': 'data file', 'global': True, 'file_format': 'pairs'}, - 'name': 'input_pairs', - 'source': [{'name': 'input_pairs'}] - }, - { - 'meta': { - 'type': 'reference file', - 'global': True, - 'file_format': 'pairs', - 'cardinality': 'array' - }, - 'name': 'input_pairs_index', - 'source': [{'name': 'input_pairs_index'}] - } - ] - }, - { - 'meta': { - 'analysis_step_types': ['aggregation'], - 'software_used': ['/software/02d636b9-d8dd-4da9-950c-2ca994b23555/'] - }, - 'name': 'cooler', - 'outputs': [ - { - 'meta': {'cardinality': 'single', 'type': 'data file', 'global': True, 'file_format': 'cool'}, - 'target': [{'name': 'out_cool'}], - 'name': 'out_cool' - } - ], - 'inputs': [ - { - 'meta': {'type': 'parameter', 'global': True, 'cardinality': 'single'}, - 'name': 'ncores', - 'source': [{'name': 'ncores'}] - }, - { - 'meta': {'type': 'parameter', 'global': True, 'cardinality': 'single'}, - 'name': 'binsize', - 'source': [{'name': 'binsize'}] - }, - { - 'meta': { - 'type': 'reference file', - 'global': False, - 'file_format': 'pairs', - 'cardinality': 'single' - }, - 'name': 'pairs_index', - 'source': [{'name': 'output_pairs_index', 'step': 'merge_pairs'}] - }, - { - 'meta': {'type': 'data file', 'global': False, 'file_format': 'pairs', 'cardinality': 'single'}, - 'name': 'pairs', - 'source': [{'name': 'output_pairs', 'step': 'merge_pairs'}] - } - ] - }, - { - 'meta': { - 'analysis_step_types': ['aggregation', 'normalization'], - 'software_used': ['/software/02d636b9-d8dd-4da9-950c-2ca994b23576/'] - }, - 'name': 'pairs2hic', - 'outputs': [ - { - 'meta': {'cardinality': 'single', 'type': 'data file', 'global': True, 'file_format': 'hic'}, - 'target': [{'name': 'output_hic'}, {'name': 'input_hic', 'step': 'hic2mcool'}], - 'name': 'output_hic' - } - ], - 'inputs': [ - { - 'meta': {'cardinality': 'single', 'type': 'reference file', 'global': True, 'file_format': 'chromsizes'}, - 'name': 'chrsizes', - 'source': [{'name': 'chrsizes'}] - }, - { - 'meta': {'type': 'parameter', 'global': True, 'cardinality': 'single'}, - 'name': 'min_res', - 
'source': [{'name': 'min_res'}] - }, - { - 'meta': {'type': 'data file', 'global': False, 'file_format': 'pairs', 'cardinality': 'single'}, - 'name': 'input_pairs', - 'source': [{'name': 'output_pairs', 'step': 'merge_pairs'}] - } - ] - }, - { - 'meta': { - 'analysis_step_types': ['file format conversion'], - 'software_used': ['/software/02d636b9-d8dd-4da9-950c-2ca994b23555/'] - }, - 'name': 'hic2mcool', - 'outputs': [ - { - 'meta': {'cardinality': 'single', 'type': 'data file', 'global': True, 'file_format': 'mcool'}, - 'target': [{'name': 'out_mcool'}], - 'name': 'out_mcool' - } - ], - 'inputs': [ - { - 'meta': {'type': 'parameter', 'global': True, 'cardinality': 'single'}, - 'name': 'normalization_type', - 'source': [{'name': 'normalization_type'}] - }, - { - 'meta': {'type': 'data file', 'global': False, 'cardinality': 'single'}, - 'name': 'input_hic', - 'source': [{'name': 'output_hic', 'step': 'pairs2hic'}] - } - ] - } - ], - 'name': 'hi-c-processing-partb/15', - 'data_types': ['Hi-C'], - 'description': 'Hi-C processing part B revision 15', - 'category': 'merging + matrix generation', - 'schema_version': '4', - 'arguments': [ - {'workflow_argument_name': 'chrsizes', 'argument_format': 'chromsizes', 'argument_type': 'Input file'}, - {'workflow_argument_name': 'input_pairs', 'argument_format': 'pairs', 'argument_type': 'Input file'}, - {'workflow_argument_name': 'input_pairs_index', 'argument_format': 'pairs', 'argument_type': 'Input file'}, - {'workflow_argument_name': 'ncores', 'argument_type': 'parameter'}, - {'workflow_argument_name': 'binsize', 'argument_type': 'parameter'}, - {'workflow_argument_name': 'min_res', 'argument_type': 'parameter'}, - {'workflow_argument_name': 'normalization_type', 'argument_type': 'parameter'}, - {'workflow_argument_name': 'output_pairs_index', 'argument_format': 'pairs_px2', 'argument_type': 'Output processed file'}, - {'workflow_argument_name': 'output_pairs', 'argument_format': 'pairs', 'argument_type': 'Output processed file'}, - {'workflow_argument_name': 'out_cool', 'argument_format': 'cool', 'argument_type': 'Output processed file'}, - {'workflow_argument_name': 'output_hic', 'argument_format': 'hic', 'argument_type': 'Output processed file'}, - {'workflow_argument_name': 'out_mcool', 'argument_format': 'mcool', 'argument_type': 'Output processed file'} - ], - 'award': '/awards/encode3-award/', - 'workflow_type': 'Hi-C data analysis', - 'title': 'Hi-C processing part B revision 15' - } - - -def test_workflow_upgrade_4_5( - workflow_4, registry, file_formats): - upgrader = registry[UPGRADER] - value = upgrader.upgrade('workflow', workflow_4, registry=registry, - current_version='4', target_version='5') - format_uuids = [f.get('uuid') for f in file_formats.values()] - assert value['schema_version'] == '5' - arguments = value.get('arguments', []) - for arg in arguments: - secondary_formats = arg.get('secondary_file_formats', []) - for sformat in secondary_formats: - assert sformat in format_uuids - argument_format = arg.get('argument_format') - if argument_format: - assert argument_format in format_uuids - steps = value.get('steps', []) - for step in steps: - inputs = step.get('inputs', []) - for inp in inputs: - meta = inp.get('meta', {}) - fformat = meta.get('file_format') - if fformat: - assert fformat in format_uuids - - -@pytest.fixture -def workflow_5(software, award, lab): - return{ - "schema_version": '5', - "award": award['@id'], - "lab": lab['@id'], - "title": "some workflow", - "name": "some workflow", - "workflow_type": "Other", - 
"data_types": ["Hi-C"], - "category": "alignment", - "steps": [{ "meta": { "software_used" : software['@id'] } }] - } - - -def test_workflow_upgrade_5_6( - workflow_5, registry): - upgrader = registry[UPGRADER] - value = upgrader.upgrade('workflow', workflow_5, registry=registry, - current_version='5', target_version='6') - assert value['schema_version'] == '6' - assert 'workflow_type' not in value - assert 'category' in value - assert isinstance(value['category'], list) - assert 'experiment_types' in value - assert 'data_types' not in value - - -@pytest.fixture -def workflow_6(software, award, lab, workflow_bam): - return{ - "schema_version": '6', - "award": award['@id'], - "lab": lab['@id'], - "title": "some workflow", - "name": "some workflow", - "previous_version": workflow_bam['@id'] - } - - -def test_workflow_upgrade_6_7( - workflow_6, registry): - upgrader = registry[UPGRADER] - value = upgrader.upgrade('workflow', workflow_6, registry=registry, - current_version='6', target_version='7') - assert value['schema_version'] == '7' - assert 'previous_version' in value - assert isinstance(value['previous_version'], list) - - -# @pytest.fixture -# def workflow_7(software, award, lab, workflow_bam): -# return{ -# "schema_version": '6', -# "award": award['@id'], -# "lab": lab['@id'], -# "title": "some workflow", -# "name": "some workflow", -# "previous_version": [workflow_bam['@id']] -# } -# -# -# def test_workflow_upgrade_7_8(workflow_7, registry, exp_types): -# types2upg = {'Repli-seq': None, 'in situ Hi-C': 'hic', 'Capture Hi-C': 'capc', 'DNA FISH': 'fish', 'dilution Hi-C': 'dilution'} -# upgrader = registry[UPGRADER] -# for etype, lookup in types2upg.items(): -# workflow_7['experiment_types'] = [etype] -# value = upgrader.upgrade('workflow', workflow_7, registry=registry, -# current_version='7', target_version='8') -# assert value['schema_version'] == '8' -# newetypes = value['experiment_types'] -# if etype == 'Repli-seq': -# assert len(newetypes) == 2 -# assert exp_types['repliseq'].get('uuid') in newetypes -# assert exp_types['multi'].get('uuid') in newetypes -# else: -# assert exp_types[lookup].get('uuid') == newetypes[0] diff --git a/src/encoded/tests/test_upgrade_workflow_run.py b/src/encoded/tests/test_upgrade_workflow_run.py deleted file mode 100644 index 57d9cce052..0000000000 --- a/src/encoded/tests/test_upgrade_workflow_run.py +++ /dev/null @@ -1,110 +0,0 @@ -import pytest - -from snovault import UPGRADER - - -pytestmark = [pytest.mark.setone, pytest.mark.working] - - -@pytest.fixture -def workflow_run_1(): - return { - "uuid": "3b7066ef-f9e4-43ce-85b5-a994a15bbcaf", - "lab": "4dn-dcic-lab", - "award": "1U01CA200059-01", - "workflow": "c77a117b-9a58-477e-aaa5-291a109a99f6", - "run_status": "complete", - "status": "in review by lab", - "metadata_only": True, - "input_files": [ - { - "ordinal": 1, - "value": "4ecd9cfb-369b-4f8e-866c-cfb2cc3c8ad2", - "workflow_argument_name": "inputs", - "format_if_extra": "pairs_px2" - } - ], - "title": "Some md5 workflow run on an extra file", - "output_files": [ - { - "type": "Output report file", - "workflow_argument_name": "report" - } - ] - } - - -def test_workflow_run_upgrade_1_2(workflow_run_1, registry, file_formats): - upgrader = registry[UPGRADER] - value = upgrader.upgrade('workflow_run', workflow_run_1, registry=registry, - current_version='1', target_version='2') - assert value['schema_version'] == '2' - ef_format = value['input_files'][0].get('format_if_extra') - assert ef_format == file_formats['pairs_px2'].get('uuid') - - -def 
test_workflow_run_upgrade_1_2_bad_file_format(workflow_run_1, registry): - upgrader = registry[UPGRADER] - value = upgrader.upgrade('workflow_run', workflow_run_1, registry=registry, - current_version='1', target_version='2') - assert value['schema_version'] == '2' - assert not value['input_files'][0].get('format_if_extra') - assert value['input_files'][0].get('notes') == ' EXTRA_FILE_FORMAT: pairs_px2 NOT FOUND' - - -def test_workflow_run_upgrade_1_2_missing_file_format(workflow_run_1, registry): - upgrader = registry[UPGRADER] - del workflow_run_1['input_files'][0]['format_if_extra'] - value = upgrader.upgrade('workflow_run', workflow_run_1, registry=registry, - current_version='1', target_version='2') - assert value['schema_version'] == '2' - ef_format = value['input_files'][0].get('format_if_extra') - assert ef_format is None - - -@pytest.fixture -def workflow_run_2(quality_metric_fastqc, file_fastq): - return { - "uuid": "4a43c93c-af77-4bab-adc2-433febc3e76c", - "lab": "4dn-dcic-lab", - "award": "1U01CA200059-01", - "workflow": "2324ad76-ff37-4157-8bcc-3ce72b7dace9", - "run_status": "complete", - "status": "in review by lab", - "metadata_only": True, - "input_files": [ - { - "ordinal": 1, - "value": file_fastq['@id'], - "workflow_argument_name": "input_fastq" - } - ], - "title": "some fastqc workflow run", - "output_files": [ - { - "type": "Output report file", - "workflow_argument_name": "report_zip" - } - ], - "output_quality_metrics": [ - { - "workflow_argument_name": "report_zip", - "name": "quality_metric_fastqc", - "value": quality_metric_fastqc['@id'] - } - ] - } - - -def test_workflow_run_upgrade_2_3(workflow_run_2, registry): - upgrader = registry[UPGRADER] - value = upgrader.upgrade('workflow_run', workflow_run_2, registry=registry, - current_version='2', target_version='3') - assert 'output_quality_metrics' not in value - - -def test_workflow_run_awsem_upgrade_2_3(workflow_run_2, registry): - upgrader = registry[UPGRADER] - value = upgrader.upgrade('workflow_run_awsem', workflow_run_2, registry=registry, - current_version='2', target_version='3') - assert 'output_quality_metrics' not in value diff --git a/src/encoded/tests/test_util.py b/src/encoded/tests/test_util.py deleted file mode 100644 index 89d3398c38..0000000000 --- a/src/encoded/tests/test_util.py +++ /dev/null @@ -1,68 +0,0 @@ -import datetime -import io -import os -import pytest -from pyramid.httpexceptions import HTTPForbidden - -from ..util import ( - # compute_set_difference_one, find_other_in_pair, - delay_rerun, # utc_today_str, - customized_delay_rerun, check_user_is_logged_in, - temporary_file -) - - -pytestmark = pytest.mark.working -DELAY_FUZZ_SECONDS = 0.1 - - -def test_delay_rerun(): - expected_delay = 1.0 - t0 = datetime.datetime.now() - delay_rerun() - t1 = datetime.datetime.now() - assert (t1 - t0).total_seconds() > expected_delay - assert (t1 - t0).total_seconds() < expected_delay + DELAY_FUZZ_SECONDS - - -def test_customize_delay_rerun(): - custom_delay = 0.5 - half_delay_rerun = customized_delay_rerun(sleep_seconds=custom_delay) - t0 = datetime.datetime.now() - half_delay_rerun() - t1 = datetime.datetime.now() - assert (t1 - t0).total_seconds() > custom_delay - assert (t1 - t0).total_seconds() < custom_delay + DELAY_FUZZ_SECONDS - - -@pytest.mark.parametrize('principals, allow', [ - (['role1', 'role2'], False), - (['role1', 'userid.uuid'], True), - (['role1', 'group.admin'], True), - (['system.Everyone'], False) -]) -def test_check_user_is_logged_in(principals, allow): - """ Simple test that 
ensures the logged in check is working as expected """ - class MockRequest: - def __init__(self, principals): - self.effective_principals = principals - req = MockRequest(principals) - if allow: - check_user_is_logged_in(req) - else: - with pytest.raises(HTTPForbidden): - check_user_is_logged_in(req) - - -def test_temporary_file_context_manager(): - temporary_filename = None - with temporary_file(extension=".json") as filename: - assert filename.endswith(".json") - temporary_filename = filename - sample_content = "Hello, world!" - with io.open(filename, "w") as fp: - fp.write(sample_content) - with io.open(filename, "r") as fp: - content = fp.read() - assert content == sample_content - assert not os.path.exists(temporary_filename) diff --git a/src/encoded/tests/test_validation_errors.py b/src/encoded/tests/test_validation_errors.py deleted file mode 100644 index 8537a03ddc..0000000000 --- a/src/encoded/tests/test_validation_errors.py +++ /dev/null @@ -1,34 +0,0 @@ -import pytest - -from dcicutils.qa_utils import notice_pytest_fixtures -#from .workbook_fixtures import es_app_settings, es_app, es_testapp, workbook -# from ..util import delay_rerun - - -# notice_pytest_fixtures(es_app_settings, es_app, es_testapp, workbook) - -pytestmark = [ - pytest.mark.working, - # pytest.mark.indexing, - pytest.mark.workbook, - # pytest.mark.flaky(rerun_filter=delay_rerun), -] - - -@pytest.mark.skip(reason="validation_errors facet was removed in search.py") -def test_validation_err_facet(workbook, es_testapp): - res = es_testapp.get('/search/?type=ExperimentSetReplicate').json - val_err_facets = [facet for facet in res['facets'] if facet['title'] == 'Validation Errors'] - assert len(val_err_facets) == 1 - assert val_err_facets[0]['aggregation_type'] == 'terms' - - -def test_validation_err_itemview(workbook, es_testapp): - res = es_testapp.get('/experiment-set-replicates/4DNESAAAAAA1/').json - assert 'validation-errors' in res.keys() - - -def test_validation_err_view(workbook, es_testapp): - res = es_testapp.get('/experiment-set-replicates/4DNESAAAAAA1/@@validation-errors').json - assert res['@id'] == '/experiment-set-replicates/4DNESAAAAAA1/' - assert 'validation_errors' in res diff --git a/src/encoded/tests/test_views.py b/src/encoded/tests/test_views.py deleted file mode 100644 index 220840fa4d..0000000000 --- a/src/encoded/tests/test_views.py +++ /dev/null @@ -1,305 +0,0 @@ -import codecs -import json -import pkg_resources -import pytest - -from base64 import b64encode -from jsonschema import Draft202012Validator -from pyramid.compat import ascii_native_ -from snovault import TYPES -from urllib.parse import urlparse -from .datafixtures import ORDER - - -pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema, pytest.mark.indexing] - - -def _type_length(): - # Not a fixture as we need to parameterize tests on this - utf8 = codecs.getreader("utf-8") - type_length_dict = {} - for name in ORDER: - try: - utf8_stream = utf8(pkg_resources.resource_stream('encoded', 'tests/data/workbook-inserts/%s.json' % name)) - type_length_dict[name] = len(json.load(utf8_stream)) - except Exception: - type_length_dict[name] = 0 - - return type_length_dict - - -TYPE_LENGTH = _type_length() - -INDEX_DATA_TYPES = ['file_fastq', 'workflow_run_awsem', 'biosample', 'experiment_set'] - -PUBLIC_COLLECTIONS = [ - 'source', - 'platform', - 'treatment', - 'lab', - 'award', - 'target', - 'organism', -] - - -def test_home(anonhtmltestapp): - res = anonhtmltestapp.get('/', status=200) - assert res.body.startswith(b'') 
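# A hedged sketch of how a fixture like anonhtmltestapp could be built on
# webtest (an assumption for illustration only; the project's real fixtures
# live in the shared conftest and the fixture name below is hypothetical):
@pytest.fixture
def anonhtmltestapp_sketch(app):
    # Anonymous client whose default WSGI environ asks for HTML responses.
    import webtest
    return webtest.TestApp(app, extra_environ={'HTTP_ACCEPT': 'text/html'})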
-
-
-def test_home_json(testapp):
-    res = testapp.get('/', status=200)
-    assert res.json['@type']
-
-
-def test_home_app_version(testapp):
-    res = testapp.get('/', status=200)
-    assert 'app_version' in res.json
-
-
-def test_vary_html(anonhtmltestapp):
-    res = anonhtmltestapp.get('/', status=200)
-    assert res.vary is not None
-    assert 'Accept' in res.vary
-
-
-def test_vary_json(anontestapp):
-    res = anontestapp.get('/', status=200)
-    assert res.vary is not None
-    assert 'Accept' in res.vary
-
-
-def test_get_health_page(testapp):
-    """
-    Tests that we can get the health page and various fields we expect are there
-    """
-    res = testapp.get('/health', status=200).json
-    assert 'namespace' in res
-    assert 'blob_bucket' in res
-    assert 'elasticsearch' in res
-
-
-@pytest.mark.parametrize('item_type', [k for k in TYPE_LENGTH if k != 'user'])
-def test_collections_anon(anontestapp, item_type):
-    res = anontestapp.get('/' + item_type).follow(status=200)
-    assert '@graph' in res.json
-
-
-@pytest.mark.parametrize('item_type', [k for k in TYPE_LENGTH if k != 'user'])
-def test_html_collections_anon(anonhtmltestapp, item_type):
-    res = anonhtmltestapp.get('/' + item_type).follow(status=200)
-    assert res.body.startswith(b'<!DOCTYPE html>')
-
-
-@pytest.mark.parametrize('item_type', TYPE_LENGTH)
-def test_html_collections(htmltestapp, item_type):
-    res = htmltestapp.get('/' + item_type).follow(status=200)
-    assert res.body.startswith(b'<!DOCTYPE html>')
-
-
-@pytest.mark.slow
-@pytest.mark.parametrize('item_type', [k for k in TYPE_LENGTH if k != 'user'])
-def test_html_server_pages(item_type, htmltestapp):
-    res = htmltestapp.get(
-        '/%s?limit=1' % item_type,
-        headers={'Accept': 'application/json'},
-    ).follow(
-        status=200,
-        headers={'Accept': 'application/json'},
-    )
-    for item in res.json['@graph']:
-        res = htmltestapp.get(item['@id'], status=200)
-        assert res.body.startswith(b'<!DOCTYPE html>')
-        assert b'Internal Server Error' not in res.body
-
-
-@pytest.mark.parametrize('item_type', TYPE_LENGTH)
-def test_json(testapp, item_type):
-    res = testapp.get('/' + item_type).follow(status=200)
-    assert res.json['@type']
-
-
-def test_json_basic_auth(anonhtmltestapp):
-    url = '/'
-    value = "Authorization: Basic %s" % ascii_native_(b64encode(b'nobody:pass'))
-    res = anonhtmltestapp.get(url, headers={'Authorization': value}, status=401)
-    assert res.content_type == 'application/json'
-
-
-def _test_antibody_approval_creation(testapp):
-    new_antibody = {'foo': 'bar'}
-    res = testapp.post_json('/antibodies/', new_antibody, status=201)
-    assert res.location
-    assert '/profiles/result' in res.json['@type']['profile']
-    assert res.json['@graph'] == [{'href': urlparse(res.location).path}]
-    res = testapp.get(res.location, status=200)
-    assert '/profiles/antibody_approval' in res.json['@type']
-    data = res.json
-    for key in new_antibody:
-        assert data[key] == new_antibody[key]
-    res = testapp.get('/antibodies/', status=200)
-    assert len(res.json['@graph']) == 1
-
-
-def test_load_sample_data(
-        analysis_step,
-        award,
-        human_biosample,
-        construct,
-        document,
-        experiment,
-        file,
-        lab,
-        organism,
-        publication,
-        publication_tracking,
-        software,
-        human_biosource,
-        submitter,
-        workflow_mapping,
-        workflow_run_sbg,
-        workflow_run_awsem,
-        ):
-    assert True, 'Fixtures have loaded sample data'
-
-
-def test_abstract_collection(testapp, experiment):
-    # TODO: ASK_BEN how to get experiment to function as catch all
-    pass
-    # testapp.get('/experiment/{accession}'.format(**experiment))
-    # testapp.get('/expermient/{accession}'.format(**experiment))
-
-
-def test_collection_post(testapp):
-    item = {
-        'name': 'human',
-        'scientific_name': 'Homo sapiens',
-        'taxon_id': '9606',
-    }
-    return testapp.post_json('/organism', item, status=201)
-
-
-def test_collection_post_bad_json(testapp):
-    item = {'foo': 'bar'}
-    res = testapp.post_json('/organism', item, status=422)
-    assert res.json['errors']
-
-
-def test_collection_post_malformed_json(testapp):
-    item = '{'
-    headers = {'Content-Type': 'application/json'}
-    res = testapp.post('/organism', item, status=400, headers=headers)
-    assert res.json['detail'].startswith('Expecting')
-
-
-def test_collection_post_missing_content_type(testapp):
-    item = '{}'
-    testapp.post('/organism', item, status=415)
-
-
-def test_collection_post_bad_(anontestapp):
-    value = "Authorization: Basic %s" % ascii_native_(b64encode(b'nobody:pass'))
-    anontestapp.post_json('/organism', {}, headers={'Authorization': value}, status=401)
-
-
-def test_item_actions_filtered_by_permission(testapp, authenticated_testapp, human_biosource):
-    location = human_biosource['@id'] + '?frame=page'
-
-    res = testapp.get(location)
-    assert any(action for action in res.json.get('actions', []) if action['name'] == 'edit')
-
-    res = authenticated_testapp.get(location)
-    assert not any(action for action in res.json.get('actions', []) if action['name'] == 'edit')
-
-
-def test_collection_put(testapp, execute_counter):
-    initial = {
-        "name": "human",
-        "scientific_name": "Homo sapiens",
-        "taxon_id": "9606",
-    }
-    item_url = testapp.post_json('/organism', initial).location
-
-    with execute_counter.expect(1):
-        item = testapp.get(item_url + '?frame=object').json
-
-    for key in initial:
-        assert item[key] == initial[key]
-
-    update = {
-        'name': 'mouse',
-        'scientific_name': 'Mus musculus',
-        'taxon_id': '10090',
-    }
-    testapp.put_json(item_url, update, status=200)
-    res = testapp.get('/' + item['uuid'] + '?frame=object').follow().json
-
-    for key in update:
-        assert res[key] == update[key]
-
-
-def test_post_duplicate_uuid(testapp, mouse):
-    item = {
-        'uuid': mouse['uuid'],
-        'name': 'human',
-        'scientific_name': 'Homo sapiens',
-        'taxon_id': '9606',
-    }
-    testapp.post_json('/organism', item, status=409)
-
-
-def test_user_effective_principals(submitter, lab, anontestapp, execute_counter):
-    email = submitter['email']
-    with execute_counter.expect(1):
-        res = anontestapp.get('/@@testing-user',
-                              extra_environ={'REMOTE_USER': str(email)})
-    assert sorted(res.json['effective_principals']) == [
-        'group.submitter',
-        'lab.%s' % lab['uuid'],
-        'remoteuser.%s' % email,
-        'submits_for.%s' % lab['uuid'],
-        'system.Authenticated',
-        'system.Everyone',
-        'userid.%s' % submitter['uuid'],
-        'viewing_group.4DN',
-    ]
-
-
-def test_jsonld_context(testapp):
-    res = testapp.get('/terms/')
-    assert res.json
-
-
-def test_jsonld_term(testapp):
-    res = testapp.get('/terms/submitted_by')
-    assert res.json
-
-
-@pytest.mark.parametrize('item_type', TYPE_LENGTH)
-def test_profiles(testapp, item_type):
-    # this will only be non-abstract types
-    res = testapp.get('/profiles/%s.json' % item_type).maybe_follow(status=200)
-    errors = Draft202012Validator.check_schema(res.json)
-    assert not errors
-    # added from snovault.schema_views._annotated_schema
-    assert 'rdfs:seeAlso' in res.json
-    assert 'rdfs:subClassOf' in res.json
-    assert 'children' in res.json
-    assert res.json['isAbstract'] is False
-
-
-def test_profiles_all(testapp, registry):
-    res = testapp.get('/profiles/').maybe_follow(status=200)
-    # make sure all types are present, including abstract types
-    for ti in registry[TYPES].by_item_type.values():
-        assert ti.name in res.json
-        assert res.json[ti.name]['isAbstract'] is False
-    for ti in registry[TYPES].by_abstract_type.values():
-        assert ti.name in res.json
-        assert res.json[ti.name]['isAbstract'] is True
-
-
-def test_bad_frame(testapp, human):
-    res = testapp.get(human['@id'] + '?frame=bad', status=404)
-    assert res.json['detail'] == '?frame=bad'