diff --git a/.travis.yml b/.travis.yml
index 43b0bac2..79a5fff8 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,25 +1,22 @@
-# the new trusty images of Travis cause build errors with psycopg2, see https://github.com/travis-ci/travis-ci/issues/8897
-dist: trusty
-group: deprecated-2017Q4
-
+os: linux
+dist: focal
language: python
-install:
- - bash bin/travis-build.bash
services:
+ - docker
- redis
- - postgresql
+install: bash bin/travis-build.bash
script: bash bin/travis-run.bash
-before_install:
- - pip install codecov
-after_success:
- - codecov
+
+stages:
+ - Flake8
+ - Tests
jobs:
include:
- stage: Flake8
- python: 2.7
+ python: 3.6
env: FLAKE8=True
install:
- pip install flake8==3.5.0
@@ -30,21 +27,60 @@ jobs:
- flake8 . --count --select=E901,E999,F821,F822,F823 --show-source --statistics --exclude ckan
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
- flake8 . --count --max-line-length=127 --statistics --exclude ckan --exit-zero
+
- stage: Tests
- python: "2.7"
+ python: "3.6"
env: CKANVERSION=master
+ services:
+ - postgresql
+ - redis
+ - docker
+
+ - python: "2.7"
+ env: CKANVERSION=2.9
+ services:
+ - postgresql
+ - redis
+ - docker
+
+
- python: "3.6"
- env: CKANVERSION=master
+ env: CKANVERSION=2.9
+ services:
+ - postgresql
+ - redis
+ - docker
+
- python: "2.7"
env: CKANVERSION=2.8
+ addons:
+ postgresql: '11'
+ apt:
+ sources:
+ - sourceline: 'deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main'
+ packages:
+ - postgresql-11
+
- python: "2.7"
env: CKANVERSION=2.7
+ addons:
+ postgresql: '9.6'
+ apt:
+ sources:
+ - sourceline: 'deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main'
+ packages:
+ - postgresql-9.6
+
- python: "2.7"
env: CKANVERSION=2.6
- - python: "2.7"
- env: CKANVERSION=2.5
- - python: "2.7"
- env: CKANVERSION=2.4
+ addons:
+ postgresql: '9.6'
+ apt:
+ sources:
+ - sourceline: 'deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main'
+ packages:
+ - postgresql-9.6
+
cache:
directories:
diff --git a/bin/travis-build.bash b/bin/travis-build.bash
index 3dbc7649..e10ec723 100644
--- a/bin/travis-build.bash
+++ b/bin/travis-build.bash
@@ -12,11 +12,6 @@ fi
export PYTHON_MAJOR_VERSION=${TRAVIS_PYTHON_VERSION%.*}
-
-echo "Installing the packages that CKAN requires..."
-sudo apt-get update -qq
-sudo apt-get install solr-jetty
-
echo "Installing CKAN and its Python dependencies..."
git clone https://github.com/ckan/ckan
cd ckan
@@ -29,48 +24,48 @@ else
echo "CKAN version: ${CKAN_TAG#ckan-}"
fi
-# install the recommended version of setuptools
+echo "Installing the recommended setuptools requirement"
if [ -f requirement-setuptools.txt ]
then
- echo "Updating setuptools..."
pip install -r requirement-setuptools.txt
fi
python setup.py develop
-
-# TODO: remove once 2.5.3 is relesed
-# Pin this as newer versions installed by RDFLib give setuptools troubles
-pip install "html5lib==0.9999999"
-
-if [ $CKANVERSION == '2.7' ]
-then
- echo "Installing setuptools"
- pip install setuptools==39.0.1
-fi
-
if (( $CKAN_MINOR_VERSION >= 9 )) && (( $PYTHON_MAJOR_VERSION == 2 ))
then
pip install -r requirements-py2.txt
else
pip install -r requirements.txt
fi
+
pip install -r dev-requirements.txt
cd -
echo "Setting up Solr..."
-printf "NO_START=0\nJETTY_HOST=127.0.0.1\nJETTY_PORT=8983\nJAVA_HOME=$JAVA_HOME" | sudo tee /etc/default/jetty
-sudo cp ckan/ckan/config/solr/schema.xml /etc/solr/conf/schema.xml
-sudo service jetty restart
+docker run --name ckan-solr -p 8983:8983 -d openknowledge/ckan-solr-dev:$CKANVERSION
+
+echo "Setting up Postgres..."
+export PG_VERSION="$(pg_lsclusters | grep online | awk '{print $1}')"
+export PG_PORT="$(pg_lsclusters | grep online | awk '{print $3}')"
+echo "Using Postgres $PG_VERSION on port $PG_PORT"
+if [ "$PG_PORT" != "5432" ]
+then
+ echo "Using non-standard Postgres port, updating configuration..."
+ sed -i -e "s/postgresql:\/\/ckan_default:pass@localhost\/ckan_test/postgresql:\/\/ckan_default:pass@localhost:$PG_PORT\/ckan_test/" ckan/test-core.ini
+ sed -i -e "s/postgresql:\/\/ckan_default:pass@localhost\/datastore_test/postgresql:\/\/ckan_default:pass@localhost:$PG_PORT\/datastore_test/" ckan/test-core.ini
+ sed -i -e "s/postgresql:\/\/datastore_default:pass@localhost\/datastore_test/postgresql:\/\/datastore_default:pass@localhost:$PG_PORT\/datastore_test/" ckan/test-core.ini
+fi
+
echo "Creating the PostgreSQL user and database..."
-sudo -u postgres psql -c "CREATE USER ckan_default WITH PASSWORD 'pass';"
-sudo -u postgres psql -c 'CREATE DATABASE ckan_test WITH OWNER ckan_default;'
+sudo -u postgres psql -p $PG_PORT -c "CREATE USER ckan_default WITH PASSWORD 'pass';"
+sudo -u postgres psql -p $PG_PORT -c "CREATE USER datastore_default WITH PASSWORD 'pass';"
+sudo -u postgres psql -p $PG_PORT -c 'CREATE DATABASE ckan_test WITH OWNER ckan_default;'
+sudo -u postgres psql -p $PG_PORT -c 'CREATE DATABASE datastore_test WITH OWNER ckan_default;'
echo "Initialising the database..."
cd ckan
-
-
if (( $CKAN_MINOR_VERSION >= 9 ))
then
ckan -c test-core.ini db init
@@ -102,6 +97,5 @@ python setup.py develop
echo "Moving test.ini into a subdir..."
mkdir subdir
mv test.ini subdir
-mv test-nose.ini subdir
echo "travis-build.bash is done."
diff --git a/bin/travis-run.bash b/bin/travis-run.bash
index c2a2b2ab..9ed18e1b 100644
--- a/bin/travis-run.bash
+++ b/bin/travis-run.bash
@@ -1,17 +1,4 @@
#!/bin/bash
set -e
-if [ $CKANVERSION == 'master' ]
-then
- export CKAN_MINOR_VERSION=100
-else
- export CKAN_MINOR_VERSION=${CKANVERSION##*.}
-fi
-
-
-if (( $CKAN_MINOR_VERSION >= 9 ))
-then
- pytest --ckan-ini=subdir/test.ini --cov=ckanext.dcat ckanext/dcat/tests
-else
- nosetests --ckan --nologcapture --with-pylons=subdir/test-nose.ini --with-coverage --cover-package=ckanext.dcat --cover-inclusive --cover-erase --cover-tests ckanext/dcat/tests/nose
-fi
+pytest --ckan-ini=subdir/test.ini --cov=ckanext.dcat ckanext/dcat/tests
diff --git a/ckanext/dcat/plugins/__init__.py b/ckanext/dcat/plugins/__init__.py
index c9b59363..5d78a824 100644
--- a/ckanext/dcat/plugins/__init__.py
+++ b/ckanext/dcat/plugins/__init__.py
@@ -80,6 +80,7 @@ def update_config(self, config):
def get_helpers(self):
return {
'helper_available': utils.helper_available,
+ 'dcat_get_endpoint': utils.get_endpoint,
}
# IActions
diff --git a/ckanext/dcat/profiles.py b/ckanext/dcat/profiles.py
index 698014d4..b90059d9 100644
--- a/ckanext/dcat/profiles.py
+++ b/ckanext/dcat/profiles.py
@@ -459,10 +459,7 @@ def _access_rights(self, subject, predicate):
if isinstance(obj, BNode) and self._object(obj, RDF.type) == DCT.RightsStatement:
result = self._object_value(obj, RDFS.label)
elif isinstance(obj, Literal):
- if six.PY2:
- result = unicode(obj)
- else:
- result = str(obj)
+ result = six.text_type(obj)
return result
def _distribution_format(self, distribution, normalize_ckan_format=True):
diff --git a/ckanext/dcat/templates/home/index.html b/ckanext/dcat/templates/home/index.html
index b72050fa..6bc53755 100644
--- a/ckanext/dcat/templates/home/index.html
+++ b/ckanext/dcat/templates/home/index.html
@@ -1,7 +1,7 @@
{% ckan_extends %}
{% block links %}
{{ super() }}
- {% with endpoint='dcat.read_catalog' if h.ckan_version() > '2.9' else 'dcat_catalog' %}
+ {% with endpoint=h.dcat_get_endpoint('catalog') %}
diff --git a/ckanext/dcat/templates/package/read_base.html b/ckanext/dcat/templates/package/read_base.html
index e1cf784e..7811a2ec 100644
--- a/ckanext/dcat/templates/package/read_base.html
+++ b/ckanext/dcat/templates/package/read_base.html
@@ -1,7 +1,7 @@
{% ckan_extends %}
{% block links %}
{{ super() }}
- {% with endpoint='dcat.read_dataset' if h.ckan_version() > '2.9' else 'dcat_dataset' %}
+ {% with endpoint=h.dcat_get_endpoint('dataset') %}
diff --git a/ckanext/dcat/templates/package/search.html b/ckanext/dcat/templates/package/search.html
index b72050fa..6bc53755 100644
--- a/ckanext/dcat/templates/package/search.html
+++ b/ckanext/dcat/templates/package/search.html
@@ -1,7 +1,7 @@
{% ckan_extends %}
{% block links %}
{{ super() }}
- {% with endpoint='dcat.read_catalog' if h.ckan_version() > '2.9' else 'dcat_catalog' %}
+ {% with endpoint=h.dcat_get_endpoint('catalog') %}
diff --git a/ckanext/dcat/tests/nose/__init__.py b/ckanext/dcat/tests/nose/__init__.py
deleted file mode 100644
index 8fbc68a1..00000000
--- a/ckanext/dcat/tests/nose/__init__.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from ckan.tests.helpers import FunctionalTestBase
-
-from ckanext.harvest.model import setup as harvest_setup
-
-
-class DCATFunctionalTestBase(FunctionalTestBase):
-
- def setup(self):
-
- super(DCATFunctionalTestBase, self).setup()
-
- harvest_setup()
diff --git a/ckanext/dcat/tests/nose/test_base_parser.py b/ckanext/dcat/tests/nose/test_base_parser.py
deleted file mode 100644
index 0bf21487..00000000
--- a/ckanext/dcat/tests/nose/test_base_parser.py
+++ /dev/null
@@ -1,276 +0,0 @@
-from builtins import str
-from builtins import object
-import nose
-
-from ckantoolkit import config
-
-from rdflib import Graph, URIRef, Literal
-from rdflib.namespace import Namespace, RDF
-
-from ckanext.dcat.processors import (
- RDFParser,
- RDFParserException,
- RDFProfileException,
- DEFAULT_RDF_PROFILES,
- RDF_PROFILES_CONFIG_OPTION
-)
-
-from ckanext.dcat.profiles import RDFProfile
-
-DCT = Namespace("http://purl.org/dc/terms/")
-DCAT = Namespace("http://www.w3.org/ns/dcat#")
-
-eq_ = nose.tools.eq_
-
-
-def _default_graph():
-
- g = Graph()
-
- dataset1 = URIRef("http://example.org/datasets/1")
- g.add((dataset1, RDF.type, DCAT.Dataset))
- g.add((dataset1, DCT.title, Literal('Test Dataset 1')))
-
- distribution1_1 = URIRef("http://example.org/datasets/1/ds/1")
- g.add((distribution1_1, RDF.type, DCAT.Distribution))
- distribution1_2 = URIRef("http://example.org/datasets/1/ds/2")
- g.add((distribution1_2, RDF.type, DCAT.Distribution))
-
- g.add((dataset1, DCAT.distribution, distribution1_1))
- g.add((dataset1, DCAT.distribution, distribution1_2))
-
- dataset2 = URIRef("http://example.org/datasets/2")
- g.add((dataset2, RDF.type, DCAT.Dataset))
- g.add((dataset2, DCT.title, Literal('Test Dataset 2')))
-
- distribution2_1 = URIRef("http://example.org/datasets/2/ds/1")
- g.add((distribution2_1, RDF.type, DCAT.Distribution))
- g.add((dataset2, DCAT.distribution, distribution2_1))
-
- dataset3 = URIRef("http://example.org/datasets/3")
- g.add((dataset3, RDF.type, DCAT.Dataset))
- g.add((dataset3, DCT.title, Literal('Test Dataset 3')))
-
- return g
-
-
-class MockRDFProfile1(RDFProfile):
-
- def parse_dataset(self, dataset_dict, dataset_ref):
-
- dataset_dict['profile_1'] = True
-
- return dataset_dict
-
-
-class MockRDFProfile2(RDFProfile):
-
- def parse_dataset(self, dataset_dict, dataset_ref):
-
- dataset_dict['profile_2'] = True
-
- return dataset_dict
-
-
-class TestRDFParser(object):
-
- def test_default_profile(self):
-
- p = RDFParser()
-
- eq_(sorted([pr.name for pr in p._profiles]),
- sorted(DEFAULT_RDF_PROFILES))
-
- def test_profiles_via_config_option(self):
-
- original_config = config.copy()
-
- config[RDF_PROFILES_CONFIG_OPTION] = 'profile_conf_1 profile_conf_2'
- try:
- RDFParser()
- except RDFProfileException as e:
-
- eq_(str(e), 'Unknown RDF profiles: profile_conf_1, profile_conf_2')
-
- config.clear()
- config.update(original_config)
-
- def test_no_profile_provided(self):
- try:
- RDFParser(profiles=[])
- except RDFProfileException as e:
-
- eq_(str(e), 'No suitable RDF profiles could be loaded')
-
- def test_profile_not_found(self):
- try:
- RDFParser(profiles=['not_found'])
- except RDFProfileException as e:
-
- eq_(str(e), 'Unknown RDF profiles: not_found')
-
- def test_profiles_are_called_on_datasets(self):
-
- p = RDFParser()
-
- p._profiles = [MockRDFProfile1, MockRDFProfile2]
-
- p.g = _default_graph()
-
- for dataset in p.datasets():
- assert dataset['profile_1']
- assert dataset['profile_2']
-
- def test_parse_data(self):
-
- data = '''
-
-
- Some label
-
-
- '''
-
- p = RDFParser()
-
- eq_(len(p.g), 0)
-
- p.parse(data)
-
- eq_(len(p.g), 2)
-
- def test_parse_pagination_next_page(self):
-
- data = '''
-
-
- 245
- http://example.com/catalog.xml?page=3
- 100
- http://example.com/catalog.xml?page=2
- http://example.com/catalog.xml?page=1
-
-
- '''
-
- p = RDFParser()
-
- p.parse(data)
-
- eq_(p.next_page(), 'http://example.com/catalog.xml?page=2')
-
- def test_parse_without_pagination(self):
-
- data = '''
-
-
- Some label
-
-
- '''
-
- p = RDFParser()
-
- p.parse(data)
-
- eq_(p.next_page(), None)
-
- def test_parse_pagination_last_page(self):
-
- data = '''
-
-
- 245
- http://example.com/catalog.xml?page=3
- 100
- http://example.com/catalog.xml?page=1
- http://example.com/catalog.xml?page=2
-
-
- '''
-
- p = RDFParser()
-
- p.parse(data)
-
- eq_(p.next_page(), None)
-
- def test_parse_data_different_format(self):
-
- data = '''
- @prefix rdf: .
- @prefix rdfs: .
-
- a rdfs:SomeClass ;
- rdfs:label "Some label" .
- '''
-
- p = RDFParser()
-
- eq_(len(p.g), 0)
-
- p.parse(data, _format='n3')
-
- eq_(len(p.g), 2)
-
- def test_parse_data_raises_on_parse_error(self):
-
- p = RDFParser()
-
- data = 'Wrong data'
-
- nose.tools.assert_raises(RDFParserException, p.parse, '')
-
- nose.tools.assert_raises(RDFParserException, p.parse, data)
-
- nose.tools.assert_raises(RDFParserException, p.parse, data,
- _format='n3',)
-
- def test__datasets(self):
-
- p = RDFParser()
-
- p.g = _default_graph()
-
- eq_(len([d for d in p._datasets()]), 3)
-
- def test__datasets_none_found(self):
-
- p = RDFParser()
-
- p.g = Graph()
-
- eq_(len([d for d in p._datasets()]), 0)
-
- def test_datasets(self):
-
- p = RDFParser()
-
- p.g = _default_graph()
-
- datasets = []
- for dataset in p.datasets():
-
- assert 'title' in dataset
-
- datasets.append(dataset)
-
- eq_(len(datasets), 3)
-
- def test_datasets_none_found(self):
-
- p = RDFParser()
-
- p.g = Graph()
-
- eq_(len([d for d in p.datasets()]), 0)
diff --git a/ckanext/dcat/tests/nose/test_base_profile.py b/ckanext/dcat/tests/nose/test_base_profile.py
deleted file mode 100644
index 5eae55c9..00000000
--- a/ckanext/dcat/tests/nose/test_base_profile.py
+++ /dev/null
@@ -1,409 +0,0 @@
-from builtins import str
-from builtins import object
-import nose
-
-from rdflib import Graph, URIRef, Literal
-from rdflib.namespace import Namespace
-
-from ckantoolkit.tests import helpers
-
-from ckanext.dcat.profiles import RDFProfile, CleanedURIRef
-
-from ckanext.dcat.tests.nose.test_base_parser import _default_graph
-
-
-eq_ = nose.tools.eq_
-
-DCT = Namespace("http://purl.org/dc/terms/")
-TEST = Namespace("http://test.org/")
-DCAT = Namespace("http://www.w3.org/ns/dcat#")
-ADMS = Namespace("http://www.w3.org/ns/adms#")
-
-
-class TestURIRefPreprocessing(object):
-
- def test_with_valid_items(self):
- testUriPart = "://www.w3.org/ns/dcat#"
-
- for prefix in ['http', 'https']:
- eq_(CleanedURIRef(prefix + testUriPart), URIRef(prefix + testUriPart))
- # leading and trailing whitespace should be removed
- eq_(CleanedURIRef(' ' + prefix + testUriPart + ' '), URIRef(prefix + testUriPart))
-
- testNonHttpUri = "mailto:someone@example.com"
- eq_(CleanedURIRef(testNonHttpUri), URIRef(testNonHttpUri))
- # leading and trailing whitespace should be removed again
- eq_(CleanedURIRef(' ' + testNonHttpUri + ' '), URIRef(testNonHttpUri))
-
- def test_with_invalid_items(self):
- testUriPart = "://www.w3.org/ns/!dcat #"
- expectedUriPart = "://www.w3.org/ns/%21dcat%20#"
-
- for prefix in ['http', 'https']:
- eq_(CleanedURIRef(prefix + testUriPart), URIRef(prefix + expectedUriPart))
- # applying on escaped data should have no effect
- eq_(CleanedURIRef(prefix + expectedUriPart), URIRef(prefix + expectedUriPart))
-
- # leading and trailing space should not be escaped
- testNonHttpUri = " mailto:with space!@example.com "
- expectedNonHttpUri = "mailto:with%20space%21@example.com"
-
- eq_(CleanedURIRef(testNonHttpUri), URIRef(expectedNonHttpUri))
- # applying on escaped data should have no effect
- eq_(CleanedURIRef(expectedNonHttpUri), URIRef(expectedNonHttpUri))
-
-
-class TestBaseRDFProfile(object):
-
- def test_datasets(self):
-
- p = RDFProfile(_default_graph())
-
- eq_(len([d for d in p._datasets()]), 3)
-
- def test_datasets_none_found(self):
-
- p = RDFProfile(Graph())
-
- eq_(len([d for d in p._datasets()]), 0)
-
- def test_distributions(self):
-
- p = RDFProfile(_default_graph())
-
- for dataset in p._datasets():
- if str(dataset) == 'http://example.org/datasets/1':
- eq_(len([d for d in p._distributions(dataset)]), 2)
- elif str(dataset) == 'http://example.org/datasets/2':
- eq_(len([d for d in p._distributions(dataset)]), 1)
- elif str(dataset) == 'http://example.org/datasets/3':
- eq_(len([d for d in p._distributions(dataset)]), 0)
-
- def test_object(self):
-
- p = RDFProfile(_default_graph())
-
- _object = p._object(URIRef('http://example.org/datasets/1'),
- DCT.title)
-
- assert isinstance(_object, Literal)
- eq_(str(_object), 'Test Dataset 1')
-
- def test_object_not_found(self):
-
- p = RDFProfile(_default_graph())
-
- _object = p._object(URIRef('http://example.org/datasets/1'),
- DCT.unknown_property)
-
- eq_(_object, None)
-
- def test_object_value(self):
-
- p = RDFProfile(_default_graph())
-
- value = p._object_value(URIRef('http://example.org/datasets/1'),
- DCT.title)
-
- assert isinstance(value, str)
- eq_(value, 'Test Dataset 1')
-
- def test_object_value_not_found(self):
-
- p = RDFProfile(_default_graph())
-
- value = p._object_value(URIRef('http://example.org/datasets/1'),
- DCT.unknown_property)
-
- eq_(value, '')
-
- @helpers.change_config('ckan.locale_default', 'de')
- def test_object_value_default_lang(self):
- p = RDFProfile(_default_graph())
-
- p.g.add((URIRef('http://example.org/datasets/1'),
- DCT.title, Literal('Test Datensatz 1', lang='de')))
- p.g.add((URIRef('http://example.org/datasets/1'),
- DCT.title, Literal('Test Dataset 1 (EN)', lang='en')))
-
- value = p._object_value(URIRef('http://example.org/datasets/1'),
- DCT.title)
-
- assert isinstance(value, str)
- eq_(value, 'Test Datensatz 1')
-
- @helpers.change_config('ckan.locale_default', 'fr')
- def test_object_value_default_lang_not_in_graph(self):
- p = RDFProfile(_default_graph())
-
- p.g.add((URIRef('http://example.org/datasets/1'),
- DCT.title, Literal('Test Datensatz 1', lang='de')))
-
- value = p._object_value(URIRef('http://example.org/datasets/1'),
- DCT.title)
-
- assert isinstance(value, str)
- # FR is not in graph, so either node may be used
- assert value.startswith('Test D')
- assert value.endswith(' 1')
-
- def test_object_value_default_lang_fallback(self):
- p = RDFProfile(_default_graph())
-
- p.g.add((URIRef('http://example.org/datasets/1'),
- DCT.title, Literal('Test Datensatz 1', lang='de')))
- p.g.add((URIRef('http://example.org/datasets/1'),
- DCT.title, Literal('Test Dataset 1 (EN)', lang='en')))
-
- value = p._object_value(URIRef('http://example.org/datasets/1'),
- DCT.title)
-
- assert isinstance(value, str)
- # without config parameter, EN is used as default
- eq_(value, 'Test Dataset 1 (EN)')
-
- def test_object_value_default_lang_missing_lang_param(self):
- p = RDFProfile(_default_graph())
-
- value = p._object_value(URIRef('http://example.org/datasets/1'),
- DCT.title)
-
- assert isinstance(value, str)
- eq_(value, 'Test Dataset 1')
-
- def test_object_int(self):
-
- p = RDFProfile(_default_graph())
-
- p.g.add((URIRef('http://example.org/datasets/1'),
- TEST.some_number,
- Literal('23')))
-
- value = p._object_value_int(URIRef('http://example.org/datasets/1'),
- TEST.some_number)
-
- assert isinstance(value, int)
- eq_(value, 23)
-
- def test_object_int_decimal(self):
-
- p = RDFProfile(_default_graph())
-
- p.g.add((URIRef('http://example.org/datasets/1'),
- TEST.some_number,
- Literal('23.0')))
-
- value = p._object_value_int(URIRef('http://example.org/datasets/1'),
- TEST.some_number)
-
- assert isinstance(value, int)
- eq_(value, 23)
-
- def test_object_int_not_found(self):
-
- p = RDFProfile(_default_graph())
-
- value = p._object_value_int(URIRef('http://example.org/datasets/1'),
- TEST.some_number)
-
- eq_(value, None)
-
- def test_object_int_wrong_value(self):
-
- p = RDFProfile(_default_graph())
-
- p.g.add((URIRef('http://example.org/datasets/1'),
- TEST.some_number,
- Literal('Not an intger')))
-
- value = p._object_value_int(URIRef('http://example.org/datasets/1'),
- TEST.some_number)
-
- eq_(value, None)
-
- def test_object_list(self):
-
- p = RDFProfile(_default_graph())
-
- p.g.add((URIRef('http://example.org/datasets/1'),
- DCAT.keyword,
- Literal('space')))
- p.g.add((URIRef('http://example.org/datasets/1'),
- DCAT.keyword,
- Literal('moon')))
-
- value = p._object_value_list(URIRef('http://example.org/datasets/1'),
- DCAT.keyword)
-
- assert isinstance(value, list)
- assert isinstance(value[0], str)
- eq_(len(value), 2)
- eq_(sorted(value), ['moon', 'space'])
-
- def test_object_list_not_found(self):
-
- p = RDFProfile(_default_graph())
-
- value = p._object_value_list(URIRef('http://example.org/datasets/1'),
- TEST.some_list)
-
- assert isinstance(value, list)
- eq_(value, [])
-
- def test_time_interval_schema_org(self):
-
- data = '''
-
-
-
-
- 1905-03-01
- 2013-01-05
-
-
-
-
- '''
-
- g = Graph()
-
- g.parse(data=data)
-
- p = RDFProfile(g)
-
- start, end = p._time_interval(URIRef('http://example.org'), DCT.temporal)
-
- eq_(start, '1905-03-01')
- eq_(end, '2013-01-05')
-
- def test_time_interval_w3c_time(self):
-
- data = '''
-
-
-
-
-
-
- 1904
-
-
-
-
- 2014-03-22
-
-
-
-
-
-
- '''
-
- g = Graph()
-
- g.parse(data=data)
-
- p = RDFProfile(g)
-
- start, end = p._time_interval(URIRef('http://example.org'), DCT.temporal)
-
- eq_(start, '1904-01-01')
- eq_(end, '2014-03-22')
-
- def test_publisher_foaf(self):
-
- data = '''
-
-
-
-
- Publishing Organization for dataset 1
- contact@some.org
- http://some.org
-
-
-
-
-
- '''
-
- g = Graph()
-
- g.parse(data=data)
-
- p = RDFProfile(g)
-
- publisher = p._publisher(URIRef('http://example.org'), DCT.publisher)
-
- eq_(publisher['uri'], 'http://orgs.vocab.org/some-org')
- eq_(publisher['name'], 'Publishing Organization for dataset 1')
- eq_(publisher['email'], 'contact@some.org')
- eq_(publisher['url'], 'http://some.org')
- eq_(publisher['type'], 'http://purl.org/adms/publishertype/NonProfitOrganisation')
-
- def test_publisher_ref(self):
-
- data = '''
-
-
-
-
-
- '''
-
- g = Graph()
-
- g.parse(data=data)
-
- p = RDFProfile(g)
-
- publisher = p._publisher(URIRef('http://example.org'), DCT.publisher)
-
- eq_(publisher['uri'], 'http://orgs.vocab.org/some-org')
-
- def test_contact_details(self):
-
- data = '''
-
-
-
-
- Point of Contact
-
-
-
-
-
- '''
-
- g = Graph()
-
- g.parse(data=data)
-
- p = RDFProfile(g)
-
- contact = p._contact_details(URIRef('http://example.org'), ADMS.contactPoint)
-
- eq_(contact['name'], 'Point of Contact')
- # mailto gets removed for storage and is added again on output
- eq_(contact['email'], 'contact@some.org')
diff --git a/ckanext/dcat/tests/nose/test_controllers.py b/ckanext/dcat/tests/nose/test_controllers.py
deleted file mode 100644
index d76d177f..00000000
--- a/ckanext/dcat/tests/nose/test_controllers.py
+++ /dev/null
@@ -1,681 +0,0 @@
-# -*- coding: utf-8 -*-
-from builtins import str
-from builtins import range
-import time
-import nose
-
-from six.moves import xrange
-
-from ckan import plugins as p
-from ckan.lib.helpers import url_for
-
-from rdflib import Graph
-
-from ckantoolkit.tests import helpers, factories
-
-from ckanext.dcat.processors import RDFParser
-from ckanext.dcat.profiles import RDF, DCAT
-from ckanext.dcat.processors import HYDRA
-
-from ckanext.dcat.tests.nose import DCATFunctionalTestBase
-
-eq_ = nose.tools.eq_
-assert_true = nose.tools.assert_true
-assert_in = nose.tools.assert_in
-
-
-class TestEndpoints(DCATFunctionalTestBase):
-
- def setup(self):
- super(TestEndpoints, self).setup()
- if not p.plugin_loaded('dcat'):
- p.load('dcat')
-
- def teardown(self):
- p.unload('dcat')
-
- def _object_value(self, graph, subject, predicate):
-
- objects = [o for o in graph.objects(subject, predicate)]
- return str(objects[0]) if objects else None
-
- def test_dataset_default(self):
-
- dataset = factories.Dataset(
- notes='Test dataset'
- )
-
- url = url_for('dcat_dataset', _id=dataset['name'], _format='rdf')
-
- app = self._get_test_app()
-
- response = app.get(url)
-
- eq_(response.headers['Content-Type'], 'application/rdf+xml')
-
- content = response.body
-
- # Parse the contents to check it's an actual serialization
- p = RDFParser()
-
- p.parse(content, _format='xml')
-
- dcat_datasets = [d for d in p.datasets()]
-
- eq_(len(dcat_datasets), 1)
-
- dcat_dataset = dcat_datasets[0]
-
- eq_(dcat_dataset['title'], dataset['title'])
- eq_(dcat_dataset['notes'], dataset['notes'])
-
- def test_dataset_xml(self):
-
- dataset = factories.Dataset(
- notes='Test dataset'
- )
-
- url = url_for('dcat_dataset', _id=dataset['name'], _format='xml')
-
- app = self._get_test_app()
-
- response = app.get(url)
-
- eq_(response.headers['Content-Type'], 'application/rdf+xml')
-
- content = response.body
-
- # Parse the contents to check it's an actual serialization
- p = RDFParser()
-
- p.parse(content, _format='xml')
-
- dcat_datasets = [d for d in p.datasets()]
-
- eq_(len(dcat_datasets), 1)
-
- dcat_dataset = dcat_datasets[0]
-
- eq_(dcat_dataset['title'], dataset['title'])
- eq_(dcat_dataset['notes'], dataset['notes'])
-
- def test_dataset_ttl(self):
-
- dataset = factories.Dataset(
- notes='Test dataset'
- )
-
- url = url_for('dcat_dataset', _id=dataset['name'], _format='ttl')
-
- app = self._get_test_app()
-
- response = app.get(url)
-
- eq_(response.headers['Content-Type'], 'text/turtle')
-
- content = response.body
-
- # Parse the contents to check it's an actual serialization
- p = RDFParser()
-
- p.parse(content, _format='turtle')
-
- dcat_datasets = [d for d in p.datasets()]
-
- eq_(len(dcat_datasets), 1)
-
- dcat_dataset = dcat_datasets[0]
-
- eq_(dcat_dataset['title'], dataset['title'])
- eq_(dcat_dataset['notes'], dataset['notes'])
-
- def test_dataset_n3(self):
-
- dataset = factories.Dataset(
- notes='Test dataset'
- )
-
- url = url_for('dcat_dataset', _id=dataset['name'], _format='n3')
-
- app = self._get_test_app()
-
- response = app.get(url)
-
- eq_(response.headers['Content-Type'], 'text/n3')
-
- content = response.body
-
- # Parse the contents to check it's an actual serialization
- p = RDFParser()
-
- p.parse(content, _format='n3')
-
- dcat_datasets = [d for d in p.datasets()]
-
- eq_(len(dcat_datasets), 1)
-
- dcat_dataset = dcat_datasets[0]
-
- eq_(dcat_dataset['title'], dataset['title'])
- eq_(dcat_dataset['notes'], dataset['notes'])
-
- def test_dataset_jsonld(self):
-
- dataset = factories.Dataset(
- notes='Test dataset'
- )
-
- url = url_for('dcat_dataset', _id=dataset['name'], _format='jsonld')
-
- app = self._get_test_app()
-
- response = app.get(url)
-
- eq_(response.headers['Content-Type'], 'application/ld+json')
-
- content = response.body
-
- # Parse the contents to check it's an actual serialization
- p = RDFParser()
-
- p.parse(content, _format='json-ld')
-
- dcat_datasets = [d for d in p.datasets()]
-
- eq_(len(dcat_datasets), 1)
-
- dcat_dataset = dcat_datasets[0]
-
- eq_(dcat_dataset['title'], dataset['title'])
- eq_(dcat_dataset['notes'], dataset['notes'])
-
- def test_dataset_profiles_jsonld(self):
-
- dataset = factories.Dataset(
- notes='Test dataset'
- )
-
- url = url_for('dcat_dataset', _id=dataset['name'], _format='jsonld', profiles='schemaorg')
-
- app = self._get_test_app()
-
- response = app.get(url)
-
- eq_(response.headers['Content-Type'], 'application/ld+json')
-
- content = response.body
-
- assert '"@type": "schema:Dataset"' in content
- assert '"schema:description": "%s"' % dataset['notes'] in content
-
- def test_dataset_profiles_not_found(self):
-
- dataset = factories.Dataset(
- notes='Test dataset'
- )
-
- url = url_for('dcat_dataset', _id=dataset['name'], _format='jsonld', profiles='nope')
-
- app = self._get_test_app()
-
- response = app.get(url, status=409)
-
- assert 'Unknown RDF profiles: nope' in response.body
-
- def test_dataset_not_found(self):
- import uuid
-
- url = url_for('dcat_dataset', _id=str(uuid.uuid4()), _format='n3')
- app = self._get_test_app()
- app.get(url, status=404)
-
- @helpers.change_config('ckanext.dcat.enable_rdf_endpoints', False)
- def test_dataset_endpoint_disabled(self):
- p.unload('dcat')
- p.load('dcat')
- dataset = factories.Dataset(
- notes='Test dataset'
- )
- # without the route, url_for returns the given parameters
- url = url_for('dcat_dataset', _id=dataset['name'], _format='xml')
- assert not url.startswith('/')
- assert url.startswith('dcat_dataset')
-
- def test_dataset_form_is_rendered(self):
- sysadmin = factories.Sysadmin()
- env = {'REMOTE_USER': sysadmin['name'].encode('ascii')}
- url = url_for('add dataset')
-
- app = self._get_test_app()
-
- response = app.get(url, extra_environ=env)
-
- content = response.body
-
- assert '' in response.body
- assert '"schema:description": "test description"' in response.body
-
- def test_structured_data_not_generated(self):
- p.unload('structured_data')
-
- dataset = factories.Dataset(
- notes='test description'
- )
-
- url = url_for('dataset_read', id=dataset['name'])
-
- app = self._get_test_app()
-
- response = app.get(url)
- assert not '