mirror of
https://github.com/gsi-upm/senpy
synced 2025-08-23 18:12:20 +00:00
Multiple changes in the API, schemas and UI
Check out the CHANGELOG.md file for more information
@@ -3,7 +3,7 @@ import logging
 logger = logging.getLogger(__name__)
 
 from unittest import TestCase
-from senpy.api import (boolean, parse_params, get_extra_params, parse_analysis,
+from senpy.api import (boolean, parse_params, get_extra_params, parse_analyses,
                        API_PARAMS, NIF_PARAMS, WEB_PARAMS)
 from senpy.models import Error, Plugin
 
@@ -91,7 +91,7 @@ class APITest(TestCase):
         assert 'input' in p
         assert p['input'] == 'Aloha my friend'
 
-    def test_parse_analysis(self):
+    def test_parse_analyses(self):
         '''The API should parse user parameters and return them in a format that plugins can use'''
         plugins = [
             Plugin({
@@ -161,7 +161,7 @@ class APITest(TestCase):
             }
 
         ]
-        p = parse_analysis(call, plugins)
+        p = parse_analyses(call, plugins)
         for i, arg in enumerate(expected):
             params = p[i].params
             for k, v in arg.items():
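The hunks above rename senpy.api.parse_analysis to parse_analyses. A minimal sketch of the call pattern the updated test exercises (the plugin definition and parameter values are illustrative, not taken from the commit):

    from senpy.api import parse_analyses
    from senpy.models import Plugin

    # Hypothetical plugin with one optional extra parameter
    plugins = [Plugin({'name': 'dummy',
                       'extra_params': {'language': {'required': False}}})]
    call = {'input': 'Aloha my friend', 'algo': 'dummy'}

    # One analysis object is returned per plugin in the chain; each carries
    # the parameters that apply to that plugin (the p[i].params in the test).
    for analysis in parse_analyses(call, plugins):
        print(analysis.params)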
@@ -60,33 +60,33 @@ class BlueprintsTest(TestCase):
         The dummy plugin returns an empty response,\
         it should contain the context
         """
-        resp = self.client.get("/api/?i=My aloha mohame&with_parameters=True")
+        resp = self.client.get("/api/?i=My aloha mohame&verbose")
         self.assertCode(resp, 200)
         js = parse_resp(resp)
         logging.debug("Got response: %s", js)
         assert "@context" in js
         assert "entries" in js
-        assert len(js['analysis']) == 1
+        assert len(js['activities']) == 1
 
     def test_analysis_post(self):
         """
         The results for a POST request should be the same as for a GET request.
         """
         resp = self.client.post("/api/", data={'i': 'My aloha mohame',
-                                               'algorithm': 'rand',
-                                               'with_parameters': True})
+                                               'algorithm': 'sentiment-random',
+                                               'verbose': True})
         self.assertCode(resp, 200)
         js = parse_resp(resp)
         logging.debug("Got response: %s", js)
         assert "@context" in js
         assert "entries" in js
-        assert len(js['analysis']) == 1
+        assert len(js['activities']) == 1
 
     def test_analysis_extra(self):
         """
         Extra params that have a default should use it
         """
-        resp = self.client.get("/api/?i=My aloha mohame&algo=Dummy&with_parameters=true")
+        resp = self.client.get("/api/?i=My aloha mohame&algo=Dummy&with-parameters=true")
         self.assertCode(resp, 200)
         js = parse_resp(resp)
         logging.debug("Got response: %s", js)
@@ -129,23 +129,23 @@ class BlueprintsTest(TestCase):
         """
         More than one algorithm can be specified. Plugins will then be chained
         """
-        resp = self.client.get("/api/Dummy?i=My aloha mohame")
+        resp = self.client.get("/api/Dummy?i=My aloha mohame&verbose")
         js = parse_resp(resp)
-        assert len(js['analysis']) == 1
+        assert len(js['activities']) == 1
         assert js['entries'][0]['nif:isString'] == 'My aloha mohame'[::-1]
 
-        resp = self.client.get("/api/Dummy/Dummy?i=My aloha mohame")
+        resp = self.client.get("/api/Dummy/Dummy?i=My aloha mohame&verbose")
         # Calling dummy twice, should return the same string
         self.assertCode(resp, 200)
         js = parse_resp(resp)
-        assert len(js['analysis']) == 2
+        assert len(js['activities']) == 2
         assert js['entries'][0]['nif:isString'] == 'My aloha mohame'
 
-        resp = self.client.get("/api/Dummy+Dummy?i=My aloha mohame")
+        resp = self.client.get("/api/Dummy+Dummy?i=My aloha mohame&verbose")
         # Same with pluses instead of slashes
         self.assertCode(resp, 200)
         js = parse_resp(resp)
-        assert len(js['analysis']) == 2
+        assert len(js['activities']) == 2
         assert js['entries'][0]['nif:isString'] == 'My aloha mohame'
 
     def test_analysis_chain_required(self):
@@ -153,9 +153,10 @@ class BlueprintsTest(TestCase):
         If a parameter is required and duplicated (because two plugins require it), specifying
         it once should suffice
         """
-        resp = self.client.get("/api/DummyRequired/DummyRequired?i=My aloha mohame&example=a")
+        resp = self.client.get(('/api/DummyRequired/DummyRequired?'
+                                'i=My aloha mohame&example=a&verbose'))
         js = parse_resp(resp)
-        assert len(js['analysis']) == 2
+        assert len(js['activities']) == 2
         assert js['entries'][0]['nif:isString'] == 'My aloha mohame'
         assert js['entries'][0]['reversed'] == 2
 
@@ -193,20 +194,21 @@ class BlueprintsTest(TestCase):
         # First, we split by sentence twice. Each call should generate 3 additional entries
         # (one per sentence in the original).
         resp = self.client.get('/api/split/split?i=The first sentence. The second sentence.'
-                               '\nA new paragraph&delimiter=sentence')
+                               '\nA new paragraph&delimiter=sentence&verbose')
         js = parse_resp(resp)
-        assert len(js['analysis']) == 2
+        assert len(js['activities']) == 2
         assert len(js['entries']) == 7
 
         # Now, we split by sentence. This produces 3 additional entries.
         # Then, we split by paragraph. This should create 2 additional entries (One per paragraph
         # in the original text)
         resp = self.client.get('/api/split/split?i=The first sentence. The second sentence.'
-                               '\nA new paragraph&0.delimiter=sentence&1.delimiter=paragraph')
+                               '\nA new paragraph&0.delimiter=sentence'
+                               '&1.delimiter=paragraph&verbose')
         # Calling dummy twice, should return the same string
         self.assertCode(resp, 200)
         js = parse_resp(resp)
-        assert len(js['analysis']) == 2
+        assert len(js['activities']) == 2
         assert len(js['entries']) == 6
 
     def test_error(self):
@@ -230,12 +232,12 @@ class BlueprintsTest(TestCase):
         assert 'plugins' in js
         plugins = js['plugins']
         assert len(plugins) > 1
-        assert list(p for p in plugins if p['name'] == "Dummy")
+        assert any(x['name'] == 'dummy' for x in plugins)
         assert "@context" in js
 
     def test_headers(self):
         for i, j in product(["/api/plugins/?nothing=", "/api/?i=test&"],
-                            ["inHeaders"]):
+                            ["in-headers"]):
             resp = self.client.get("%s" % (i))
             js = parse_resp(resp)
             assert "@context" in js
@@ -251,12 +253,12 @@ class BlueprintsTest(TestCase):
 
     def test_detail(self):
         """ Show only one plugin"""
-        resp = self.client.get("/api/plugins/Dummy/")
+        resp = self.client.get("/api/plugins/dummy/")
         self.assertCode(resp, 200)
         js = parse_resp(resp)
         logging.debug(js)
         assert "@id" in js
-        assert js["@id"] == "endpoint:plugins/Dummy_0.1"
+        assert js["@id"] == "endpoint:plugins/dummy_0.1"
 
     def test_default(self):
         """ Show only one plugin"""
@@ -265,7 +267,7 @@ class BlueprintsTest(TestCase):
         js = parse_resp(resp)
         logging.debug(js)
         assert "@id" in js
-        assert js["@id"] == "endpoint:plugins/Dummy_0.1"
+        assert js["@id"] == "endpoint:plugins/dummy_0.1"
 
     def test_context(self):
         resp = self.client.get("/api/contexts/context.jsonld")
@@ -290,5 +292,5 @@ class BlueprintsTest(TestCase):
 
     def test_conversion(self):
         self.app.config['TESTING'] = False  # Errors are expected in this case
-        resp = self.client.get("/api/?input=hello&algo=emoRand&emotionModel=DOES NOT EXIST")
+        resp = self.client.get("/api/?input=hello&algo=emotion-random&emotionModel=DOES NOT EXIST")
         self.assertCode(resp, 404)
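Across the blueprint tests, the with_parameters flag becomes verbose and responses report provenance under activities rather than analysis. A sketch of what a client sees under the new API, assuming a senpy server on localhost:5000 (the endpoint and input string come from the tests; the rest is illustrative):

    import requests

    js = requests.get('http://localhost:5000/api/',
                      params={'i': 'My aloha mohame', 'verbose': True}).json()
    assert 'activities' in js   # formerly js['analysis']
    assert 'entries' in js      # entry structure is unchanged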
@@ -12,8 +12,8 @@ class CLITest(TestCase):
     def test_basic(self):
         self.assertRaises(Error, partial(main_function, []))
 
-        res = main_function(['--input', 'test', '--algo', 'rand',
-                             '--with-parameters', '--default-plugins'])
+        res = main_function(['--input', 'test', '--algo', 'sentiment-random',
+                             '--with-parameters'])
         assert res.parameters['input'] == 'test'
-        assert 'rand' in res.parameters['algorithm']
+        assert 'sentiment-random' in res.parameters['algorithm']
         assert res.parameters['input'] == 'test'
@@ -28,6 +28,7 @@ class ExtensionsTest(TestCase):
                            default_plugins=False)
         self.senpy.deactivate_all()
         self.senpy.activate_plugin("Dummy", sync=True)
+        self.app.config['TESTING'] = True  # Tell Flask not to catch Exceptions
 
     def test_init(self):
         """ Initialising the app with the extension. """
@@ -44,7 +45,7 @@ class ExtensionsTest(TestCase):
 
     def test_add_delete(self):
         '''Should be able to add and delete new plugins. '''
-        new = plugins.Analysis(name='new', description='new', version=0)
+        new = plugins.Analyser(name='new', description='new', version=0)
         self.senpy.add_plugin(new)
         assert new in self.senpy.plugins(is_activated=False)
         self.senpy.delete_plugin(new)
@@ -55,9 +56,9 @@ class ExtensionsTest(TestCase):
         senpy = Senpy(plugin_folder=None,
                       app=self.app,
                       default_plugins=False)
-        assert not senpy.analysis_plugins
+        assert not senpy.analysis_plugins()
         senpy.add_folder(self.examples_dir)
-        assert senpy.plugins(plugin_type=plugins.AnalysisPlugin, is_activated=False)
+        assert senpy.plugins(plugin_type=plugins.Analyser, is_activated=False)
         self.assertRaises(AttributeError, senpy.add_folder, 'DOES NOT EXIST')
 
     def test_installing(self):
@@ -102,7 +103,7 @@ class ExtensionsTest(TestCase):
     def test_default(self):
         """ Default plugin should be set """
         assert self.senpy.default_plugin
-        assert self.senpy.default_plugin.name == "Dummy"
+        assert self.senpy.default_plugin.name == "dummy"
         self.senpy.deactivate_all(sync=True)
         logging.debug("Default: {}".format(self.senpy.default_plugin))
         assert self.senpy.default_plugin is None
@@ -118,8 +119,8 @@ class ExtensionsTest(TestCase):
         # Leaf (defaultdict with __setattr__ and __getattr__.
         r1 = analyse(self.senpy, algorithm="Dummy", input="tupni", output="tuptuo")
         r2 = analyse(self.senpy, input="tupni", output="tuptuo")
-        assert r1.analysis[0].algorithm == "endpoint:plugins/Dummy_0.1"
-        assert r2.analysis[0].algorithm == "endpoint:plugins/Dummy_0.1"
+        assert r1.activities[0].algorithm == "endpoint:plugins/dummy_0.1"
+        assert r2.activities[0].algorithm == "endpoint:plugins/dummy_0.1"
         assert r1.entries[0]['nif:isString'] == 'input'
 
     def test_analyse_empty(self):
@@ -153,12 +154,12 @@ class ExtensionsTest(TestCase):
         r2 = analyse(self.senpy,
                      input="tupni",
                      output="tuptuo")
-        assert r1.analysis[0].algorithm == "endpoint:plugins/Dummy_0.1"
-        assert r2.analysis[0].algorithm == "endpoint:plugins/Dummy_0.1"
+        assert r1.activities[0].algorithm == "endpoint:plugins/dummy_0.1"
+        assert r2.activities[0].algorithm == "endpoint:plugins/dummy_0.1"
         assert r1.entries[0]['nif:isString'] == 'input'
 
     def test_analyse_error(self):
-        class ErrorPlugin(plugins.Analysis):
+        class ErrorPlugin(plugins.Analyser):
             author = 'nobody'
             version = 0
             ex = Error()
@@ -203,20 +204,21 @@ class ExtensionsTest(TestCase):
             'onyx:usesEmotionModel': 'emoml:fsre-dimensions'
         })
         eSet1 = EmotionSet()
-        eSet1.prov__wasGeneratedBy = plugin['@id']
+        activity = plugin.activity()
+        eSet1.prov(activity)
         eSet1['onyx:hasEmotion'].append(Emotion({
             'emoml:arousal': 1,
             'emoml:potency': 0,
             'emoml:valence': 0
         }))
         response = Results({
-            'analysis': [plugin],
+            'activities': [activity],
             'entries': [Entry({
                 'nif:isString': 'much ado about nothing',
-                'emotions': [eSet1]
+                'onyx:hasEmotionSet': [eSet1]
             })]
         })
-        params = {'emotionModel': 'emoml:big6',
+        params = {'emotion-model': 'emoml:big6',
                   'algorithm': ['conversion'],
                   'conversion': 'full'}
         r1 = deepcopy(response)
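The conversion test shows the new provenance wiring: instead of pointing prov:wasGeneratedBy at a plugin's @id, the code asks the plugin for an activity object and links results to that. A minimal sketch built only from the calls visible above, assuming plugin is an already-constructed plugin instance:

    from senpy.models import EmotionSet, Results

    activity = plugin.activity()   # one activity per invocation
    eSet = EmotionSet()
    eSet.prov(activity)            # replaces eSet.prov__wasGeneratedBy = plugin['@id']

    response = Results({'activities': [activity],
                        'entries': []})   # entries omitted for brevity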
@@ -10,7 +10,6 @@ from senpy.models import (Analysis,
                           EmotionAnalysis,
                           EmotionSet,
                           Entry,
                           Entity,
-                          Error,
                           Results,
                           Sentiment,
@@ -25,7 +24,7 @@ from pprint import pprint
 
 class ModelsTest(TestCase):
     def test_jsonld(self):
-        prueba = {"id": "test", "analysis": [], "entries": []}
+        prueba = {"id": "test", "activities": [], "entries": []}
         r = Results(**prueba)
         print("Response's context: ")
         pprint(r._context)
@@ -51,7 +50,7 @@ class ModelsTest(TestCase):
         logging.debug("jsonld: %s", j6)
         assert ("@context" in j6)
         assert ("entries" in j6)
-        assert ("analysis" in j6)
+        assert ("activities" in j6)
         resp = r6.flask()
         received = json.loads(resp.data.decode())
         logging.debug("Response: %s", j6)
@@ -114,7 +113,7 @@ class ModelsTest(TestCase):
         }})
         c = p.jsonld()
         assert '@type' in c
-        assert c['@type'] == 'sentimentPlugin'
+        assert c['@type'] == 'SentimentPlugin'
         assert 'info' not in c
         assert 'repo' not in c
         assert 'extra_params' in c
@@ -133,7 +132,7 @@ class ModelsTest(TestCase):
         p._testing = 0
         s = str(p)
         assert "_testing" not in s
-        r.analysis.append(p)
+        r.activities.append(p)
         s = str(r)
         assert "_testing" not in s
 
@@ -146,7 +145,7 @@ class ModelsTest(TestCase):
         """Any model should be serializable as a turtle file"""
         ana = EmotionAnalysis()
         res = Results()
-        res.analysis.append(ana)
+        res.activities.append(ana)
         entry = Entry(text='Just testing')
         eSet = EmotionSet()
         emotion = Emotion()
@@ -155,7 +154,7 @@ class ModelsTest(TestCase):
         eSet.onyx__hasEmotion.append(emotion)
         eSet.prov__wasGeneratedBy = ana.id
         triples = ('ana a :Analysis',
-                   'entry a :entry',
+                   'ent[]ry a :entry',
                    ' nif:isString "Just testing"',
                    ' onyx:hasEmotionSet eSet',
                    'eSet a onyx:EmotionSet',
@@ -163,7 +162,7 @@ class ModelsTest(TestCase):
                    ' onyx:hasEmotion emotion',
                    'emotion a onyx:Emotion',
                    'res a :results',
-                   ' me:AnalysisInvoloved ana',
+                   ' me:AnalysisInvolved ana',
                    ' prov:used entry')
 
         t = res.serialize(format='turtle')
@@ -176,7 +175,7 @@ class ModelsTest(TestCase):
         plugs = Plugins()
         c = plugs.jsonld()
         assert '@type' in c
-        assert c['@type'] == 'plugins'
+        assert c['@type'] == 'Plugins'
 
     def test_single_plugin(self):
         """A response with a single plugin should still return a list"""
@@ -188,7 +187,7 @@ class ModelsTest(TestCase):
         assert isinstance(plugs.plugins, list)
         js = plugs.jsonld()
         assert isinstance(js['plugins'], list)
-        assert js['plugins'][0]['@type'] == 'sentimentPlugin'
+        assert js['plugins'][0]['@type'] == 'SentimentPlugin'
 
     def test_parameters(self):
         '''An Analysis should contain the algorithm and the list of parameters to be used'''
@@ -205,11 +204,11 @@ class ModelsTest(TestCase):
 
     def test_from_string(self):
         results = {
-            '@type': 'results',
+            '@type': 'Results',
             '@id': 'prueba',
             'entries': [{
                 '@id': 'entry1',
-                '@type': 'entry',
+                '@type': 'Entry',
                 'nif:isString': 'TEST'
             }]
         }
@@ -226,10 +225,22 @@ class ModelsTest(TestCase):
     def test_serializable(self):
         r = Results()
         e = Entry()
         ent = Entity()
         e.entities.append(ent)
         r.entries.append(e)
         d = r.serializable()
         assert d
         assert d['entries']
         assert d['entries'][0]['entities']
 
+    def test_template(self):
+        r = Results()
+        e = Entry()
+        e.nif__isString = 'testing the template'
+        sent = Sentiment()
+        sent.polarity = 'marl:Positive'
+        r.entries.append(e)
+        e.sentiments.append(sent)
+        template = ('{% for entry in entries %}'
+                    '{{ entry["nif:isString"] | upper }}'
+                    ',{{entry.sentiments[0]["marl:hasPolarity"].split(":")[1]}}'
+                    '{% endfor %}')
+        res = r.serialize(template=template)
+        assert res == 'TESTING THE TEMPLATE,Positive'
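test_template is new in this commit: Results.serialize now accepts a Jinja-style template string in addition to a format. A condensed usage sketch of the same feature, using only calls that appear in the test:

    from senpy.models import Results, Entry

    r = Results()
    e = Entry()
    e.nif__isString = 'testing the template'
    r.entries.append(e)

    # The template iterates over the serialized response
    print(r.serialize(template=('{% for entry in entries %}'
                                '{{ entry["nif:isString"] | upper }}'
                                '{% endfor %}')))  # -> TESTING THE TEMPLATE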
@@ -1,4 +1,5 @@
-#!/bin/env python
+#!/usr/local/bin/python
 # -*- coding: utf-8 -*-
 
 import os
+import pickle
@@ -71,7 +72,7 @@ class PluginsTest(TestCase):
         ps = Plugins()
         for i in (plugins.SentimentPlugin,
                   plugins.EmotionPlugin,
-                  plugins.AnalysisPlugin):
+                  plugins.Analyser):
             p = i(name='Plugin_{}'.format(i.__name__),
                   description='TEST',
                   version=0,
@@ -212,14 +213,15 @@ class PluginsTest(TestCase):
             author = 'me'
             version = 0
 
-            def input(self, entry, **kwargs):
-                return entry.text
+            def to_features(self, entry, **kwargs):
+                return entry.text.split()
 
-            def predict_one(self, input):
-                return 'SIGN' in input
+            def predict_one(self, features, **kwargs):
+                return ['SIGN' in features]
 
-            def output(self, output, entry, **kwargs):
-                if output:
+            def to_entry(self, features, entry, **kwargs):
+                print('Features for to_entry:', features)
+                if features[0]:
                     entry.myAnnotation = 'DETECTED'
                 return entry
 
@@ -237,16 +239,17 @@ class PluginsTest(TestCase):
 
     def test_sentimentbox(self):
 
-        class SentimentBox(plugins.MappingMixin, plugins.SentimentBox):
+        class SentimentBox(plugins.SentimentBox):
             ''' Vague description'''
 
            author = 'me'
            version = 0
 
-            mappings = {'happy': 'marl:Positive', 'sad': 'marl:Negative'}
-
-            def predict_one(self, input, **kwargs):
-                return 'happy' if ':)' in input else 'sad'
+            def predict_one(self, features, **kwargs):
+                text = ' '.join(features)
+                if ':)' in text:
+                    return [1, 0, 0]
+                return [0, 0, 1]
 
         test_cases = [
             {
@@ -320,19 +323,19 @@ class PluginsTest(TestCase):
         testdata.append(["bad", 0])
         dataset = pd.DataFrame(testdata, columns=['text', 'polarity'])
 
-        class DummyPlugin(plugins.TextBox):
+        class DummyPlugin(plugins.SentimentBox):
             description = 'Plugin to test evaluation'
             version = 0
 
-            def predict_one(self, input):
+            def predict_one(self, features, **kwargs):
                 return 0
 
-        class SmartPlugin(plugins.TextBox):
+        class SmartPlugin(plugins.SentimentBox):
             description = 'Plugin to test evaluation'
            version = 0
 
-            def predict_one(self, input):
-                if input == 'good':
+            def predict_one(self, features, **kwargs):
+                if features[0] == 'good':
                     return 1
                 return 0
 
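The box-plugin interface changes shape in these hunks: the input/output hooks become to_features and to_entry, predict_one now receives a feature sequence and returns a list (for SentimentBox, apparently one score per polarity class), and MappingMixin with its mappings dict is gone. A sketch of a complete plugin under the new interface, assembled from the signatures above (the class name and import path are assumptions, not taken from the diff):

    from senpy import plugins

    class HappyDetector(plugins.SentimentBox):
        '''Illustrative plugin: positive iff the text contains a smiley'''
        author = 'me'
        version = 0

        def to_features(self, entry, **kwargs):
            # Turn an entry into the sequence consumed by predict_one
            return entry.text.split()

        def predict_one(self, features, **kwargs):
            # One score per class, presumably [positive, neutral, negative]
            if ':)' in features:
                return [1, 0, 0]
            return [0, 0, 1]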
tests/test_semantics.py (new file, 112 lines)
@@ -0,0 +1,112 @@
+#!/usr/local/bin/python
+# -*- coding: utf-8 -*-
+
+from future.standard_library import install_aliases
+install_aliases()
+
+import os
+
+from rdflib import Graph
+from senpy.extensions import Senpy
+from flask import Flask
+from unittest import TestCase
+
+from urllib.parse import urlencode
+
+
+def parse_resp(resp, fmt):
+    return Graph().parse(data=resp.data.decode(), format=fmt)
+
+
+class SemanticsTest(TestCase):
+    '''Tests for the semantics of the server.'''
+
+    @classmethod
+    def setUpClass(cls):
+        """Set up only once, and re-use in every individual test"""
+        cls.app = Flask("test_extensions")
+        cls.client = cls.app.test_client()
+        cls.senpy = Senpy(default_plugins=True)
+        cls.senpy.init_app(cls.app)
+        cls.dir = os.path.join(os.path.dirname(__file__), "..")
+        cls.senpy.add_folder(cls.dir)
+        cls.senpy.activate_all()
+        cls.senpy.default_plugin = 'Dummy'
+
+    def setUp(self):
+        self.app.config['TESTING'] = True  # Tell Flask not to catch Exceptions
+
+    def assertCode(self, resp, code):
+        self.assertEqual(resp.status_code, code)
+
+    def test_sentiment(self):
+        """
+        A sentiment analysis call in JSON-LD
+        """
+        # We use expanded JSON-LD and ignore the context, because in general
+        # the context is a URI to the service and that URI is not
+        # available outside of self.client
+        params = {
+            'input': 'hello',
+            'in-headers': True,
+            'outformat': 'json-ld',
+            'expanded': True,
+            'prefix': 'http://default.example/#'
+        }
+        resp = self.client.get("/api/basic?{}".format(urlencode(params)))
+        self.assertCode(resp, 200)
+        g = parse_resp(resp, fmt='json-ld')
+        assert g
+        qres = g.query("""
+            PREFIX prov: <http://www.w3.org/ns/prov#>
+            PREFIX marl: <http://www.gsi.dit.upm.es/ontologies/marl/ns#>
+            PREFIX nif: <http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core#>
+            PREFIX onyx: <http://www.gsi.dit.upm.es/ontologies/onyx/ns#>
+            PREFIX senpy: <http://www.gsi.upm.es/onto/senpy/ns#>
+
+            SELECT DISTINCT ?entry ?text ?sentiment
+            WHERE {
+                ?entry a senpy:Entry .
+                ?entry marl:hasOpinion ?o .
+                ?entry nif:isString ?text .
+                ?o marl:hasPolarity ?sentiment .
+            }""")
+        assert len(qres) == 1
+        entry, text, sentiment = list(qres)[0]
+        assert entry
+        assert str(text) == 'hello'
+        assert str(sentiment) in ['marl:Positive', 'marl:Neutral', 'marl:Negative']
+
+    def test_sentiment_turtle(self):
+        """
+        A sentiment analysis call in turtle format
+        """
+        params = {
+            'input': 'hello',
+            'in-headers': True,
+            'outformat': 'turtle',
+            'expanded': True,
+            'prefix': 'http://default.example/#'
+        }
+        resp = self.client.get("/api/basic?{}".format(urlencode(params)))
+        self.assertCode(resp, 200)
+        g = parse_resp(resp, 'ttl')
+        qres = g.query("""
+            PREFIX prov: <http://www.w3.org/ns/prov#>
+            PREFIX marl: <http://www.gsi.dit.upm.es/ontologies/marl/ns#>
+            PREFIX nif: <http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core#>
+            PREFIX onyx: <http://www.gsi.dit.upm.es/ontologies/onyx/ns#>
+            PREFIX senpy: <http://www.gsi.upm.es/onto/senpy/ns#>
+
+            SELECT DISTINCT ?entry ?text ?sentiment
+            WHERE {
+                ?entry a senpy:Entry .
+                ?entry marl:hasOpinion ?o .
+                ?entry nif:isString ?text .
+                ?o marl:hasPolarity ?sentiment .
+            }""")
+        assert len(qres) == 1
+        entry, text, sentiment = list(qres)[0]
+        assert entry
+        assert str(text) == 'hello'
+        assert str(sentiment) in ['marl:Positive', 'marl:Neutral', 'marl:Negative']
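tests/test_semantics.py is entirely new: it parses API responses into an rdflib graph and validates them with SPARQL, so the same assertions hold for JSON-LD and Turtle alike. The check can also be reproduced outside the test suite; a sketch assuming resp_data holds a Turtle response body:

    from rdflib import Graph

    g = Graph().parse(data=resp_data, format='ttl')
    rows = g.query('''
        PREFIX marl: <http://www.gsi.dit.upm.es/ontologies/marl/ns#>
        PREFIX senpy: <http://www.gsi.upm.es/onto/senpy/ns#>
        SELECT ?entry ?sentiment
        WHERE {
            ?entry a senpy:Entry ;
                   marl:hasOpinion ?o .
            ?o marl:hasPolarity ?sentiment .
        }''')
    for entry, sentiment in rows:
        print(entry, sentiment)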
@@ -5,7 +5,7 @@ import json
 from senpy.testing import patch_requests
 from senpy.models import Results
 
-ENDPOINT = 'http://example.com'
+ENDPOINT = 'http://invalid.com'
 
 
 class TestTest(TestCase):