mirror of https://github.com/gsi-upm/senpy synced 2025-10-19 09:48:26 +00:00

Compare commits

13 Commits

Author SHA1 Message Date
J. Fernando Sánchez
d145a852e7 remove pytest warning errors
In the previous commit I naively configured pytest to
treat any warning as an error, except for numpy
PendingDeprecationWarning errors.
This caused the build in py2.7 to fail, due to some
warnings from nltk.
2018-12-07 18:02:01 +01:00
J. Fernando Sánchez
c090501534 New schema for parameters
* Add parameters as an entity in the schema
* Update examples to include parameters
* Change the API for processing plugins: params is a parameter again, instead of
only passing the request.
* Update tests
2018-12-07 16:46:07 +01:00
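A minimal sketch of the new representation, assuming the `Plugin`/`Analysis` models and the `activity()` factory shown in the senpy/models.py diff below, plus the schema defaults from analysis.json (the plugin name and parameter here are hypothetical):

from senpy.models import Plugin

plugin = Plugin({'name': 'sentiment-example', 'version': '0.1'})  # hypothetical plugin info
activity = plugin.activity({'language': 'en'})  # wraps the parsed parameters
# activity is a prov:Activity: 'prov:wasAssociatedWith' points at the plugin id,
# and each parameter becomes an entity under 'prov:used'.
print(activity.params)  # roughly {'algorithm': <plugin id>, 'language': 'en'}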
J. Fernando Sánchez
6a1069780b Draft merge 51-improve parameters
There are some unsolved issues, like representing the mix of analysis+parameters
in a sensible way.
I think we should somehow represent each of the analysis tasks with a unique ID,
and it should contain the specific parameters used.

Right now results.parameters is a mix of a dict with global parameters and a
list with a dict of parameters per plugin.
2018-11-22 18:56:30 +01:00
J. Fernando Sánchez
41aa142ce0 Refactored conversion and postprocessing 2018-11-22 17:27:43 +01:00
J. Fernando Sánchez
b48730137d Remove makefiles from auto push/pull 2018-11-06 17:12:54 +01:00
J. Fernando Sánchez
f1ec057b16 Add fetch to makefiles push 2018-11-06 17:02:59 +01:00
J. Fernando Sánchez
f6ca82cac8 Merge branch '56-exception-when-using-post' into 'master'
Replace algorithm list with a tuple

Closes #56

See merge request senpy/senpy!25
2018-11-06 14:56:00 +00:00
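A hedged illustration of the bug behind #56: with a list value, the parameter set cannot be hashed for the URL-encoding step shown in the blueprints.py diff below, while a tuple can (the algorithm name here is hypothetical):

params = {'algorithm': ['sentiment140']}
try:
    hash(frozenset(params.items()))   # a list inside an item is unhashable
except TypeError as err:
    print(err)                        # unhashable type: 'list'
params['algorithm'] = ('sentiment140',)  # tuples are hashable
print(hash(frozenset(params.items())))   # works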
J. Fernando Sánchez
318acd5a71 Replace algorithm list with a tuple 2018-11-06 15:52:05 +01:00
J. Fernando Sánchez
c8f6f5613d Change CI to include make push
This replaces the separate make targets for each Python version with a single `make push`.
It will also add a "main image" for each version, e.g. `gsiupm/senpy:1.0.0` in
addition to `gsiupm/senpy:1.0.0-python2.7` and `gsiupm/senpy:1.0.0-python3.5`.
2018-10-30 17:45:44 +01:00
J. Fernando Sánchez
748d1a00bd Fix bug in POST 2018-10-30 16:35:17 +01:00
J. Fernando Sánchez
a82e4ed440 Fix bug in py3.5 2018-10-30 16:14:06 +01:00
J. Fernando Sánchez
ca69bddc17 Improve extra requirement handling
This commit adds a new mechanism to handle parameters beforehand in chained
calls, and the ability to get help on available parameters in chained
calls (through `?help`).
It also includes tests for this feature.

Closes #51
2018-08-20 15:44:54 +02:00
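A hedged usage sketch, borrowing the plugin names from the tests below and assuming a senpy server running on localhost:5000:

import requests

# Asking a chain of plugins for help merges their extra parameters.
resp = requests.get('http://localhost:5000/api/split/DummyRequired',
                    params={'help': 'true'})
print(resp.json()['valid_parameters'])  # should list e.g. 'delimiter' and 'example'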
J. Fernando Sánchez
aa35e62a27 Avoid duplication in split plugin 2018-08-20 14:07:33 +02:00
43 changed files with 1207 additions and 536 deletions

View File

@@ -31,29 +31,19 @@ test-2.7:
variables:
PYTHON_VERSION: "2.7"
.image: &image_definition
push:
stage: push
script:
- make -e push-$PYTHON_VERSION
- make -e push
only:
- tags
- triggers
- fix-makefiles
push-3.5:
<<: *image_definition
variables:
PYTHON_VERSION: "3.5"
push-2.7:
<<: *image_definition
variables:
PYTHON_VERSION: "2.7"
push-latest:
<<: *image_definition
variables:
PYTHON_VERSION: latest
stage: push
script:
- make -e push-latest
only:
- master
- triggers

View File

@@ -22,7 +22,4 @@ else
rm $(KEY_FILE)
endif
push:: git-push
pull:: git-pull
.PHONY:: commit tag push git-push git-pull push-github
.PHONY:: commit tag git-push git-pull push-github

View File

@@ -1,17 +1,15 @@
makefiles-remote:
@git remote add makefiles ssh://git@lab.cluster.gsi.dit.upm.es:2200/docs/templates/makefiles.git 2>/dev/null || true
git ls-remote --exit-code makefiles 2> /dev/null || git remote add makefiles ssh://git@lab.cluster.gsi.dit.upm.es:2200/docs/templates/makefiles.git
makefiles-commit: makefiles-remote
git add -f .makefiles
git commit -em "Updated makefiles from ${NAME}"
makefiles-push:
git fetch makefiles $(NAME)
git subtree push --prefix=.makefiles/ makefiles $(NAME)
makefiles-pull: makefiles-remote
git subtree pull --prefix=.makefiles/ makefiles master --squash
pull:: makefiles-pull
push:: makefiles-push
.PHONY:: makefiles-remote makefiles-commit makefiles-push makefiles-pull pull push
.PHONY:: makefiles-remote makefiles-commit makefiles-push makefiles-pull

View File

@@ -6,13 +6,9 @@
],
"entries": [
{
"@type": [
"nif:RFC5147String",
"nif:Context"
],
"nif:beginIndex": 0,
"nif:endIndex": 40,
"nif:isString": "My favourite actress is Natalie Portman"
"text": "An entry should have a nif:isString key"
}
]
}

View File

@@ -3,10 +3,21 @@
"@id": "me:Result1",
"@type": "results",
"analysis": [
"me:SAnalysis1",
"me:SgAnalysis1",
"me:EmotionAnalysis1",
"me:NER1"
{
"@id": "_:SAnalysis1_Activity",
"@type": "marl:SentimentAnalysis",
"prov:wasAssociatedWith": "me:SAnalysis1"
},
{
"@id": "_:EmotionAnalysis1_Activity",
"@type": "onyx:EmotionAnalysis",
"prov:wasAssociatedWith": "me:EmotionAnalysis1"
},
{
"@id": "_:NER1_Activity",
"@type": "me:NER",
"prov:wasAssociatedWith": "me:NER1"
}
],
"entries": [
{
@@ -23,7 +34,7 @@
"nif:endIndex": 13,
"nif:anchorOf": "Microsoft",
"me:references": "http://dbpedia.org/page/Microsoft",
"prov:wasGeneratedBy": "me:NER1"
"prov:wasGeneratedBy": "_:NER1_Activity"
},
{
"@id": "http://micro.blog/status1#char=25,37",
@@ -31,7 +42,7 @@
"nif:endIndex": 37,
"nif:anchorOf": "Windows Phone",
"me:references": "http://dbpedia.org/page/Windows_Phone",
"prov:wasGeneratedBy": "me:NER1"
"prov:wasGeneratedBy": "_:NER1_Activity"
}
],
"suggestions": [
@@ -40,7 +51,7 @@
"nif:beginIndex": 16,
"nif:endIndex": 77,
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
"prov:wasGeneratedBy": "me:SgAnalysis1"
"prov:wasGeneratedBy": "_:SgAnalysis1_Activity"
}
],
"sentiments": [
@@ -51,14 +62,14 @@
"nif:anchorOf": "You'll be awesome.",
"marl:hasPolarity": "marl:Positive",
"marl:polarityValue": 0.9,
"prov:wasGeneratedBy": "me:SAnalysis1"
"prov:wasGeneratedBy": "_:SgAnalysis1_Activity"
}
],
"emotions": [
{
"@id": "http://micro.blog/status1#char=0,109",
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"prov:wasGeneratedBy": "me:EAnalysis1",
"prov:wasGeneratedBy": "_:EmotionAnalysis1_Activity",
"onyx:hasEmotion": [
{
"onyx:hasEmotionCategory": "wna:liking"

View File

@@ -1,78 +0,0 @@
{
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "me:Result1",
"@type": "results",
"analysis": [
"me:SAnalysis1",
"me:SgAnalysis1",
"me:EmotionAnalysis1",
"me:NER1",
{
"@type": "analysis",
"@id": "anonymous"
}
],
"entries": [
{
"@id": "http://micro.blog/status1",
"@type": [
"nif:RFC5147String",
"nif:Context"
],
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"entities": [
{
"@id": "http://micro.blog/status1#char=5,13",
"nif:beginIndex": 5,
"nif:endIndex": 13,
"nif:anchorOf": "Microsoft",
"me:references": "http://dbpedia.org/page/Microsoft",
"prov:wasGeneratedBy": "me:NER1"
},
{
"@id": "http://micro.blog/status1#char=25,37",
"nif:beginIndex": 25,
"nif:endIndex": 37,
"nif:anchorOf": "Windows Phone",
"me:references": "http://dbpedia.org/page/Windows_Phone",
"prov:wasGeneratedBy": "me:NER1"
}
],
"suggestions": [
{
"@id": "http://micro.blog/status1#char=16,77",
"nif:beginIndex": 16,
"nif:endIndex": 77,
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
"prov:wasGeneratedBy": "me:SgAnalysis1"
}
],
"sentiments": [
{
"@id": "http://micro.blog/status1#char=80,97",
"nif:beginIndex": 80,
"nif:endIndex": 97,
"nif:anchorOf": "You'll be awesome.",
"marl:hasPolarity": "marl:Positive",
"marl:polarityValue": 0.9,
"prov:wasGeneratedBy": "me:SAnalysis1"
}
],
"emotions": [
{
"@id": "http://micro.blog/status1#char=0,109",
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"prov:wasGeneratedBy": "me:EAnalysis1",
"onyx:hasEmotion": [
{
"onyx:hasEmotionCategory": "wna:liking"
},
{
"onyx:hasEmotionCategory": "wna:excitement"
}
]
}
]
}
]
}

View File

@@ -1,9 +1,8 @@
{
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "http://example.com#NIFExample",
"@id": "me:Result1",
"@type": "results",
"analysis": [
],
"analysis": [ ],
"entries": [
{
"@id": "http://example.org#char=0,40",

View File

@@ -4,22 +4,34 @@
"@type": "results",
"analysis": [
{
"@id": "me:SAnalysis1",
"@id": "_:SAnalysis1_Activity",
"@type": "marl:SentimentAnalysis",
"marl:maxPolarityValue": 1,
"marl:minPolarityValue": 0
"prov:wasAssociatedWith": "me:SentimentAnalysis",
"prov:used": [
{
"name": "marl:maxPolarityValue",
"prov:value": "1"
},
{
"@id": "me:SgAnalysis1",
"name": "marl:minPolarityValue",
"prov:value": "0"
}
]
},
{
"@id": "_:SgAnalysis1_Activity",
"prov:wasAssociatedWith": "me:SgAnalysis1",
"@type": "me:SuggestionAnalysis"
},
{
"@id": "me:EmotionAnalysis1",
"@type": "me:EmotionAnalysis"
"@id": "_:EmotionAnalysis1_Activity",
"@type": "me:EmotionAnalysis",
"prov:wasAssociatedWith": "me:EmotionAnalysis1"
},
{
"@id": "me:NER1",
"@type": "me:NER"
"@id": "_:NER1_Activity",
"@type": "me:NER",
"prov:wasAssociatedWith": "me:EmotionNER1"
}
],
"entries": [

View File

@@ -4,8 +4,9 @@
"@type": "results",
"analysis": [
{
"@id": "me:EmotionAnalysis1",
"@type": "onyx:EmotionAnalysis"
"@id": "me:EmotionAnalysis1_Activity",
"@type": "me:EmotionAnalysis1",
"prov:wasAssociatedWith": "me:EmotionAnalysis1"
}
],
"entries": [
@@ -26,7 +27,7 @@
{
"@id": "http://micro.blog/status1#char=0,109",
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"prov:wasGeneratedBy": "me:EmotionAnalysis1",
"prov:wasGeneratedBy": "_:EmotionAnalysis1_Activity",
"onyx:hasEmotion": [
{
"onyx:hasEmotionCategory": "wna:liking"

View File

@@ -4,8 +4,9 @@
"@type": "results",
"analysis": [
{
"@id": "me:NER1",
"@type": "me:NERAnalysis"
"@id": "_:NER1_Activity",
"@type": "me:NERAnalysis",
"prov:wasAssociatedWith": "me:NER1"
}
],
"entries": [

View File

@@ -9,9 +9,15 @@
"@type": "results",
"analysis": [
{
"@id": "me:HesamsAnalysis",
"@id": "me:HesamsAnalysis_Activity",
"@type": "onyx:EmotionAnalysis",
"onyx:usesEmotionModel": "emovoc:pad-dimensions"
"prov:wasAssociatedWith": "me:HesamsAnalysis",
"prov:used": [
{
"name": "emotion-model",
"prov:value": "emovoc:pad-dimensions"
}
]
}
],
"entries": [
@@ -32,7 +38,7 @@
{
"@id": "Entry1#char=0,21",
"nif:anchorOf": "This is a test string",
"prov:wasGeneratedBy": "me:HesamAnalysis",
"prov:wasGeneratedBy": "_:HesamAnalysis_Activity",
"onyx:hasEmotion": [
{
"emovoc:pleasure": 0.5,

View File

@@ -4,10 +4,9 @@
"@type": "results",
"analysis": [
{
"@id": "me:SAnalysis1",
"@id": "_:SAnalysis1_Activity",
"@type": "marl:SentimentAnalysis",
"marl:maxPolarityValue": 1,
"marl:minPolarityValue": 0
"prov:wasAssociatedWith": "me:SAnalysis1"
}
],
"entries": [
@@ -30,7 +29,7 @@
"nif:anchorOf": "You'll be awesome.",
"marl:hasPolarity": "marl:Positive",
"marl:polarityValue": 0.9,
"prov:wasGeneratedBy": "me:SAnalysis1"
"prov:wasGeneratedBy": "_:SAnalysis1_Activity"
}
],
"emotionSets": [

View File

@@ -3,7 +3,11 @@
"@id": "me:Result1",
"@type": "results",
"analysis": [
"me:SgAnalysis1"
{
"@id": "_:SgAnalysis1_Activity",
"@type": "me:SuggestionAnalysis",
"prov:wasAssociatedWith": "me:SgAnalysis1"
}
],
"entries": [
{
@@ -12,7 +16,6 @@
"nif:RFC5147String",
"nif:Context"
],
"prov:wasGeneratedBy": "me:SAnalysis1",
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"entities": [
],
@@ -22,7 +25,7 @@
"nif:beginIndex": 16,
"nif:endIndex": 77,
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
"prov:wasGeneratedBy": "me:SgAnalysis1"
"prov:wasGeneratedBy": "_:SgAnalysis1_Activity"
}
],
"sentiments": [

View File

@@ -11,7 +11,7 @@ class Async(AnalysisPlugin):
'''An example of an asynchronous module'''
author = '@balkian'
version = '0.2'
async = True
sync = False
def _do_async(self, num_processes):
pool = multiprocessing.Pool(processes=num_processes)

View File

@@ -1,16 +1,15 @@
from future.utils import iteritems
from .models import Error, Results, Entry, from_string
from .models import Analysis, Error, Results, Entry, from_string
import logging
logger = logging.getLogger(__name__)
boolean = [True, False]
API_PARAMS = {
"algorithm": {
"aliases": ["algorithms", "a", "algo"],
"required": False,
"required": True,
"default": 'default',
"description": ("Algorithms that will be used to process the request."
"It may be a list of comma-separated names."),
},
@@ -43,6 +42,14 @@ API_PARAMS = {
"options": boolean,
"default": False
},
"verbose": {
"@id": "verbose",
"description": "Show all help, including the common API parameters, or only plugin-related info",
"aliases": ["v"],
"required": True,
"options": boolean,
"default": True
},
"emotionModel": {
"@id": "emotionModel",
"aliases": ["emoModel"],
@@ -140,6 +147,15 @@ NIF_PARAMS = {
}
}
BUILTIN_PARAMS = {}
for d in [
NIF_PARAMS, CLI_PARAMS, WEB_PARAMS, PLUGINS_PARAMS, EVAL_PARAMS,
API_PARAMS
]:
for k, v in d.items():
BUILTIN_PARAMS[k] = v
def parse_params(indict, *specs):
if not specs:
@@ -161,10 +177,9 @@ def parse_params(indict, *specs):
outdict[param] = options["default"]
elif options.get("required", False):
wrong_params[param] = spec[param]
continue
if "options" in options:
elif "options" in options:
if options["options"] == boolean:
outdict[param] = outdict[param] in [None, True, 'true', '1']
outdict[param] = str(outdict[param]).lower() in ['true', '1']
elif outdict[param] not in options["options"]:
wrong_params[param] = spec[param]
if wrong_params:
@@ -175,31 +190,126 @@ def parse_params(indict, *specs):
parameters=outdict,
errors=wrong_params)
raise message
if 'algorithm' in outdict and not isinstance(outdict['algorithm'], list):
outdict['algorithm'] = list(outdict['algorithm'].split(','))
return outdict
def parse_extra_params(request, plugin=None):
params = request.parameters.copy()
if plugin:
extra_params = parse_params(params, plugin.get('extra_params', {}))
params.update(extra_params)
def get_all_params(plugins, *specs):
'''Return a list of parameters for a given set of specifications and plugins.'''
dic = {}
for s in specs:
dic.update(s)
dic.update(get_extra_params(plugins))
return dic
def get_extra_params(plugins):
'''Get a list of possible parameters given a list of plugins'''
params = {}
extra_params = {}
for plugin in plugins:
this_params = plugin.get('extra_params', {})
for k, v in this_params.items():
if k not in extra_params:
extra_params[k] = {}
extra_params[k][plugin.name] = v
for k, v in extra_params.items(): # Resolve conflicts
if len(v) == 1: # Add the extra options that do not collide
params[k] = list(v.values())[0]
else:
required = False
aliases = None
options = None
default = None
nodefault = False # Set when defaults are not compatible
for plugin, opt in v.items():
params['{}.{}'.format(plugin, k)] = opt
required = required or opt.get('required', False)
newaliases = set(opt.get('aliases', []))
if aliases is None:
aliases = newaliases
else:
aliases = aliases & newaliases
if 'options' in opt:
newoptions = set(opt['options'])
options = newoptions if options is None else options & newoptions
if 'default' in opt:
newdefault = opt['default']
if newdefault:
if default is None and not nodefault:
default = newdefault
elif newdefault != default:
nodefault = True
default = None
# Check for incompatibilities
if options != set():
params[k] = {
'default': default,
'aliases': list(aliases),
'required': required,
'options': list(options)
}
return params
def parse_analysis(params, plugins):
'''
Parse the given parameters individually for each plugin, and get a list of the parameters that
belong to each of the plugins. Each item can then be used in the plugin.analyse_entries method.
'''
analysis_list = []
for i, plugin in enumerate(plugins):
if not plugin:
continue
this_params = filter_params(params, plugin, i)
parsed = parse_params(this_params, plugin.get('extra_params', {}))
analysis = plugin.activity(parsed)
analysis_list.append(analysis)
return analysis_list
def filter_params(params, plugin, ith=-1):
'''
Get the values within params that apply to a plugin.
More specific names override more general names, in this order:
<index_order>.parameter > <plugin.name>.parameter > parameter
Example:
>>> filter_params({'0.hello': True, 'hello': False}, Plugin(), 0)
{ '0.hello': True, 'hello': True}
'''
thisparams = {}
if ith >= 0:
ith = '{}.'.format(ith)
else:
ith = ""
for k, v in params.items():
if ith and k.startswith(str(ith)):
thisparams[k[len(ith):]] = v
elif k.startswith(plugin.name):
thisparams[k[len(plugin.name) + 1:]] = v
elif k not in thisparams:
thisparams[k] = v
return thisparams
def parse_call(params):
'''Return a results object based on the parameters used in a call/request.
'''
Return a results object based on the parameters used in a call/request.
'''
params = parse_params(params, NIF_PARAMS)
if params['informat'] == 'text':
results = Results()
entry = Entry(nif__isString=params['input'],
id='#') # Use @base
entry = Entry(nif__isString=params['input'], id='#') # Use @base
results.entries.append(entry)
elif params['informat'] == 'json-ld':
results = from_string(params['input'], cls=Results)
else: # pragma: no cover
raise NotImplementedError('Informat {} is not implemented'.format(params['informat']))
raise NotImplementedError('Informat {} is not implemented'.format(
params['informat']))
results.parameters = params
return results

View File

@@ -69,7 +69,7 @@ def encoded_url(url=None, base=None):
if request.method == 'GET':
url = request.full_path[1:] # Remove the first slash
else:
hash(frozenset(request.parameters.items()))
hash(frozenset(tuple(request.parameters.items())))
code = 'hash:{}'.format(hash)
code = code or base64.urlsafe_b64encode(url.encode()).decode()
@@ -188,16 +188,29 @@ def basic_api(f):
@api_blueprint.route('/<path:plugin>', methods=['POST', 'GET'])
@basic_api
def api_root(plugin):
if plugin:
if request.parameters['algorithm'] != api.API_PARAMS['algorithm']['default']:
raise Error('You cannot specify the algorithm with a parameter and a URL variable.'
' Please, remove one of them')
request.parameters['algorithm'] = tuple(plugin.replace('+', '/').split('/'))
params = request.parameters
plugin = request.parameters['algorithm']
sp = current_app.senpy
plugins = sp.get_plugins(plugin)
if request.parameters['help']:
dic = dict(api.API_PARAMS, **api.NIF_PARAMS)
response = Help(valid_parameters=dic)
apis = []
if request.parameters['verbose']:
apis.append(api.BUILTIN_PARAMS)
allparameters = api.get_all_params(plugins, *apis)
response = Help(valid_parameters=allparameters)
return response
req = api.parse_call(request.parameters)
if plugin:
plugin = plugin.replace('+', '/')
plugin = plugin.split('/')
req.parameters['algorithm'] = plugin
return current_app.senpy.analyse(req)
analysis = api.parse_analysis(req.parameters, plugins)
results = current_app.senpy.analyse(req, analysis)
return results
@api_blueprint.route('/evaluate/', methods=['POST', 'GET'])

View File

@@ -31,10 +31,10 @@ def main_function(argv):
default_plugins = params.get('default-plugins', False)
sp = Senpy(default_plugins=default_plugins, plugin_folder=plugin_folder)
request = api.parse_call(params)
algos = request.parameters.get('algorithm', None)
algos = sp.get_plugins(request.parameters.get('algorithm', None))
if algos:
for algo in algos:
sp.activate_plugin(algo)
sp.activate_plugin(algo.name)
else:
sp.activate_all()
res = sp.analyse(request)

View File

@@ -25,7 +25,11 @@ class Client(object):
def request(self, path=None, method='GET', **params):
url = '{}{}'.format(self.endpoint.rstrip('/'), path)
if method == 'POST':
response = requests.post(url=url, data=params)
else:
response = requests.request(method=method, url=url, params=params)
try:
resp = models.from_dict(response.json())
except Exception as ex:

View File

@@ -6,7 +6,6 @@ from future import standard_library
standard_library.install_aliases()
from . import plugins, api
from .plugins import Plugin, evaluate
from .models import Error, AggregatedEvaluation
from .blueprints import api_blueprint, demo_blueprint, ns_blueprint
@@ -17,7 +16,6 @@ import copy
import errno
import logging
from . import gsitk_compat
logger = logging.getLogger(__name__)
@@ -25,6 +23,7 @@ logger = logging.getLogger(__name__)
class Senpy(object):
""" Default Senpy extension for Flask """
def __init__(self,
app=None,
plugin_folder=".",
@@ -50,7 +49,7 @@ class Senpy(object):
self.add_folder('plugins', from_root=True)
else:
# Add only conversion plugins
self.add_folder(os.path.join('plugins', 'conversion'),
self.add_folder(os.path.join('plugins', 'postprocessing'),
from_root=True)
self.app = app
if app is not None:
@@ -79,27 +78,47 @@ class Senpy(object):
def delete_plugin(self, plugin):
del self._plugins[plugin.name.lower()]
def plugins(self, **kwargs):
def plugins(self, plugin_type=None, is_activated=True, **kwargs):
""" Return the plugins registered for a given application. Filtered by criteria """
return list(plugins.pfilter(self._plugins, **kwargs))
return list(plugins.pfilter(self._plugins, plugin_type=plugin_type,
is_activated=is_activated, **kwargs))
def get_plugin(self, name, default=None):
if name == 'default':
return self.default_plugin
plugin = name.lower()
if plugin in self._plugins:
return self._plugins[plugin]
elif name == 'conversion':
return None
results = self.plugins(id='endpoint:plugins/{}'.format(name))
if name.lower() in self._plugins:
return self._plugins[name.lower()]
if not results:
return Error(message="Plugin not found", status=404)
results = self.plugins(id='endpoint:plugins/{}'.format(name.lower()),
plugin_type=None)
if results:
return results[0]
results = self.plugins(id=name,
plugin_type=None)
if results:
return results[0]
msg = ("Plugin not found: '{}'\n"
"Make sure it is ACTIVATED\n"
"Valid algorithms: {}").format(name,
self._plugins.keys())
raise Error(message=msg, status=404)
def get_plugins(self, name):
try:
name = name.split(',')
except AttributeError:
pass # Assume it is a tuple or a list
return tuple(self.get_plugin(n) for n in name)
@property
def analysis_plugins(self):
""" Return only the analysis plugins """
return self.plugins(plugin_type='analysisPlugin')
""" Return only the analysis plugins that are active"""
return self.plugins(plugin_type='analysisPlugin', is_activated=True)
def add_folder(self, folder, from_root=False):
""" Find plugins in this folder and add them to this instance """
@@ -114,73 +133,140 @@ class Senpy(object):
else:
raise AttributeError("Not a folder or does not exist: %s", folder)
def _get_plugins(self, request):
if not self.analysis_plugins:
raise Error(
status=404,
message=("No plugins found."
" Please install one."))
algos = request.parameters.get('algorithm', None)
if not algos:
if self.default_plugin:
algos = [self.default_plugin.name, ]
else:
raise Error(
status=404,
message="No default plugin found, and None provided")
# def check_analysis_request(self, analysis):
# '''Check if the analysis request can be fulfilled'''
# if not self.plugins():
# raise Error(
# status=404,
# message=("No plugins found."
# " Please install one."))
# for a in analysis:
# algo = a.algorithm
# if algo == 'default' and not self.default_plugin:
# raise Error(
# status=404,
# message="No default plugin found, and None provided")
# else:
# self.get_plugin(algo)
plugins = list()
for algo in algos:
algo = algo.lower()
if algo not in self._plugins:
msg = ("The algorithm '{}' is not valid\n"
"Valid algorithms: {}").format(algo,
self._plugins.keys())
logger.debug(msg)
raise Error(
status=404,
message=msg)
plugins.append(self._plugins[algo])
return plugins
def _process_entries(self, entries, req, plugins):
def _process(self, req, pending, done=None):
"""
Recursively process the entries with the first plugin in the list, and pass the results
to the rest of the plugins.
"""
if not plugins:
for i in entries:
yield i
return
plugin = plugins[0]
specific_params = api.parse_extra_params(req, plugin)
req.analysis.append({'plugin': plugin,
'parameters': specific_params})
results = plugin.analyse_entries(entries, specific_params)
for i in self._process_entries(results, req, plugins[1:]):
yield i
done = done or []
if not pending:
return req
analysis = pending[0]
results = analysis.run(req)
results.analysis.append(analysis)
done += analysis
return self._process(results, pending[1:], done)
def install_deps(self):
plugins.install_deps(*self.plugins())
def analyse(self, request):
def analyse(self, request, analysis=None):
"""
Main method that analyses a request, either from CLI or HTTP.
It takes a processed request, provided by the user, as returned
by api.parse_call().
"""
if not self.plugins():
raise Error(
status=404,
message=("No plugins found."
" Please install one."))
if analysis is None:
params = str(request)
plugins = self.get_plugins(request.parameters['algorithm'])
analysis = api.parse_analysis(request.parameters, plugins)
logger.debug("analysing request: {}".format(request))
entries = request.entries
request.entries = []
plugins = self._get_plugins(request)
results = request
for i in self._process_entries(entries, results, plugins):
results.entries.append(i)
self.convert_emotions(results)
logger.debug("Returning analysis result: {}".format(results))
results.analysis = [i['plugin'].id for i in results.analysis]
results = self._process(request, analysis)
logger.debug("Got analysis result: {}".format(results))
results = self.postprocess(results)
logger.debug("Returning post-processed result: {}".format(results))
return results
def convert_emotions(self, resp):
"""
Conversion of all emotions in a response **in place**.
In addition to converting from one model to another, it has
to include the conversion plugin to the analysis list.
Needless to say, this is far from an elegant solution, but it works.
@todo refactor and clean up
"""
plugins = resp.analysis
if 'parameters' not in resp:
return resp
params = resp['parameters']
toModel = params.get('emotionModel', None)
if not toModel:
return resp
logger.debug('Asked for model: {}'.format(toModel))
output = params.get('conversion', None)
candidates = {}
for plugin in plugins:
try:
fromModel = plugin.get('onyx:usesEmotionModel', None)
candidates[plugin.id] = next(self._conversion_candidates(fromModel, toModel))
logger.debug('Analysis plugin {} uses model: {}'.format(
plugin.id, fromModel))
except StopIteration:
e = Error(('No conversion plugin found for: '
'{} -> {}'.format(fromModel, toModel)),
status=404)
e.original_response = resp
e.parameters = params
raise e
newentries = []
done = []
for i in resp.entries:
if output == "full":
newemotions = copy.deepcopy(i.emotions)
else:
newemotions = []
for j in i.emotions:
plugname = j['prov:wasGeneratedBy']
candidate = candidates[plugname]
done.append({'plugin': candidate, 'parameters': params})
for k in candidate.convert(j, fromModel, toModel, params):
k.prov__wasGeneratedBy = candidate.id
if output == 'nested':
k.prov__wasDerivedFrom = j
newemotions.append(k)
i.emotions = newemotions
newentries.append(i)
resp.entries = newentries
return resp
def _conversion_candidates(self, fromModel, toModel):
candidates = self.plugins(plugin_type=plugins.EmotionConversion)
for candidate in candidates:
for pair in candidate.onyx__doesConversion:
logging.debug(pair)
if candidate.can_convert(fromModel, toModel):
yield candidate
def postprocess(self, response):
'''
Transform the results from the analysis plugins.
It has some pre-defined post-processing like emotion conversion,
and it also allows plugins to auto-select themselves.
'''
response = self.convert_emotions(response)
for plug in self.plugins(plugin_type=plugins.PostProcessing):
if plug.check(response, response.analysis):
response = plug.process(response)
return response
def _get_datasets(self, request):
if not self.datasets:
raise Error(
@@ -191,8 +277,8 @@ class Senpy(object):
for dataset in datasets_name:
if dataset not in self.datasets:
logger.debug(("The dataset '{}' is not valid\n"
"Valid datasets: {}").format(dataset,
self.datasets.keys()))
"Valid datasets: {}").format(
dataset, self.datasets.keys()))
raise Error(
status=404,
message="The dataset '{}' is not valid".format(dataset))
@@ -218,78 +304,22 @@ class Senpy(object):
results = AggregatedEvaluation()
results.parameters = params
datasets = self._get_datasets(results)
plugins = self._get_plugins(results)
for eval in evaluate(plugins, datasets):
plugins = []
for plugname in params.algorithm:
plugins = self.get_plugin(plugname)
for eval in plugins.evaluate(plugins, datasets):
results.evaluations.append(eval)
if 'with_parameters' not in results.parameters:
del results.parameters
logger.debug("Returning evaluation result: {}".format(results))
return results
def _conversion_candidates(self, fromModel, toModel):
candidates = self.plugins(plugin_type='emotionConversionPlugin')
for candidate in candidates:
for pair in candidate.onyx__doesConversion:
logging.debug(pair)
if pair['onyx:conversionFrom'] == fromModel \
and pair['onyx:conversionTo'] == toModel:
yield candidate
def convert_emotions(self, resp):
"""
Conversion of all emotions in a response **in place**.
In addition to converting from one model to another, it has
to include the conversion plugin to the analysis list.
Needless to say, this is far from an elegant solution, but it works.
@todo refactor and clean up
"""
plugins = [i['plugin'] for i in resp.analysis]
params = resp.parameters
toModel = params.get('emotionModel', None)
if not toModel:
return
logger.debug('Asked for model: {}'.format(toModel))
output = params.get('conversion', None)
candidates = {}
for plugin in plugins:
try:
fromModel = plugin.get('onyx:usesEmotionModel', None)
candidates[plugin.id] = next(self._conversion_candidates(fromModel, toModel))
logger.debug('Analysis plugin {} uses model: {}'.format(plugin.id, fromModel))
except StopIteration:
e = Error(('No conversion plugin found for: '
'{} -> {}'.format(fromModel, toModel)),
status=404)
e.original_response = resp
e.parameters = params
raise e
newentries = []
for i in resp.entries:
if output == "full":
newemotions = copy.deepcopy(i.emotions)
else:
newemotions = []
for j in i.emotions:
plugname = j['prov:wasGeneratedBy']
candidate = candidates[plugname]
resp.analysis.append({'plugin': candidate,
'parameters': params})
for k in candidate.convert(j, fromModel, toModel, params):
k.prov__wasGeneratedBy = candidate.id
if output == 'nested':
k.prov__wasDerivedFrom = j
newemotions.append(k)
i.emotions = newemotions
newentries.append(i)
resp.entries = newentries
@property
def default_plugin(self):
if not self._default or not self._default.is_activated:
candidates = self.plugins(plugin_type='analysisPlugin',
is_activated=True)
candidates = self.plugins(
plugin_type='analysisPlugin', is_activated=True)
if len(candidates) > 0:
self._default = candidates[0]
else:
@@ -299,7 +329,7 @@ class Senpy(object):
@default_plugin.setter
def default_plugin(self, value):
if isinstance(value, Plugin):
if isinstance(value, plugins.Plugin):
if not value.is_activated:
raise AttributeError('The default plugin has to be activated.')
self._default = value
@@ -351,7 +381,8 @@ class Senpy(object):
logger.info("Activating plugin: {}".format(plugin.name))
if sync or not getattr(plugin, 'async', True):
if sync or not getattr(plugin, 'async', True) or getattr(
plugin, 'sync', False):
return self._activate(plugin)
else:
th = Thread(target=partial(self._activate, plugin))
@@ -374,7 +405,8 @@ class Senpy(object):
self._set_active(plugin, False)
if sync or not getattr(plugin, 'async', True):
if sync or not getattr(plugin, 'async', True) or not getattr(
plugin, 'sync', False):
self._deactivate(plugin)
else:
th = Thread(target=partial(self._deactivate, plugin))

View File

@@ -1,5 +1,7 @@
import logging
from pkg_resources import parse_version, get_distribution, DistributionNotFound
logger = logging.getLogger(__name__)
MSG = 'GSITK is not (properly) installed.'
@@ -12,15 +14,18 @@ def raise_exception(*args, **kwargs):
try:
gsitk_distro = get_distribution("gsitk")
GSITK_VERSION = parse_version(gsitk_distro.version)
GSITK_AVAILABLE = GSITK_VERSION > parse_version("0.1.9.1") # Earlier versions have a bug
except DistributionNotFound:
GSITK_AVAILABLE = False
GSITK_VERSION = ()
if GSITK_AVAILABLE:
from gsitk.datasets.datasets import DatasetManager
from gsitk.evaluation.evaluation import Evaluation as Eval
from sklearn.pipeline import Pipeline
import pkg_resources
GSITK_VERSION = pkg_resources.get_distribution("gsitk").version.split()
GSITK_AVAILABLE = GSITK_VERSION > (0, 1, 9, 1) # Earlier versions have a bug
modules = locals()
except ImportError:
else:
logger.warning(IMPORTMSG)
GSITK_AVAILABLE = False
GSITK_VERSION = ()
DatasetManager = Eval = Pipeline = raise_exception

View File

@@ -85,6 +85,7 @@ class BaseMeta(ABCMeta):
schema = json.load(f)
resolver = jsonschema.RefResolver(schema_path, schema)
if '@type' not in attrs:
attrs['@type'] = "".join((name[0].lower(), name[1:]))
attrs['_schema_file'] = schema_file
attrs['schema'] = schema
@@ -244,10 +245,10 @@ class CustomDict(MutableMapping, object):
return key[0] == '_'
def __str__(self):
return str(self.serializable())
return json.dumps(self.serializable(), sort_keys=True, indent=4)
def __repr__(self):
return str(self.serializable())
return json.dumps(self.serializable(), sort_keys=True, indent=4)
_Alias = namedtuple('Alias', 'indict')

View File

@@ -121,11 +121,11 @@ class BaseModel(with_metaclass(BaseMeta, CustomDict)):
'''
schema_file = DEFINITIONS_FILE
# schema_file = DEFINITIONS_FILE
_context = base_context["@context"]
def __init__(self, *args, **kwargs):
auto_id = kwargs.pop('_auto_id', True)
auto_id = kwargs.pop('_auto_id', False)
super(BaseModel, self).__init__(*args, **kwargs)
@@ -133,7 +133,7 @@ class BaseModel(with_metaclass(BaseMeta, CustomDict)):
self.id
if '@type' not in self:
logger.warn('Created an instance of an unknown model')
logger.warning('Created an instance of an unknown model')
@property
def id(self):
@@ -325,7 +325,6 @@ def _add_class_from_schema(*args, **kwargs):
for i in [
'aggregatedEvaluation',
'analysis',
'dataset',
'datasets',
'emotion',
@@ -339,7 +338,7 @@ for i in [
'entity',
'help',
'metric',
'plugin',
'parameter',
'plugins',
'response',
'results',
@@ -349,3 +348,55 @@ for i in [
]:
_add_class_from_schema(i)
class Analysis(BaseModel):
schema = 'analysis'
parameters = alias('prov:used')
@property
def params(self):
outdict = {}
outdict['algorithm'] = self.algorithm
for param in self.parameters:
outdict[param['name']] = param['value']
return outdict
@params.setter
def params(self, value):
for k, v in value.items():
for param in self.parameters:
if param.name == k:
param.value = v
break
else:
self.parameters.append(Parameter(name=k, value=v))
@property
def algorithm(self):
return self['prov:wasAssociatedWith']
@property
def plugin(self):
return self._plugin
@plugin.setter
def plugin(self, value):
self._plugin = value
self['prov:wasAssociatedWith'] = value.id
def run(self, request):
return self.plugin.process(request, self.params)
class Plugin(BaseModel):
schema = 'plugin'
def activity(self, parameters):
'''Generate a prov:Activity from this plugin and the given parameters'''
a = Analysis()
a.plugin = self
a.params = parameters
return a

View File

@@ -1,7 +1,6 @@
from future import standard_library
standard_library.install_aliases()
from future.utils import with_metaclass
from functools import partial
@@ -10,7 +9,6 @@ import os
import re
import pickle
import logging
import copy
import pprint
import inspect
@@ -19,14 +17,13 @@ import subprocess
import importlib
import yaml
import threading
import nltk
from nltk import download
from .. import models, utils
from .. import api
from .. import gsitk_compat
from .. import testing
logger = logging.getLogger(__name__)
@@ -46,16 +43,19 @@ class PluginMeta(models.BaseMeta):
if doc:
attrs['description'] = doc
else:
logger.warn(('Plugin {} does not have a description. '
'Please, add a short summary to help other developers').format(name))
logger.warning(
('Plugin {} does not have a description. '
'Please, add a short summary to help other developers'
).format(name))
cls = super(PluginMeta, mcs).__new__(mcs, name, bases, attrs)
if alias in mcs._classes:
if os.environ.get('SENPY_TESTING', ""):
raise Exception(('The type of plugin {} already exists. '
raise Exception(
('The type of plugin {} already exists. '
'Please, choose a different name').format(name))
else:
logger.warn('Overloading plugin class: {}'.format(alias))
logger.warning('Overloading plugin class: {}'.format(alias))
mcs._classes[alias] = cls
return cls
@@ -87,10 +87,12 @@ class Plugin(with_metaclass(PluginMeta, models.Plugin)):
if info:
self.update(info)
self.validate()
self.id = 'endpoint:plugins/{}_{}'.format(self['name'], self['version'])
self.id = 'endpoint:plugins/{}_{}'.format(self['name'],
self['version'])
self.is_activated = False
self._lock = threading.Lock()
self._directory = os.path.abspath(os.path.dirname(inspect.getfile(self.__class__)))
self._directory = os.path.abspath(
os.path.dirname(inspect.getfile(self.__class__)))
data_folder = data_folder or os.getcwd()
subdir = os.path.join(data_folder, self.name)
@@ -118,7 +120,8 @@ class Plugin(with_metaclass(PluginMeta, models.Plugin)):
if x not in self:
missing.append(x)
if missing:
raise models.Error('Missing configuration parameters: {}'.format(missing))
raise models.Error(
'Missing configuration parameters: {}'.format(missing))
def get_folder(self):
return os.path.dirname(inspect.getfile(self.__class__))
@@ -129,22 +132,60 @@ class Plugin(with_metaclass(PluginMeta, models.Plugin)):
def deactivate(self):
pass
def process(self, request, parameters, **kwargs):
"""
An implemented plugin should override this method.
Here, we assume that a process_entries method exists."""
newentries = list(
self.process_entries(request.entries, parameters))
request.entries = newentries
return request
def process_entries(self, entries, parameters):
for entry in entries:
self.log.debug('Processing entry with plugin {}: {}'.format(
self, entry))
results = self.process_entry(entry, parameters)
if inspect.isgenerator(results):
for result in results:
yield result
else:
yield results
def process_entry(self, entry, parameters):
"""
This base method is here to adapt plugins which only
implement the *process* function.
Note that this method may yield an annotated entry or a list of
entries (e.g. in a tokenizer)
"""
raise NotImplementedError(
'You need to implement process, process_entries or process_entry in your plugin'
)
def test(self, test_cases=None):
if not test_cases:
if not hasattr(self, 'test_cases'):
raise AttributeError(('Plugin {} [{}] does not have any defined '
raise AttributeError(
('Plugin {} [{}] does not have any defined '
'test cases').format(self.id,
inspect.getfile(self.__class__)))
test_cases = self.test_cases
for case in test_cases:
try:
self.test_case(case)
self.log.debug('Test case passed:\n{}'.format(pprint.pformat(case)))
self.log.debug('Test case passed:\n{}'.format(
pprint.pformat(case)))
except Exception as ex:
self.log.warn('Test case failed:\n{}'.format(pprint.pformat(case)))
self.log.warning('Test case failed:\n{}'.format(
pprint.pformat(case)))
raise
def test_case(self, case, mock=testing.MOCK_REQUESTS):
if 'entry' not in case and 'input' in case:
entry = models.Entry(_auto_id=False)
entry.nif__isString = case['input']
case['entry'] = entry
entry = models.Entry(case['entry'])
given_parameters = case.get('params', case.get('parameters', {}))
expected = case.get('expected', None)
@@ -152,21 +193,25 @@ class Plugin(with_metaclass(PluginMeta, models.Plugin)):
responses = case.get('responses', [])
try:
params = api.parse_params(given_parameters, self.extra_params)
request = models.Response()
parameters = api.parse_params(given_parameters,
self.extra_params)
request.entries = [
entry,
]
method = partial(self.analyse_entries, [entry, ], params)
method = partial(self.process, request, parameters)
if mock:
res = list(method())
res = method()
else:
with testing.patch_all_requests(responses):
res = list(method())
res = method()
if not isinstance(expected, list):
expected = [expected]
utils.check_template(res, expected)
for r in res:
r.validate()
utils.check_template(res.entries, expected)
res.validate()
except models.Error:
if should_fail:
return
@@ -203,40 +248,26 @@ class Analysis(Plugin):
A subclass of Plugin that analyses text and provides an annotation.
'''
def analyse(self, *args, **kwargs):
raise NotImplementedError(
'Your plugin should implement either analyse or analyse_entry')
def analyse_entry(self, entry, parameters):
""" An implemented plugin should override this method.
This base method is here to adapt old style plugins which only
implement the *analyse* function.
Note that this method may yield an annotated entry or a list of
entries (e.g. in a tokenizer)
"""
text = entry['nif:isString']
params = copy.copy(parameters)
params['input'] = text
results = self.analyse(**params)
for i in results.entries:
yield i
def analyse(self, request, parameters):
return super(Analysis, self).process(request, parameters)
def analyse_entries(self, entries, parameters):
for entry in entries:
self.log.debug('Analysing entry with plugin {}: {}'.format(self, entry))
results = self.analyse_entry(entry, parameters)
if inspect.isgenerator(results):
for result in results:
yield result
else:
yield results
for i in super(Analysis, self).process_entries(entries, parameters):
yield i
def test_case(self, case):
if 'entry' not in case and 'input' in case:
entry = models.Entry(_auto_id=False)
entry.nif__isString = case['input']
case['entry'] = entry
super(Analysis, self).test_case(case)
def process(self, request, parameters, **kwargs):
return self.analyse(request, parameters)
def process_entries(self, entries, parameters):
for i in self.analyse_entries(entries, parameters):
yield i
def process_entry(self, entry, parameters, **kwargs):
if hasattr(self, 'analyse_entry'):
for i in self.analyse_entry(entry, parameters):
yield i
else:
super(Analysis, self).process_entry(entry, parameters, **kwargs)
AnalysisPlugin = Analysis
@@ -247,7 +278,20 @@ class Conversion(Plugin):
A subclass of Plugins that convert between different annotation models.
e.g. a conversion of emotion models, or normalization of sentiment values.
'''
pass
def process(self, response, parameters, plugins=None, **kwargs):
plugins = plugins or []
newentries = []
for entry in response.entries:
newentries.append(
self.convert_entry(entry, parameters, plugins))
response.entries = newentries
return response
def convert_entry(self, entry, parameters, conversions_applied):
raise NotImplementedError(
'You should implement a way to convert each entry, or a custom process method'
)
ConversionPlugin = Conversion
@@ -284,12 +328,28 @@ class EmotionConversion(Conversion):
'''
A subclass of Conversion that converts emotion annotations using different models
'''
pass
def can_convert(self, fromModel, toModel):
'''
Whether this plugin can convert from fromModel to toModel.
If fromModel is None, it is interpreted as "any Model"
'''
for pair in self.onyx__doesConversion:
if (pair['onyx:conversionTo'] == toModel) and \
((fromModel is None) or (pair['onyx:conversionFrom'] == fromModel)):
return True
return False
EmotionConversionPlugin = EmotionConversion
class PostProcessing(Plugin):
def check(self, request, plugins):
'''Should this plugin be run for this request?'''
return False
class Box(AnalysisPlugin):
'''
Black box plugins delegate analysis to a function.
@@ -314,9 +374,10 @@ class Box(AnalysisPlugin):
return output
def predict_one(self, input):
raise NotImplementedError('You should define the behavior of this plugin')
raise NotImplementedError(
'You should define the behavior of this plugin')
def analyse_entries(self, entries, params):
def process_entries(self, entries, params):
for entry in entries:
input = self.input(entry=entry, params=params)
results = self.predict_one(input=input)
@@ -385,7 +446,6 @@ class EmotionBox(TextBox, EmotionPlugin):
class MappingMixin(object):
@property
def mappings(self):
return self._mappings
@@ -395,11 +455,10 @@ class MappingMixin(object):
self._mappings = value
def output(self, output, entry, params):
output = self.mappings.get(output,
self.mappings.get('default', output))
return super(MappingMixin, self).output(output=output,
entry=entry,
params=params)
output = self.mappings.get(output, self.mappings.get(
'default', output))
return super(MappingMixin, self).output(
output=output, entry=entry, params=params)
class ShelfMixin(object):
@@ -412,7 +471,8 @@ class ShelfMixin(object):
with self.open(self.shelf_file, 'rb') as p:
self._sh = pickle.load(p)
except (IndexError, EOFError, pickle.UnpicklingError):
self.log.warning('Corrupted shelf file: {}'.format(self.shelf_file))
self.log.warning('Corrupted shelf file: {}'.format(
self.shelf_file))
if not self.get('force_shelf', False):
raise
return self._sh
@@ -460,8 +520,7 @@ def pfilter(plugins, plugin_type=Analysis, **kwargs):
plugin_type = plugin_type[0].upper() + plugin_type[1:]
pclass = globals()[plugin_type]
logger.debug('Class: {}'.format(pclass))
candidates = filter(lambda x: isinstance(x, pclass),
plugins)
candidates = filter(lambda x: isinstance(x, pclass), plugins)
except KeyError:
raise models.Error('{} is not a valid type'.format(plugin_type))
else:
@@ -471,8 +530,7 @@ def pfilter(plugins, plugin_type=Analysis, **kwargs):
def matches(plug):
res = all(getattr(plug, k, None) == v for (k, v) in kwargs.items())
logger.debug(
"matching {} with {}: {}".format(plug.name, kwargs, res))
logger.debug("matching {} with {}: {}".format(plug.name, kwargs, res))
return res
if kwargs:
@@ -506,17 +564,17 @@ def install_deps(*plugins):
for req in requirements:
pip_args.append(req)
logger.info('Installing requirements: ' + str(requirements))
process = subprocess.Popen(pip_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
process = subprocess.Popen(
pip_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_log_subprocess_output(process)
exitcode = process.wait()
installed = True
if exitcode != 0:
raise models.Error("Dependencies not properly installed: {}".format(pip_args))
raise models.Error(
"Dependencies not properly installed: {}".format(pip_args))
nltk_resources |= set(info.get('nltk_resources', []))
installed |= nltk.download(list(nltk_resources))
installed |= download(list(nltk_resources))
return installed
@@ -593,7 +651,8 @@ def one_from_module(module, root, info, **kwargs):
if '@type' in info:
cls = PluginMeta.from_type(info['@type'])
return cls(info=info, **kwargs)
instance = next(from_module(module=module, root=root, info=info, **kwargs), None)
instance = next(
from_module(module=module, root=root, info=info, **kwargs), None)
if not instance:
raise Exception("No valid plugin for: {}".format(module))
return instance
@@ -617,7 +676,8 @@ def _instances_in_module(module):
def _from_module_name(module, root, info=None, **kwargs):
module = load_module(module, root)
for plugin in _from_loaded_module(module=module, root=root, info=info, **kwargs):
for plugin in _from_loaded_module(
module=module, root=root, info=info, **kwargs):
yield plugin
@@ -629,7 +689,8 @@ def _from_loaded_module(module, info=None, **kwargs):
def evaluate(plugins, datasets, **kwargs):
ev = gsitk_compat.Eval(tuples=None,
ev = gsitk_compat.Eval(
tuples=None,
datasets=datasets,
pipelines=[plugin.as_pipe() for plugin in plugins])
ev.evaluate()

View File

@@ -9,7 +9,7 @@ class Split(AnalysisPlugin):
'''description: A sample plugin that chunks input text'''
author = ["@militarpancho", '@balkian']
version = '0.2'
version = '0.3'
url = "https://github.com/gsi-upm/senpy"
extra_params = {
@@ -33,12 +33,15 @@ class Split(AnalysisPlugin):
if chunker_type == "paragraph":
tokenizer = LineTokenizer()
chars = list(tokenizer.span_tokenize(original_text))
for i, chunk in enumerate(tokenizer.tokenize(original_text)):
print(chunk)
if len(chars) == 1:
# This sentence was already split
return
for i, chunk in enumerate(chars):
start, end = chunk
e = Entry()
e['nif:isString'] = chunk
e['nif:isString'] = original_text[start:end]
if entry.id:
e.id = entry.id + "#char={},{}".format(chars[i][0], chars[i][1])
e.id = entry.id + "#char={},{}".format(start, end)
yield e
test_cases = [

View File

@@ -1,6 +1,6 @@
---
name: Ekman2FSRE
module: senpy.plugins.conversion.emotion.centroids
module: senpy.plugins.postprocessing.emotion.centroids
description: Plugin to convert emotion sets from Ekman to VAD
version: 0.2
# No need to specify onyx:doesConversion because centroids.py adds it automatically from centroids_direction

View File

@@ -1,6 +1,6 @@
---
name: Ekman2PAD
module: senpy.plugins.conversion.emotion.centroids
module: senpy.plugins.postprocessing.emotion.centroids
description: Plugin to convert emotion sets from Ekman to VAD
version: 0.2
# No need to specify onyx:doesConversion because centroids.py adds it automatically from centroids_direction

View File

@@ -0,0 +1,196 @@
from senpy import PostProcessing, easy_test
class MaxEmotion(PostProcessing):
'''Plugin to extract the emotion with highest value from an EmotionSet'''
author = '@dsuarezsouto'
version = '0.1'
def process_entry(self, entry, params):
if len(entry.emotions) < 1:
yield entry
return
set_emotions = entry.emotions[0]['onyx:hasEmotion']
# If there is only one emotion, do not modify it
if len(set_emotions) < 2:
yield entry
return
max_emotion = set_emotions[0]
# Extract max emotion from the set emotions (emotion with highest intensity)
for tmp_emotion in set_emotions:
if tmp_emotion['onyx:hasEmotionIntensity'] > max_emotion[
'onyx:hasEmotionIntensity']:
max_emotion = tmp_emotion
if max_emotion['onyx:hasEmotionIntensity'] == 0:
max_emotion['onyx:hasEmotionCategory'] = "neutral"
max_emotion['onyx:hasEmotionIntensity'] = 1.0
entry.emotions[0]['onyx:hasEmotion'] = [max_emotion]
entry.emotions[0]['prov:wasGeneratedBy'] = "maxSentiment"
yield entry
def check(self, request, plugins):
return 'maxemotion' in request.parameters and self not in plugins
# Test Cases:
# 1 Normal Situation.
# 2 Case to return a Neutral Emotion.
test_cases = [
{
"name":
"If there are several emotions within an emotion set, reduce it to one.",
"entry": {
"@type":
"entry",
"emotions": [
{
"@id":
"Emotions0",
"@type":
"emotionSet",
"onyx:hasEmotion": [
{
"@id": "_:Emotion_1538121033.74",
"@type": "emotion",
"onyx:hasEmotionCategory": "anger",
"onyx:hasEmotionIntensity": 0
},
{
"@id": "_:Emotion_1538121033.74",
"@type": "emotion",
"onyx:hasEmotionCategory": "joy",
"onyx:hasEmotionIntensity": 0.3333333333333333
},
{
"@id": "_:Emotion_1538121033.74",
"@type": "emotion",
"onyx:hasEmotionCategory": "negative-fear",
"onyx:hasEmotionIntensity": 0
},
{
"@id": "_:Emotion_1538121033.74",
"@type": "emotion",
"onyx:hasEmotionCategory": "sadness",
"onyx:hasEmotionIntensity": 0
},
{
"@id": "_:Emotion_1538121033.74",
"@type": "emotion",
"onyx:hasEmotionCategory": "disgust",
"onyx:hasEmotionIntensity": 0
}
]
}
],
"nif:isString":
"Test"
},
'expected': {
"@type":
"entry",
"emotions": [
{
"@id":
"Emotions0",
"@type":
"emotionSet",
"onyx:hasEmotion": [
{
"@id": "_:Emotion_1538121033.74",
"@type": "emotion",
"onyx:hasEmotionCategory": "joy",
"onyx:hasEmotionIntensity": 0.3333333333333333
}
],
"prov:wasGeneratedBy":
'maxSentiment'
}
],
"nif:isString":
"Test"
}
},
{
"name":
"If the maximum emotion has an intensity of 0, return a neutral emotion.",
"entry": {
"@type":
"entry",
"emotions": [{
"@id":
"Emotions0",
"@type":
"emotionSet",
"onyx:hasEmotion": [
{
"@id": "_:Emotion_1538121033.74",
"@type": "emotion",
"onyx:hasEmotionCategory": "anger",
"onyx:hasEmotionIntensity": 0
},
{
"@id": "_:Emotion_1538121033.74",
"@type": "emotion",
"onyx:hasEmotionCategory": "joy",
"onyx:hasEmotionIntensity": 0
},
{
"@id":
"_:Emotion_1538121033.74",
"@type":
"emotion",
"onyx:hasEmotionCategory":
"negative-fear",
"onyx:hasEmotionIntensity":
0
},
{
"@id": "_:Emotion_1538121033.74",
"@type": "emotion",
"onyx:hasEmotionCategory":
"sadness",
"onyx:hasEmotionIntensity": 0
},
{
"@id": "_:Emotion_1538121033.74",
"@type": "emotion",
"onyx:hasEmotionCategory":
"disgust",
"onyx:hasEmotionIntensity": 0
}]
}],
"nif:isString":
"Test"
},
'expected': {
"@type":
"entry",
"emotions": [{
"@id":
"Emotions0",
"@type":
"emotionSet",
"onyx:hasEmotion": [{
"@id": "_:Emotion_1538121033.74",
"@type": "emotion",
"onyx:hasEmotionCategory": "neutral",
"onyx:hasEmotionIntensity": 1
}],
"prov:wasGeneratedBy":
'maxSentiment'
}],
"nif:isString":
"Test"
}
}
]
if __name__ == '__main__':
easy_test()

View File

@@ -9,7 +9,20 @@
"@type": {
"type": "string",
"description": "Type of the analysis. e.g. marl:SentimentAnalysis"
},
"prov:wasAssociatedWith": {
"@type": "string",
"description": "Algorithm/plugin that was used"
},
"prov:used": {
"description": "Parameters of the algorithm",
"@type": "array",
"default": [],
"type": "array",
"items": {
"$ref": "parameter.json"
}
}
},
"required": ["@id", "@type"]
"required": ["@type", "prov:wasAssociatedWith"]
}

View File

@@ -41,7 +41,7 @@
"@container": "@set"
},
"analysis": {
"@id": "AnalysisInvolved",
"@id": "prov:wasInformedBy",
"@type": "@id",
"@container": "@set"
},

View File

@@ -20,5 +20,5 @@
"description": "The ID of the analysis that generated this Emotion. The full object should be included in the \"analysis\" property of the root object"
}
},
"required": ["@id", "prov:wasGeneratedBy", "onyx:hasEmotion"]
"required": ["prov:wasGeneratedBy", "onyx:hasEmotion"]
}

View File

@@ -35,5 +35,5 @@
"default": []
}
},
"required": ["@id", "nif:isString"]
"required": ["nif:isString"]
}

View File

@@ -0,0 +1,16 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Parameters for a senpy analysis",
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "Name of the parameter"
},
"prov:value": {
"@type": "any",
"description": "Value of the parameter"
}
},
"required": ["name", "prov:value"]
}

View File

@@ -21,13 +21,7 @@
"default": [],
"type": "array",
"items": {
"anyOf": [
{
"$ref": "analysis.json"
},{
"type": "string"
}
]
}
},
"entries": {

View File

@@ -19,5 +19,5 @@
"description": "The ID of the analysis that generated this Sentiment. The full object should be included in the \"analysis\" property of the root object"
}
},
"required": ["@id", "prov:wasGeneratedBy"]
"required": ["prov:wasGeneratedBy"]
}

View File

@@ -12,6 +12,7 @@ max-line-length = 100
universal=1
[tool:pytest]
addopts = --cov=senpy --cov-report term-missing
filterwarnings =
ignore:the matrix subclass:PendingDeprecationWarning
[coverage:report]
omit = senpy/__main__.py

View File

@@ -3,8 +3,9 @@ import logging
logger = logging.getLogger(__name__)
from unittest import TestCase
from senpy.api import parse_params, API_PARAMS, NIF_PARAMS, WEB_PARAMS
from senpy.models import Error
from senpy.api import (boolean, parse_params, get_extra_params, parse_analysis,
API_PARAMS, NIF_PARAMS, WEB_PARAMS)
from senpy.models import Error, Plugin
class APITest(TestCase):
@@ -89,3 +90,156 @@ class APITest(TestCase):
assert "Dummy" in p['algorithm']
assert 'input' in p
assert p['input'] == 'Aloha my friend'
def test_parse_analysis(self):
'''The API should parse user parameters and return them in a format that plugins can use'''
plugins = [
Plugin({
'name': 'plugin1',
'extra_params': {
# Incompatible parameter
'param0': {
'aliases': ['p1', 'parameter1'],
'options': ['option1', 'option2'],
'default': 'option1',
'required': True
},
'param1': {
'aliases': ['p1', 'parameter1'],
'options': ['en', 'es'],
'default': 'en',
'required': False
},
'param2': {
'aliases': ['p2', 'parameter2'],
'required': False,
'options': ['value2_1', 'value2_2', 'value3_3']
}
}
}), Plugin({
'name': 'plugin2',
'extra_params': {
'param0': {
'aliases': ['parameter1'],
'options': ['new option', 'new option2'],
'default': 'new option',
'required': False
},
'param1': {
'aliases': ['myparam1', 'p1'],
'options': ['en', 'de', 'auto'],
'default': 'de',
'required': True
},
'param3': {
'aliases': ['p3', 'parameter3'],
'options': boolean,
'default': True
}
}
})
]
call = {
'param1': 'en',
'0.param0': 'option1',
'0.param1': 'en',
'param2': 'value2_1',
'param0': 'new option',
'1.param1': 'de',
'param3': False,
}
expected = [
{
'param0': 'option1',
'param1': 'en',
'param2': 'value2_1',
}, {
'param0': 'new option',
'param1': 'de',
'param3': False,
}
]
p = parse_analysis(call, plugins)
for i, arg in enumerate(expected):
params = p[i].params
for k, v in arg.items():
assert params[k] == v
def test_get_extra_params(self):
'''The API should return the list of valid parameters for a set of plugins'''
plugins = [
Plugin({
'name': 'plugin1',
'extra_params': {
# Incompatible parameter
'param0': {
'aliases': ['p1', 'parameter1'],
'options': ['option1', 'option2'],
'default': 'option1',
'required': True
},
'param1': {
'aliases': ['p1', 'parameter1'],
'options': ['en', 'es'],
'default': 'en',
'required': False
},
'param2': {
'aliases': ['p2', 'parameter2'],
'required': False,
'options': ['value2_1', 'value2_2', 'value3_3']
}
}
}), Plugin({
'name': 'plugin2',
'extra_params': {
'param0': {
'aliases': ['parameter1'],
'options': ['new option', 'new option2'],
'default': 'new option',
'required': False
},
'param1': {
'aliases': ['myparam1', 'p1'],
'options': ['en', 'de', 'auto'],
'default': 'de',
'required': True
},
'param3': {
'aliases': ['p3', 'parameter3'],
'options': boolean,
'default': True
}
}
})
]
expected = {
# Overlapping parameters
'plugin1.param0': plugins[0]['extra_params']['param0'],
'plugin1.param1': plugins[0]['extra_params']['param1'],
'plugin2.param0': plugins[1]['extra_params']['param0'],
'plugin2.param1': plugins[1]['extra_params']['param1'],
# Non-overlapping parameters
'param2': plugins[0]['extra_params']['param2'],
'param3': plugins[1]['extra_params']['param3'],
# Intersection of overlapping parameters
'param1': {
'aliases': ['p1'],
'options': ['en'],
'default': None,
'required': True
}
}
result = get_extra_params(plugins)
for ik, iv in expected.items():
assert ik in result
for jk, jv in iv.items():
assert jk in result[ik]
assert expected[ik][jk] == result[ik][jk]
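
In other words, get_extra_params merges the declarations of every plugin in the chain: parameters declared by a single plugin keep their plain name, conflicting declarations are exposed under plugin-qualified names, and where the conflicting options overlap an intersected plain entry is added as well. A reduced sketch, mirroring the expectations encoded above (the demo plugins and the 'lang' parameter are invented for illustration):

from senpy.api import get_extra_params
from senpy.models import Plugin

a = Plugin({'name': 'a', 'extra_params': {
    'lang': {'aliases': [], 'options': ['en', 'es'], 'default': 'en'}}})
b = Plugin({'name': 'b', 'extra_params': {
    'lang': {'aliases': [], 'options': ['en', 'de'], 'default': 'de'}}})

merged = get_extra_params([a, b])
assert 'a.lang' in merged and 'b.lang' in merged  # plugin-qualified entries
assert merged['lang']['options'] == ['en']        # intersection of the options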

View File

@@ -26,8 +26,7 @@ class BlueprintsTest(TestCase):
cls.senpy.init_app(cls.app)
cls.dir = os.path.join(os.path.dirname(__file__), "..")
cls.senpy.add_folder(cls.dir)
cls.senpy.activate_plugin("Dummy", sync=True)
cls.senpy.activate_plugin("DummyRequired", sync=True)
cls.senpy.activate_all()
cls.senpy.default_plugin = 'Dummy'
def setUp(self):
@@ -74,6 +73,7 @@ class BlueprintsTest(TestCase):
The results for a POST request should be the same as for a GET request.
"""
resp = self.client.post("/api/", data={'i': 'My aloha mohame',
'algorithm': 'rand',
'with_parameters': True})
self.assertCode(resp, 200)
js = parse_resp(resp)
@@ -106,6 +106,7 @@ class BlueprintsTest(TestCase):
assert isinstance(js, models.Error)
resp = self.client.get("/api/?i=My aloha mohame&algo=DummyRequired&example=notvalid")
self.assertCode(resp, 400)
self.app.config['TESTING'] = True
resp = self.client.get("/api/?i=My aloha mohame&algo=DummyRequired&example=a")
self.assertCode(resp, 200)
@@ -147,6 +148,69 @@ class BlueprintsTest(TestCase):
assert len(js['analysis']) == 2
assert js['entries'][0]['nif:isString'] == 'My aloha mohame'
def test_analysis_chain_required(self):
"""
If a parameter is required and duplicated (because two plugins require it), specifying
it once should suffice
"""
resp = self.client.get("/api/DummyRequired/DummyRequired?i=My aloha mohame&example=a")
js = parse_resp(resp)
assert len(js['analysis']) == 2
assert js['entries'][0]['nif:isString'] == 'My aloha mohame'
assert js['entries'][0]['reversed'] == 2
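# Note: the reversing dummy applied twice restores the original string (hence
# nif:isString equals the input), and the 'reversed' counter records that two
# passes ran.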
def test_requirements_chain_help(self):
'''The extra parameters of each plugin should be merged if they are in a chain'''
resp = self.client.get("/api/split/DummyRequired?help=true")
self.assertCode(resp, 200)
js = parse_resp(resp)
assert 'valid_parameters' in js
vp = js['valid_parameters']
assert 'example' in vp
assert 'delimiter' in vp
def test_requirements_chain_repeat_help(self):
'''
If a plugin appears several times in a chain, there should be a way to set different
parameters for each.
'''
resp = self.client.get("/api/split/split?help=true")
self.assertCode(resp, 200)
js = parse_resp(resp)
assert 'valid_parameters' in js
vp = js['valid_parameters']
assert 'delimiter' in vp
resp = self.client.get("/api/split/split?help=true&verbose=false")
js = parse_resp(resp)
vp = js['valid_parameters']
assert len(vp.keys()) == 1
def test_requirements_chain(self):
"""
It should be possible to specify different parameters for each step in the chain.
"""
# First, we split by sentence twice. Each call should generate 3 additional entries
# (one per sentence in the original).
resp = self.client.get('/api/split/split?i=The first sentence. The second sentence.'
'\nA new paragraph&delimiter=sentence')
js = parse_resp(resp)
assert len(js['analysis']) == 2
assert len(js['entries']) == 7
# Now, we split by sentence. This produces 3 additional entries.
# Then, we split by paragraph. This should create 2 additional entries (one per paragraph
# in the original text)
resp = self.client.get('/api/split/split?i=The first sentence. The second sentence.'
'\nA new paragraph&0.delimiter=sentence&1.delimiter=paragraph')
# Calling dummy twice should return the same string
self.assertCode(resp, 200)
js = parse_resp(resp)
assert len(js['analysis']) == 2
assert len(js['entries']) == 6
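# Entry count: 1 original + 3 sentence splits + 2 paragraph splits = 6.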
def test_error(self):
"""
The dummy plugin returns an empty response,\

View File

@@ -11,14 +11,15 @@ except ImportError:
from functools import partial
from senpy.extensions import Senpy
from senpy import plugins
from senpy.models import Error, Results, Entry, EmotionSet, Emotion, Plugin
from senpy.models import Analysis, Error, Results, Entry, EmotionSet, Emotion, Plugin
from senpy import api
from flask import Flask
from unittest import TestCase
def analyse(instance, **kwargs):
request = api.parse_call(kwargs)
basic = api.parse_params(kwargs, api.API_PARAMS)
request = api.parse_call(basic)
return instance.analyse(request)
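# The request is now built in two steps: the basic API parameters are parsed
# and validated first, and only then is the full call (including per-plugin
# parameters) assembled.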
@@ -49,9 +50,9 @@ class ExtensionsTest(TestCase):
'''Should be able to add and delete new plugins. '''
new = plugins.Analysis(name='new', description='new', version=0)
self.senpy.add_plugin(new)
assert new in self.senpy.plugins()
assert new in self.senpy.plugins(is_activated=False)
self.senpy.delete_plugin(new)
assert new not in self.senpy.plugins()
assert new not in self.senpy.plugins(is_activated=False)
def test_adding_folder(self):
""" It should be possible for senpy to look for plugins in more folders. """
@@ -60,7 +61,7 @@ class ExtensionsTest(TestCase):
default_plugins=False)
assert not senpy.analysis_plugins
senpy.add_folder(self.examples_dir)
assert senpy.analysis_plugins
assert senpy.plugins(plugin_type=plugins.AnalysisPlugin, is_activated=False)
self.assertRaises(AttributeError, senpy.add_folder, 'DOES NOT EXIST')
def test_installing(self):
@@ -121,8 +122,8 @@ class ExtensionsTest(TestCase):
# Leaf (defaultdict with __setattr__ and __getattr__).
r1 = analyse(self.senpy, algorithm="Dummy", input="tupni", output="tuptuo")
r2 = analyse(self.senpy, input="tupni", output="tuptuo")
assert r1.analysis[0] == "endpoint:plugins/Dummy_0.1"
assert r2.analysis[0] == "endpoint:plugins/Dummy_0.1"
assert r1.analysis[0].algorithm == "endpoint:plugins/Dummy_0.1"
assert r2.analysis[0].algorithm == "endpoint:plugins/Dummy_0.1"
assert r1.entries[0]['nif:isString'] == 'input'
def test_analyse_empty(self):
@@ -130,7 +131,7 @@ class ExtensionsTest(TestCase):
senpy = Senpy(plugin_folder=None,
app=self.app,
default_plugins=False)
self.assertRaises(Error, senpy.analyse, Results())
self.assertRaises(Error, senpy.analyse, Results(), [])
def test_analyse_wrong(self):
""" Trying to analyse with a non-existent plugin should raise an error."""
@@ -156,30 +157,32 @@ class ExtensionsTest(TestCase):
r2 = analyse(self.senpy,
input="tupni",
output="tuptuo")
assert r1.analysis[0] == "endpoint:plugins/Dummy_0.1"
assert r2.analysis[0] == "endpoint:plugins/Dummy_0.1"
assert r1.analysis[0].algorithm == "endpoint:plugins/Dummy_0.1"
assert r2.analysis[0].algorithm == "endpoint:plugins/Dummy_0.1"
assert r1.entries[0]['nif:isString'] == 'input'
def test_analyse_error(self):
mm = mock.MagicMock()
mm.id = 'magic_mock'
mm.name = 'mock'
mm.is_activated = True
mm.analyse_entries.side_effect = Error('error in analysis', status=500)
self.senpy.add_plugin(mm)
class ErrorPlugin(plugins.Analysis):
author = 'nobody'
version = 0
ex = Error()
def process(self, *args, **kwargs):
raise self.ex
m = ErrorPlugin(ex=Error('error in analysis', status=500))
self.senpy.add_plugin(m)
try:
analyse(self.senpy, input='nothing', algorithm='MOCK')
analyse(self.senpy, input='nothing', algorithm='ErrorPlugin')
assert False
except Error as ex:
assert 'error in analysis' in ex['message']
assert ex['status'] == 500
ex = Exception('generic exception on analysis')
mm.analyse.side_effect = ex
mm.analyse_entries.side_effect = ex
m.ex = Exception('generic exception on analysis')
try:
analyse(self.senpy, input='nothing', algorithm='MOCK')
analyse(self.senpy, input='nothing', algorithm='ErrorPlugin')
assert False
except Exception as ex:
assert 'generic exception on analysis' in str(ex)
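# Note: the MagicMock is replaced with a real plugin subclass, so the error
# path presumably runs through the normal plugin machinery (activation,
# process, etc.) rather than a mocked analyse_entries.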
@@ -195,7 +198,7 @@ class ExtensionsTest(TestCase):
def test_load_default_plugins(self):
senpy = Senpy(plugin_folder=self.examples_dir, default_plugins=True)
assert len(senpy.plugins()) > 1
assert len(senpy.plugins(is_activated=False)) > 1
def test_convert_emotions(self):
self.senpy.activate_all(sync=True)
@@ -211,27 +214,28 @@ class ExtensionsTest(TestCase):
'emoml:valence': 0
}))
response = Results({
'analysis': [{'plugin': plugin}],
'analysis': [plugin],
'entries': [Entry({
'nif:isString': 'much ado about nothing',
'emotions': [eSet1]
})]
})
params = {'emotionModel': 'emoml:big6',
'algorithm': ['conversion'],
'conversion': 'full'}
r1 = deepcopy(response)
r1.parameters = params
self.senpy.convert_emotions(r1)
self.senpy.analyse(r1)
assert len(r1.entries[0].emotions) == 2
params['conversion'] = 'nested'
r2 = deepcopy(response)
r2.parameters = params
self.senpy.convert_emotions(r2)
self.senpy.analyse(r2)
assert len(r2.entries[0].emotions) == 1
assert r2.entries[0].emotions[0]['prov:wasDerivedFrom'] == eSet1
params['conversion'] = 'filtered'
r3 = deepcopy(response)
r3.parameters = params
self.senpy.convert_emotions(r3)
self.senpy.analyse(r3)
assert len(r3.entries[0].emotions) == 1
r3.jsonld()
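# The three conversion modes exercised above, as the assertions suggest:
#   full:     keep the original emotion set and add the converted one (2 sets)
#   nested:   keep only the converted set, which links back to the original
#             through prov:wasDerivedFrom (1 set)
#   filtered: keep only the converted set, with no link kept (1 set)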

View File

@@ -5,7 +5,8 @@ import jsonschema
import json
import rdflib
from unittest import TestCase
from senpy.models import (Emotion,
from senpy.models import (Analysis,
Emotion,
EmotionAnalysis,
EmotionSet,
Entry,
@@ -61,7 +62,7 @@ class ModelsTest(TestCase):
def test_id(self):
""" Adding the id after creation should overwrite the automatic ID
"""
r = Entry()
r = Entry(_auto_id=True)
j = r.jsonld()
assert '@id' in j
r.id = "test"
@@ -189,6 +190,19 @@ class ModelsTest(TestCase):
assert isinstance(js['plugins'], list)
assert js['plugins'][0]['@type'] == 'sentimentPlugin'
def test_parameters(self):
'''An Analysis should contain the algorithm and the list of parameters to be used'''
a = Analysis()
a.params = {'param1': 1, 'param2': 2}
assert len(a.parameters) == 2
for param in a.parameters:
if param.name == 'param1':
assert param.value == 1
elif param.name == 'param2':
assert param.value == 2
else:
raise Exception('Unknown value %s' % param)
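# Each expanded parameter pairs a name with a value; in the JSON-LD output
# the value presumably maps to prov:value, matching the parameter schema
# introduced above.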
def test_from_string(self):
results = {
'@type': 'results',

View File

@@ -8,7 +8,7 @@ import tempfile
from unittest import TestCase, skipIf
from senpy.models import Results, Entry, EmotionSet, Emotion, Plugins
from senpy import plugins
from senpy.plugins.conversion.emotion.centroids import CentroidConversion
from senpy.plugins.postprocessing.emotion.centroids import CentroidConversion
from senpy.gsitk_compat import GSITK_AVAILABLE
import pandas as pd