mirror of https://github.com/gsi-upm/senpy
synced 2025-10-19 17:58:28 +00:00

Compare commits: 0.10.9 ... 51-calcula (17 commits)

SHA1:
d145a852e7
c090501534
6a1069780b
41aa142ce0
b48730137d
f1ec057b16
f6ca82cac8
318acd5a71
c8f6f5613d
748d1a00bd
a82e4ed440
c939b095de
ca69bddc17
aa35e62a27
6dd4a44924
4291c5eabf
7c7a815d1a
@@ -31,29 +31,19 @@ test-2.7:
   variables:
     PYTHON_VERSION: "2.7"
 
-.image: &image_definition
+push:
   stage: push
   script:
-    - make -e push-$PYTHON_VERSION
+    - make -e push
   only:
     - tags
     - triggers
     - fix-makefiles
 
-push-3.5:
-  <<: *image_definition
-  variables:
-    PYTHON_VERSION: "3.5"
-
-push-2.7:
-  <<: *image_definition
-  variables:
-    PYTHON_VERSION: "2.7"
-
 push-latest:
-  <<: *image_definition
-  variables:
-    PYTHON_VERSION: latest
+  stage: push
+  script:
+    - make -e push-latest
   only:
     - master
    - triggers
@@ -22,7 +22,4 @@ else
	rm $(KEY_FILE)
 endif
 
-push:: git-push
-pull:: git-pull
-
-.PHONY:: commit tag push git-push git-pull push-github
+.PHONY:: commit tag git-push git-pull push-github
@@ -1,17 +1,15 @@
 makefiles-remote:
-	@git remote add makefiles ssh://git@lab.cluster.gsi.dit.upm.es:2200/docs/templates/makefiles.git 2>/dev/null || true
+	git ls-remote --exit-code makefiles 2> /dev/null || git remote add makefiles ssh://git@lab.cluster.gsi.dit.upm.es:2200/docs/templates/makefiles.git
 
 makefiles-commit: makefiles-remote
	git add -f .makefiles
	git commit -em "Updated makefiles from ${NAME}"
 
 makefiles-push:
+	git fetch makefiles $(NAME)
	git subtree push --prefix=.makefiles/ makefiles $(NAME)
 
 makefiles-pull: makefiles-remote
	git subtree pull --prefix=.makefiles/ makefiles master --squash
 
-pull:: makefiles-pull
-push:: makefiles-push
-
-.PHONY:: makefiles-remote makefiles-commit makefiles-push makefiles-pull pull push
+.PHONY:: makefiles-remote makefiles-commit makefiles-push makefiles-pull
@@ -6,13 +6,9 @@
   ],
   "entries": [
     {
-      "@type": [
-        "nif:RFC5147String",
-        "nif:Context"
-      ],
       "nif:beginIndex": 0,
       "nif:endIndex": 40,
-      "nif:isString": "My favourite actress is Natalie Portman"
+      "text": "An entry should have a nif:isString key"
     }
   ]
 }
@@ -3,10 +3,21 @@
   "@id": "me:Result1",
   "@type": "results",
   "analysis": [
-    "me:SAnalysis1",
-    "me:SgAnalysis1",
-    "me:EmotionAnalysis1",
-    "me:NER1"
+    {
+      "@id": "_:SAnalysis1_Activity",
+      "@type": "marl:SentimentAnalysis",
+      "prov:wasAssociatedWith": "me:SAnalysis1"
+    },
+    {
+      "@id": "_:EmotionAnalysis1_Activity",
+      "@type": "onyx:EmotionAnalysis",
+      "prov:wasAssociatedWith": "me:EmotionAnalysis1"
+    },
+    {
+      "@id": "_:NER1_Activity",
+      "@type": "me:NER",
+      "prov:wasAssociatedWith": "me:NER1"
+    }
   ],
   "entries": [
     {
@@ -23,7 +34,7 @@
       "nif:endIndex": 13,
       "nif:anchorOf": "Microsoft",
       "me:references": "http://dbpedia.org/page/Microsoft",
-      "prov:wasGeneratedBy": "me:NER1"
+      "prov:wasGeneratedBy": "_:NER1_Activity"
     },
     {
       "@id": "http://micro.blog/status1#char=25,37",
@@ -31,7 +42,7 @@
       "nif:endIndex": 37,
       "nif:anchorOf": "Windows Phone",
       "me:references": "http://dbpedia.org/page/Windows_Phone",
-      "prov:wasGeneratedBy": "me:NER1"
+      "prov:wasGeneratedBy": "_:NER1_Activity"
     }
   ],
   "suggestions": [
@@ -40,7 +51,7 @@
       "nif:beginIndex": 16,
       "nif:endIndex": 77,
       "nif:anchorOf": "put your Windows Phone on your newest #open technology program",
-      "prov:wasGeneratedBy": "me:SgAnalysis1"
+      "prov:wasGeneratedBy": "_:SgAnalysis1_Activity"
     }
   ],
   "sentiments": [
@@ -51,14 +62,14 @@
       "nif:anchorOf": "You'll be awesome.",
       "marl:hasPolarity": "marl:Positive",
       "marl:polarityValue": 0.9,
-      "prov:wasGeneratedBy": "me:SAnalysis1"
+      "prov:wasGeneratedBy": "_:SgAnalysis1_Activity"
     }
   ],
   "emotions": [
     {
       "@id": "http://micro.blog/status1#char=0,109",
       "nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
-      "prov:wasGeneratedBy": "me:EAnalysis1",
+      "prov:wasGeneratedBy": "_:EmotionAnalysis1_Activity",
       "onyx:hasEmotion": [
         {
           "onyx:hasEmotionCategory": "wna:liking"
@@ -1,78 +0,0 @@
-{
-  "@context": "http://mixedemotions-project.eu/ns/context.jsonld",
-  "@id": "me:Result1",
-  "@type": "results",
-  "analysis": [
-    "me:SAnalysis1",
-    "me:SgAnalysis1",
-    "me:EmotionAnalysis1",
-    "me:NER1",
-    {
-      "@type": "analysis",
-      "@id": "anonymous"
-    }
-  ],
-  "entries": [
-    {
-      "@id": "http://micro.blog/status1",
-      "@type": [
-        "nif:RFC5147String",
-        "nif:Context"
-      ],
-      "nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
-      "entities": [
-        {
-          "@id": "http://micro.blog/status1#char=5,13",
-          "nif:beginIndex": 5,
-          "nif:endIndex": 13,
-          "nif:anchorOf": "Microsoft",
-          "me:references": "http://dbpedia.org/page/Microsoft",
-          "prov:wasGeneratedBy": "me:NER1"
-        },
-        {
-          "@id": "http://micro.blog/status1#char=25,37",
-          "nif:beginIndex": 25,
-          "nif:endIndex": 37,
-          "nif:anchorOf": "Windows Phone",
-          "me:references": "http://dbpedia.org/page/Windows_Phone",
-          "prov:wasGeneratedBy": "me:NER1"
-        }
-      ],
-      "suggestions": [
-        {
-          "@id": "http://micro.blog/status1#char=16,77",
-          "nif:beginIndex": 16,
-          "nif:endIndex": 77,
-          "nif:anchorOf": "put your Windows Phone on your newest #open technology program",
-          "prov:wasGeneratedBy": "me:SgAnalysis1"
-        }
-      ],
-      "sentiments": [
-        {
-          "@id": "http://micro.blog/status1#char=80,97",
-          "nif:beginIndex": 80,
-          "nif:endIndex": 97,
-          "nif:anchorOf": "You'll be awesome.",
-          "marl:hasPolarity": "marl:Positive",
-          "marl:polarityValue": 0.9,
-          "prov:wasGeneratedBy": "me:SAnalysis1"
-        }
-      ],
-      "emotions": [
-        {
-          "@id": "http://micro.blog/status1#char=0,109",
-          "nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
-          "prov:wasGeneratedBy": "me:EAnalysis1",
-          "onyx:hasEmotion": [
-            {
-              "onyx:hasEmotionCategory": "wna:liking"
-            },
-            {
-              "onyx:hasEmotionCategory": "wna:excitement"
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}
@@ -1,9 +1,8 @@
 {
   "@context": "http://mixedemotions-project.eu/ns/context.jsonld",
-  "@id": "http://example.com#NIFExample",
+  "@id": "me:Result1",
   "@type": "results",
-  "analysis": [
-  ],
+  "analysis": [ ],
   "entries": [
     {
       "@id": "http://example.org#char=0,40",
@@ -4,22 +4,34 @@
   "@type": "results",
   "analysis": [
     {
-      "@id": "me:SAnalysis1",
+      "@id": "_:SAnalysis1_Activity",
       "@type": "marl:SentimentAnalysis",
-      "marl:maxPolarityValue": 1,
-      "marl:minPolarityValue": 0
+      "prov:wasAssociatedWith": "me:SentimentAnalysis",
+      "prov:used": [
+        {
+          "name": "marl:maxPolarityValue",
+          "prov:value": "1"
+        },
+        {
+          "name": "marl:minPolarityValue",
+          "prov:value": "0"
+        }
+      ]
     },
     {
-      "@id": "me:SgAnalysis1",
+      "@id": "_:SgAnalysis1_Activity",
+      "prov:wasAssociatedWith": "me:SgAnalysis1",
       "@type": "me:SuggestionAnalysis"
     },
     {
-      "@id": "me:EmotionAnalysis1",
-      "@type": "me:EmotionAnalysis"
+      "@id": "_:EmotionAnalysis1_Activity",
+      "@type": "me:EmotionAnalysis",
+      "prov:wasAssociatedWith": "me:EmotionAnalysis1"
     },
     {
-      "@id": "me:NER1",
-      "@type": "me:NER"
+      "@id": "_:NER1_Activity",
+      "@type": "me:NER",
+      "prov:wasAssociatedWith": "me:EmotionNER1"
     }
   ],
   "entries": [
@@ -4,8 +4,9 @@
   "@type": "results",
   "analysis": [
     {
-      "@id": "me:EmotionAnalysis1",
-      "@type": "onyx:EmotionAnalysis"
+      "@id": "me:EmotionAnalysis1_Activity",
+      "@type": "me:EmotionAnalysis1",
+      "prov:wasAssociatedWith": "me:EmotionAnalysis1"
     }
   ],
   "entries": [
@@ -26,7 +27,7 @@
     {
       "@id": "http://micro.blog/status1#char=0,109",
       "nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
-      "prov:wasGeneratedBy": "me:EmotionAnalysis1",
+      "prov:wasGeneratedBy": "_:EmotionAnalysis1_Activity",
       "onyx:hasEmotion": [
         {
           "onyx:hasEmotionCategory": "wna:liking"
@@ -4,8 +4,9 @@
   "@type": "results",
   "analysis": [
     {
-      "@id": "me:NER1",
-      "@type": "me:NERAnalysis"
+      "@id": "_:NER1_Activity",
+      "@type": "me:NERAnalysis",
+      "prov:wasAssociatedWith": "me:NER1"
     }
   ],
   "entries": [
@@ -9,9 +9,15 @@
   "@type": "results",
   "analysis": [
     {
-      "@id": "me:HesamsAnalysis",
+      "@id": "me:HesamsAnalysis_Activity",
       "@type": "onyx:EmotionAnalysis",
-      "onyx:usesEmotionModel": "emovoc:pad-dimensions"
+      "prov:wasAssociatedWith": "me:HesamsAnalysis",
+      "prov:used": [
+        {
+          "name": "emotion-model",
+          "prov:value": "emovoc:pad-dimensions"
+        }
+      ]
     }
   ],
   "entries": [
@@ -32,7 +38,7 @@
     {
       "@id": "Entry1#char=0,21",
       "nif:anchorOf": "This is a test string",
-      "prov:wasGeneratedBy": "me:HesamAnalysis",
+      "prov:wasGeneratedBy": "_:HesamAnalysis_Activity",
       "onyx:hasEmotion": [
         {
           "emovoc:pleasure": 0.5,
@@ -4,10 +4,9 @@
   "@type": "results",
   "analysis": [
     {
-      "@id": "me:SAnalysis1",
+      "@id": "_:SAnalysis1_Activity",
       "@type": "marl:SentimentAnalysis",
-      "marl:maxPolarityValue": 1,
-      "marl:minPolarityValue": 0
+      "prov:wasAssociatedWith": "me:SAnalysis1"
     }
   ],
   "entries": [
@@ -30,7 +29,7 @@
       "nif:anchorOf": "You'll be awesome.",
       "marl:hasPolarity": "marl:Positive",
       "marl:polarityValue": 0.9,
-      "prov:wasGeneratedBy": "me:SAnalysis1"
+      "prov:wasGeneratedBy": "_:SAnalysis1_Activity"
     }
   ],
   "emotionSets": [
@@ -3,7 +3,11 @@
   "@id": "me:Result1",
   "@type": "results",
   "analysis": [
-    "me:SgAnalysis1"
+    {
+      "@id": "_:SgAnalysis1_Activity",
+      "@type": "me:SuggestionAnalysis",
+      "prov:wasAssociatedWith": "me:SgAnalysis1"
+    }
   ],
   "entries": [
     {
@@ -12,7 +16,6 @@
         "nif:RFC5147String",
         "nif:Context"
       ],
-      "prov:wasGeneratedBy": "me:SAnalysis1",
       "nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
       "entities": [
       ],
@@ -22,7 +25,7 @@
       "nif:beginIndex": 16,
       "nif:endIndex": 77,
       "nif:anchorOf": "put your Windows Phone on your newest #open technology program",
-      "prov:wasGeneratedBy": "me:SgAnalysis1"
+      "prov:wasGeneratedBy": "_:SgAnalysis1_Activity"
     }
   ],
   "sentiments": [
@@ -11,7 +11,7 @@ class Async(AnalysisPlugin):
     '''An example of an asynchronous module'''
     author = '@balkian'
     version = '0.2'
-    async = True
+    sync = False
 
     def _do_async(self, num_processes):
         pool = multiprocessing.Pool(processes=num_processes)
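Note: the rename above anticipates Python 3.7, where async became a reserved keyword, so literal attribute access such as plugin.async is a syntax error there. A small illustration (not part of the diff):

    # 'async' is still reachable dynamically, since it is only a string here:
    getattr(plugin, 'async', True)   # works on any Python version
    # ...but plugin.async would not even compile on Python 3.7+
    plugin.sync = False              # the renamed flag used by this changeset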
@@ -12,3 +12,4 @@ rdflib-jsonld
 numpy
 scipy
 scikit-learn
+responses
senpy/api.py
@@ -1,16 +1,15 @@
 from future.utils import iteritems
-from .models import Error, Results, Entry, from_string
+from .models import Analysis, Error, Results, Entry, from_string
 import logging
 logger = logging.getLogger(__name__)
 
 
 boolean = [True, False]
 
 
 API_PARAMS = {
     "algorithm": {
         "aliases": ["algorithms", "a", "algo"],
-        "required": False,
+        "required": True,
+        "default": 'default',
         "description": ("Algorithms that will be used to process the request."
                         "It may be a list of comma-separated names."),
     },
@@ -43,6 +42,14 @@ API_PARAMS = {
         "options": boolean,
         "default": False
     },
+    "verbose": {
+        "@id": "verbose",
+        "description": "Show all help, including the common API parameters, or only plugin-related info",
+        "aliases": ["v"],
+        "required": True,
+        "options": boolean,
+        "default": True
+    },
     "emotionModel": {
         "@id": "emotionModel",
         "aliases": ["emoModel"],
@@ -140,6 +147,15 @@ NIF_PARAMS = {
     }
 }
 
+BUILTIN_PARAMS = {}
+
+for d in [
+        NIF_PARAMS, CLI_PARAMS, WEB_PARAMS, PLUGINS_PARAMS, EVAL_PARAMS,
+        API_PARAMS
+]:
+    for k, v in d.items():
+        BUILTIN_PARAMS[k] = v
+
 
 def parse_params(indict, *specs):
     if not specs:
@@ -161,10 +177,9 @@ def parse_params(indict, *specs):
                 outdict[param] = options["default"]
             elif options.get("required", False):
                 wrong_params[param] = spec[param]
-                continue
-            if "options" in options:
+            elif "options" in options:
                 if options["options"] == boolean:
-                    outdict[param] = outdict[param] in [None, True, 'true', '1']
+                    outdict[param] = str(outdict[param]).lower() in ['true', '1']
                 elif outdict[param] not in options["options"]:
                     wrong_params[param] = spec[param]
     if wrong_params:
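A note on the boolean coercion above (illustrative, not part of the diff): the old test treated a missing value (None) as true, while the new form requires an explicit truthy string and also normalizes case:

    for value in [None, True, 'True', 'true', '1', 'false']:
        print(value, str(value).lower() in ['true', '1'])
    # None  -> False (was True before)
    # True  -> True  (str(True).lower() == 'true')
    # 'True' -> True (was False before, since 'True' != 'true')
    # 'true', '1' -> True; 'false' -> False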
@@ -175,31 +190,126 @@ def parse_params(indict, *specs):
                       parameters=outdict,
                       errors=wrong_params)
         raise message
-    if 'algorithm' in outdict and not isinstance(outdict['algorithm'], list):
-        outdict['algorithm'] = list(outdict['algorithm'].split(','))
     return outdict
 
 
-def parse_extra_params(request, plugin=None):
-    params = request.parameters.copy()
-    if plugin:
-        extra_params = parse_params(params, plugin.get('extra_params', {}))
-        params.update(extra_params)
+def get_all_params(plugins, *specs):
+    '''Return a list of parameters for a given set of specifications and plugins.'''
+    dic = {}
+    for s in specs:
+        dic.update(s)
+    dic.update(get_extra_params(plugins))
+    return dic
+
+
+def get_extra_params(plugins):
+    '''Get a list of possible parameters given a list of plugins'''
+    params = {}
+    extra_params = {}
+    for plugin in plugins:
+        this_params = plugin.get('extra_params', {})
+        for k, v in this_params.items():
+            if k not in extra_params:
+                extra_params[k] = {}
+            extra_params[k][plugin.name] = v
+    for k, v in extra_params.items():  # Resolve conflicts
+        if len(v) == 1:  # Add the extra options that do not collide
+            params[k] = list(v.values())[0]
+        else:
+            required = False
+            aliases = None
+            options = None
+            default = None
+            nodefault = False  # Set when defaults are not compatible
+
+            for plugin, opt in v.items():
+                params['{}.{}'.format(plugin, k)] = opt
+                required = required or opt.get('required', False)
+                newaliases = set(opt.get('aliases', []))
+                if aliases is None:
+                    aliases = newaliases
+                else:
+                    aliases = aliases & newaliases
+                if 'options' in opt:
+                    newoptions = set(opt['options'])
+                    options = newoptions if options is None else options & newoptions
+                if 'default' in opt:
+                    newdefault = opt['default']
+                    if newdefault:
+                        if default is None and not nodefault:
+                            default = newdefault
+                        elif newdefault != default:
+                            nodefault = True
+                            default = None
+            # Check for incompatibilities
+            if options != set():
+                params[k] = {
+                    'default': default,
+                    'aliases': list(aliases),
+                    'required': required,
+                    'options': list(options)
+                }
     return params
 
 
+def parse_analysis(params, plugins):
+    '''
+    Parse the given parameters individually for each plugin, and get a list of the parameters that
+    belong to each of the plugins. Each item can then be used in the plugin.analyse_entries method.
+    '''
+    analysis_list = []
+    for i, plugin in enumerate(plugins):
+        if not plugin:
+            continue
+        this_params = filter_params(params, plugin, i)
+        parsed = parse_params(this_params, plugin.get('extra_params', {}))
+        analysis = plugin.activity(parsed)
+        analysis_list.append(analysis)
+    return analysis_list
+
+
+def filter_params(params, plugin, ith=-1):
+    '''
+    Get the values within params that apply to a plugin.
+    More specific names override more general names, in this order:
+
+    <index_order>.parameter > <plugin.name>.parameter > parameter
+
+    Example:
+
+    >>> filter_params({'0.hello': True, 'hello': False}, Plugin(), 0)
+    { '0.hello': True, 'hello': True}
+
+    '''
+    thisparams = {}
+    if ith >= 0:
+        ith = '{}.'.format(ith)
+    else:
+        ith = ""
+    for k, v in params.items():
+        if ith and k.startswith(str(ith)):
+            thisparams[k[len(ith):]] = v
+        elif k.startswith(plugin.name):
+            thisparams[k[len(plugin.name) + 1:]] = v
+        elif k not in thisparams:
+            thisparams[k] = v
+    return thisparams
+
+
 def parse_call(params):
-    '''Return a results object based on the parameters used in a call/request.
+    '''
+    Return a results object based on the parameters used in a call/request.
     '''
     params = parse_params(params, NIF_PARAMS)
     if params['informat'] == 'text':
         results = Results()
-        entry = Entry(nif__isString=params['input'],
-                      id='#')  # Use @base
+        entry = Entry(nif__isString=params['input'], id='#')  # Use @base
         results.entries.append(entry)
     elif params['informat'] == 'json-ld':
         results = from_string(params['input'], cls=Results)
     else:  # pragma: no cover
-        raise NotImplementedError('Informat {} is not implemented'.format(params['informat']))
+        raise NotImplementedError('Informat {} is not implemented'.format(
+            params['informat']))
     results.parameters = params
     return results
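To make the parameter scoping above concrete, here is a small sketch (the plugin and parameter names are made up, not from the diff):

    # Request parameters for a pipeline where several plugins accept 'language':
    params = {
        'language': 'en',            # bare name: applies to every plugin
        'myplugin.language': 'es',   # <plugin.name>.parameter: overrides for that plugin
        '0.language': 'fr',          # <index>.parameter: overrides for the first step only
    }
    # filter_params(params, first_plugin, 0) resolves 'language' to 'fr':
    # index-qualified names beat name-qualified names, which beat bare names.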
@@ -69,7 +69,7 @@ def encoded_url(url=None, base=None):
     if request.method == 'GET':
         url = request.full_path[1:]  # Remove the first slash
     else:
-        hash(frozenset(request.form.params().items()))
+        hash(frozenset(tuple(request.parameters.items())))
         code = 'hash:{}'.format(hash)
 
     code = code or base64.urlsafe_b64encode(url.encode()).decode()
@@ -184,15 +184,33 @@ def basic_api(f):
     return decorated_function
 
 
-@api_blueprint.route('/', methods=['POST', 'GET'])
+@api_blueprint.route('/', defaults={'plugin': None}, methods=['POST', 'GET'])
+@api_blueprint.route('/<path:plugin>', methods=['POST', 'GET'])
 @basic_api
-def api_root():
+def api_root(plugin):
+    if plugin:
+        if request.parameters['algorithm'] != api.API_PARAMS['algorithm']['default']:
+            raise Error('You cannot specify the algorithm with a parameter and a URL variable.'
+                        ' Please, remove one of them')
+        request.parameters['algorithm'] = tuple(plugin.replace('+', '/').split('/'))
+
+    params = request.parameters
+    plugin = request.parameters['algorithm']
+
+    sp = current_app.senpy
+    plugins = sp.get_plugins(plugin)
+
     if request.parameters['help']:
-        dic = dict(api.API_PARAMS, **api.NIF_PARAMS)
-        response = Help(valid_parameters=dic)
+        apis = []
+        if request.parameters['verbose']:
+            apis.append(api.BUILTIN_PARAMS)
+        allparameters = api.get_all_params(plugins, *apis)
+        response = Help(valid_parameters=allparameters)
         return response
     req = api.parse_call(request.parameters)
-    return current_app.senpy.analyse(req)
+    analysis = api.parse_analysis(req.parameters, plugins)
+    results = current_app.senpy.analyse(req, analysis)
+    return results
 
 
 @api_blueprint.route('/evaluate/', methods=['POST', 'GET'])
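With the new routes, a pipeline can also be chosen in the URL path; '+' and '/' act as interchangeable separators. For example (plugin names illustrative), a request to /api/sentiment140+emotion-anger yields:

    tuple('sentiment140+emotion-anger'.replace('+', '/').split('/'))
    # -> ('sentiment140', 'emotion-anger')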
@@ -221,7 +239,7 @@ def plugins():
 
 @api_blueprint.route('/plugins/<plugin>/', methods=['POST', 'GET'])
 @basic_api
-def plugin(plugin=None):
+def plugin(plugin):
     sp = current_app.senpy
     return sp.get_plugin(plugin)
 
@@ -31,10 +31,10 @@ def main_function(argv):
     default_plugins = params.get('default-plugins', False)
     sp = Senpy(default_plugins=default_plugins, plugin_folder=plugin_folder)
     request = api.parse_call(params)
-    algos = request.parameters.get('algorithm', None)
+    algos = sp.get_plugins(request.parameters.get('algorithm', None))
     if algos:
         for algo in algos:
-            sp.activate_plugin(algo)
+            sp.activate_plugin(algo.name)
     else:
         sp.activate_all()
     res = sp.analyse(request)
@@ -24,8 +24,12 @@ class Client(object):
         return {d.name: d for d in resp}
 
     def request(self, path=None, method='GET', **params):
-        url = '{}{}'.format(self.endpoint, path)
-        response = requests.request(method=method, url=url, params=params)
+        url = '{}{}'.format(self.endpoint.rstrip('/'), path)
+        if method == 'POST':
+            response = requests.post(url=url, data=params)
+        else:
+            response = requests.request(method=method, url=url, params=params)
+
         try:
             resp = models.from_dict(response.json())
         except Exception as ex:
@@ -6,7 +6,6 @@ from future import standard_library
 standard_library.install_aliases()
 
 from . import plugins, api
-from .plugins import Plugin, evaluate
 from .models import Error, AggregatedEvaluation
 from .blueprints import api_blueprint, demo_blueprint, ns_blueprint
 
@@ -17,7 +16,6 @@ import copy
 import errno
 import logging
 
-
 from . import gsitk_compat
 
 logger = logging.getLogger(__name__)
@@ -25,6 +23,7 @@ logger = logging.getLogger(__name__)
 
 class Senpy(object):
     """ Default Senpy extension for Flask """
+
     def __init__(self,
                  app=None,
                  plugin_folder=".",
@@ -50,7 +49,7 @@ class Senpy(object):
             self.add_folder('plugins', from_root=True)
         else:
             # Add only conversion plugins
-            self.add_folder(os.path.join('plugins', 'conversion'),
+            self.add_folder(os.path.join('plugins', 'postprocessing'),
                             from_root=True)
         self.app = app
         if app is not None:
@@ -79,27 +78,47 @@ class Senpy(object):
     def delete_plugin(self, plugin):
         del self._plugins[plugin.name.lower()]
 
-    def plugins(self, **kwargs):
+    def plugins(self, plugin_type=None, is_activated=True, **kwargs):
         """ Return the plugins registered for a given application. Filtered by criteria """
-        return list(plugins.pfilter(self._plugins, **kwargs))
+        return list(plugins.pfilter(self._plugins, plugin_type=plugin_type,
+                                    is_activated=is_activated, **kwargs))
 
     def get_plugin(self, name, default=None):
         if name == 'default':
             return self.default_plugin
-        plugin = name.lower()
-        if plugin in self._plugins:
-            return self._plugins[plugin]
+        elif name == 'conversion':
+            return None
 
-        results = self.plugins(id='endpoint:plugins/{}'.format(name))
+        if name.lower() in self._plugins:
+            return self._plugins[name.lower()]
 
-        if not results:
-            return Error(message="Plugin not found", status=404)
+        results = self.plugins(id='endpoint:plugins/{}'.format(name.lower()),
+                               plugin_type=None)
+        if results:
+            return results[0]
+
+        results = self.plugins(id=name,
+                               plugin_type=None)
+        if results:
             return results[0]
 
+        msg = ("Plugin not found: '{}'\n"
+               "Make sure it is ACTIVATED\n"
+               "Valid algorithms: {}").format(name,
+                                              self._plugins.keys())
+        raise Error(message=msg, status=404)
+
+    def get_plugins(self, name):
+        try:
+            name = name.split(',')
+        except AttributeError:
+            pass  # Assume it is a tuple or a list
+        return tuple(self.get_plugin(n) for n in name)
+
     @property
     def analysis_plugins(self):
-        """ Return only the analysis plugins """
-        return self.plugins(plugin_type='analysisPlugin')
+        """ Return only the analysis plugins that are active"""
+        return self.plugins(plugin_type='analysisPlugin', is_activated=True)
 
     def add_folder(self, folder, from_root=False):
         """ Find plugins in this folder and add them to this instance """
@@ -114,74 +133,140 @@ class Senpy(object):
         else:
             raise AttributeError("Not a folder or does not exist: %s", folder)
 
-    def _get_plugins(self, request):
-        if not self.analysis_plugins:
-            raise Error(
-                status=404,
-                message=("No plugins found."
-                         " Please install one."))
-        algos = request.parameters.get('algorithm', None)
-        if not algos:
-            if self.default_plugin:
-                algos = [self.default_plugin.name, ]
-            else:
-                raise Error(
-                    status=404,
-                    message="No default plugin found, and None provided")
-
-        plugins = list()
-        for algo in algos:
-            algo = algo.lower()
-            if algo not in self._plugins:
-                msg = ("The algorithm '{}' is not valid\n"
-                       "Valid algorithms: {}").format(algo,
-                                                      self._plugins.keys())
-                logger.debug(msg)
-                raise Error(
-                    status=404,
-                    message=msg)
-            plugins.append(self._plugins[algo])
-        return plugins
+    # def check_analysis_request(self, analysis):
+    #     '''Check if the analysis request can be fulfilled'''
+    #     if not self.plugins():
+    #         raise Error(
+    #             status=404,
+    #             message=("No plugins found."
+    #                      " Please install one."))
+    #     for a in analysis:
+    #         algo = a.algorithm
+    #         if algo == 'default' and not self.default_plugin:
+    #             raise Error(
+    #                 status=404,
+    #                 message="No default plugin found, and None provided")
+    #         else:
+    #             self.get_plugin(algo)
 
-    def _process_entries(self, entries, req, plugins):
+    def _process(self, req, pending, done=None):
         """
         Recursively process the entries with the first plugin in the list, and pass the results
         to the rest of the plugins.
         """
-        if not plugins:
-            for i in entries:
-                yield i
-            return
-        plugin = plugins[0]
-        self._activate(plugin)  # Make sure the plugin is activated
-        specific_params = api.parse_extra_params(req, plugin)
-        req.analysis.append({'plugin': plugin,
-                             'parameters': specific_params})
-        results = plugin.analyse_entries(entries, specific_params)
-        for i in self._process_entries(results, req, plugins[1:]):
-            yield i
+        done = done or []
+        if not pending:
+            return req
+        analysis = pending[0]
+        results = analysis.run(req)
+        results.analysis.append(analysis)
+        done += analysis
+        return self._process(results, pending[1:], done)
 
     def install_deps(self):
         plugins.install_deps(*self.plugins())
 
-    def analyse(self, request):
+    def analyse(self, request, analysis=None):
         """
         Main method that analyses a request, either from CLI or HTTP.
         It takes a processed request, provided by the user, as returned
         by api.parse_call().
         """
+        if not self.plugins():
+            raise Error(
+                status=404,
+                message=("No plugins found."
+                         " Please install one."))
+        if analysis is None:
+            params = str(request)
+            plugins = self.get_plugins(request.parameters['algorithm'])
+            analysis = api.parse_analysis(request.parameters, plugins)
         logger.debug("analysing request: {}".format(request))
-        entries = request.entries
-        request.entries = []
-        plugins = self._get_plugins(request)
-        results = request
-        for i in self._process_entries(entries, results, plugins):
-            results.entries.append(i)
-        self.convert_emotions(results)
-        logger.debug("Returning analysis result: {}".format(results))
-        results.analysis = [i['plugin'].id for i in results.analysis]
+        results = self._process(request, analysis)
+        logger.debug("Got analysis result: {}".format(results))
+        results = self.postprocess(results)
+        logger.debug("Returning post-processed result: {}".format(results))
         return results
 
+    def convert_emotions(self, resp):
+        """
+        Conversion of all emotions in a response **in place**.
+        In addition to converting from one model to another, it has
+        to include the conversion plugin to the analysis list.
+        Needless to say, this is far from an elegant solution, but it works.
+        @todo refactor and clean up
+        """
+        plugins = resp.analysis
+
+        if 'parameters' not in resp:
+            return resp
+
+        params = resp['parameters']
+        toModel = params.get('emotionModel', None)
+        if not toModel:
+            return resp
+
+        logger.debug('Asked for model: {}'.format(toModel))
+        output = params.get('conversion', None)
+        candidates = {}
+        for plugin in plugins:
+            try:
+                fromModel = plugin.get('onyx:usesEmotionModel', None)
+                candidates[plugin.id] = next(self._conversion_candidates(fromModel, toModel))
+                logger.debug('Analysis plugin {} uses model: {}'.format(
+                    plugin.id, fromModel))
+            except StopIteration:
+                e = Error(('No conversion plugin found for: '
+                           '{} -> {}'.format(fromModel, toModel)),
+                          status=404)
+                e.original_response = resp
+                e.parameters = params
+                raise e
+        newentries = []
+        done = []
+        for i in resp.entries:
+            if output == "full":
+                newemotions = copy.deepcopy(i.emotions)
+            else:
+                newemotions = []
+            for j in i.emotions:
+                plugname = j['prov:wasGeneratedBy']
+                candidate = candidates[plugname]
+                done.append({'plugin': candidate, 'parameters': params})
+                for k in candidate.convert(j, fromModel, toModel, params):
+                    k.prov__wasGeneratedBy = candidate.id
+                    if output == 'nested':
+                        k.prov__wasDerivedFrom = j
+                    newemotions.append(k)
+            i.emotions = newemotions
+            newentries.append(i)
+        resp.entries = newentries
+        return resp
+
+    def _conversion_candidates(self, fromModel, toModel):
+        candidates = self.plugins(plugin_type=plugins.EmotionConversion)
+        for candidate in candidates:
+            for pair in candidate.onyx__doesConversion:
+                logging.debug(pair)
+                if candidate.can_convert(fromModel, toModel):
+                    yield candidate
+
+    def postprocess(self, response):
+        '''
+        Transform the results from the analysis plugins.
+        It has some pre-defined post-processing like emotion conversion,
+        and it also allows plugins to auto-select themselves.
+        '''
+
+        response = self.convert_emotions(response)
+
+        for plug in self.plugins(plugin_type=plugins.PostProcessing):
+            if plug.check(response, response.analysis):
+                response = plug.process(response)
+        return response
+
     def _get_datasets(self, request):
         if not self.datasets:
             raise Error(
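Schematically, the new _process replaces the per-entry generator chain with recursion over whole results objects; a comment-level sketch (not from the diff):

    # _process(req, [a1, a2]) == _process(a1.run(req), [a2])
    #                         == a2.run(a1.run(req))
    # i.e. each Analysis activity transforms the full results object in turn.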
@@ -192,8 +277,8 @@ class Senpy(object):
         for dataset in datasets_name:
             if dataset not in self.datasets:
                 logger.debug(("The dataset '{}' is not valid\n"
-                              "Valid datasets: {}").format(dataset,
-                                                           self.datasets.keys()))
+                              "Valid datasets: {}").format(
+                                  dataset, self.datasets.keys()))
                 raise Error(
                     status=404,
                     message="The dataset '{}' is not valid".format(dataset))
@@ -219,78 +304,22 @@ class Senpy(object):
         results = AggregatedEvaluation()
         results.parameters = params
         datasets = self._get_datasets(results)
-        plugins = self._get_plugins(results)
-        for eval in evaluate(plugins, datasets):
+        plugins = []
+        for plugname in params.algorithm:
+            plugins = self.get_plugin(plugname)
+
+        for eval in plugins.evaluate(plugins, datasets):
             results.evaluations.append(eval)
         if 'with_parameters' not in results.parameters:
             del results.parameters
         logger.debug("Returning evaluation result: {}".format(results))
         return results
 
-    def _conversion_candidates(self, fromModel, toModel):
-        candidates = self.plugins(plugin_type='emotionConversionPlugin')
-        for candidate in candidates:
-            for pair in candidate.onyx__doesConversion:
-                logging.debug(pair)
-
-                if pair['onyx:conversionFrom'] == fromModel \
-                   and pair['onyx:conversionTo'] == toModel:
-                    yield candidate
-
-    def convert_emotions(self, resp):
-        """
-        Conversion of all emotions in a response **in place**.
-        In addition to converting from one model to another, it has
-        to include the conversion plugin to the analysis list.
-        Needless to say, this is far from an elegant solution, but it works.
-        @todo refactor and clean up
-        """
-        plugins = [i['plugin'] for i in resp.analysis]
-        params = resp.parameters
-        toModel = params.get('emotionModel', None)
-        if not toModel:
-            return
-
-        logger.debug('Asked for model: {}'.format(toModel))
-        output = params.get('conversion', None)
-        candidates = {}
-        for plugin in plugins:
-            try:
-                fromModel = plugin.get('onyx:usesEmotionModel', None)
-                candidates[plugin.id] = next(self._conversion_candidates(fromModel, toModel))
-                logger.debug('Analysis plugin {} uses model: {}'.format(plugin.id, fromModel))
-            except StopIteration:
-                e = Error(('No conversion plugin found for: '
-                           '{} -> {}'.format(fromModel, toModel)),
-                          status=404)
-                e.original_response = resp
-                e.parameters = params
-                raise e
-        newentries = []
-        for i in resp.entries:
-            if output == "full":
-                newemotions = copy.deepcopy(i.emotions)
-            else:
-                newemotions = []
-            for j in i.emotions:
-                plugname = j['prov:wasGeneratedBy']
-                candidate = candidates[plugname]
-                resp.analysis.append({'plugin': candidate,
-                                      'parameters': params})
-                for k in candidate.convert(j, fromModel, toModel, params):
-                    k.prov__wasGeneratedBy = candidate.id
-                    if output == 'nested':
-                        k.prov__wasDerivedFrom = j
-                    newemotions.append(k)
-            i.emotions = newemotions
-            newentries.append(i)
-        resp.entries = newentries
-
     @property
     def default_plugin(self):
         if not self._default or not self._default.is_activated:
-            candidates = self.plugins(plugin_type='analysisPlugin',
-                                      is_activated=True)
+            candidates = self.plugins(
+                plugin_type='analysisPlugin', is_activated=True)
             if len(candidates) > 0:
                 self._default = candidates[0]
             else:
@@ -300,7 +329,7 @@ class Senpy(object):
 
     @default_plugin.setter
     def default_plugin(self, value):
-        if isinstance(value, Plugin):
+        if isinstance(value, plugins.Plugin):
             if not value.is_activated:
                 raise AttributeError('The default plugin has to be activated.')
             self._default = value
@@ -352,7 +381,8 @@ class Senpy(object):
 
         logger.info("Activating plugin: {}".format(plugin.name))
 
-        if sync or 'async' in plugin and not plugin.async:
+        if sync or not getattr(plugin, 'async', True) or getattr(
+                plugin, 'sync', False):
             return self._activate(plugin)
         else:
             th = Thread(target=partial(self._activate, plugin))
@@ -375,7 +405,8 @@ class Senpy(object):
 
         self._set_active(plugin, False)
 
-        if sync or 'async' in plugin and not plugin.async:
+        if sync or not getattr(plugin, 'async', True) or not getattr(
+                plugin, 'sync', False):
             self._deactivate(plugin)
         else:
             th = Thread(target=partial(self._deactivate, plugin))
@@ -1,5 +1,7 @@
 import logging
 
+from pkg_resources import parse_version, get_distribution, DistributionNotFound
+
 logger = logging.getLogger(__name__)
 
 MSG = 'GSITK is not (properly) installed.'
@@ -12,12 +14,18 @@ def raise_exception(*args, **kwargs):
 
 
 try:
+    gsitk_distro = get_distribution("gsitk")
+    GSITK_VERSION = parse_version(gsitk_distro.version)
+    GSITK_AVAILABLE = GSITK_VERSION > parse_version("0.1.9.1")  # Earlier versions have a bug
+except DistributionNotFound:
+    GSITK_AVAILABLE = False
+    GSITK_VERSION = ()
+
+if GSITK_AVAILABLE:
     from gsitk.datasets.datasets import DatasetManager
     from gsitk.evaluation.evaluation import Evaluation as Eval
     from sklearn.pipeline import Pipeline
-    GSITK_AVAILABLE = True
     modules = locals()
-except ImportError:
-    logger.warn(IMPORTMSG)
-    GSITK_AVAILABLE = False
+else:
+    logger.warning(IMPORTMSG)
     DatasetManager = Eval = Pipeline = raise_exception
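The guard above separates "gsitk not installed" (DistributionNotFound) from "gsitk too old": anything at or below 0.1.9.1 is treated as unavailable. The comparison relies on pkg_resources version semantics, e.g.:

    from pkg_resources import parse_version
    parse_version('0.1.9.1') < parse_version('0.1.10')   # True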
@@ -85,6 +85,7 @@ class BaseMeta(ABCMeta):
             schema = json.load(f)
 
         resolver = jsonschema.RefResolver(schema_path, schema)
+        if '@type' not in attrs:
             attrs['@type'] = "".join((name[0].lower(), name[1:]))
         attrs['_schema_file'] = schema_file
         attrs['schema'] = schema
@@ -244,10 +245,10 @@ class CustomDict(MutableMapping, object):
         return key[0] == '_'
 
     def __str__(self):
-        return str(self.serializable())
+        return json.dumps(self.serializable(), sort_keys=True, indent=4)
 
     def __repr__(self):
-        return str(self.serializable())
+        return json.dumps(self.serializable(), sort_keys=True, indent=4)
 
 
 _Alias = namedtuple('Alias', 'indict')
@@ -121,11 +121,11 @@ class BaseModel(with_metaclass(BaseMeta, CustomDict)):
 
     '''
 
-    schema_file = DEFINITIONS_FILE
+    # schema_file = DEFINITIONS_FILE
     _context = base_context["@context"]
 
     def __init__(self, *args, **kwargs):
-        auto_id = kwargs.pop('_auto_id', True)
+        auto_id = kwargs.pop('_auto_id', False)
+
         super(BaseModel, self).__init__(*args, **kwargs)
 
@@ -133,7 +133,7 @@ class BaseModel(with_metaclass(BaseMeta, CustomDict)):
             self.id
 
         if '@type' not in self:
-            logger.warn('Created an instance of an unknown model')
+            logger.warning('Created an instance of an unknown model')
 
     @property
     def id(self):
@@ -325,7 +325,6 @@ def _add_class_from_schema(*args, **kwargs):
 
 for i in [
         'aggregatedEvaluation',
-        'analysis',
         'dataset',
         'datasets',
         'emotion',
@@ -339,7 +338,7 @@ for i in [
         'entity',
         'help',
         'metric',
-        'plugin',
+        'parameter',
         'plugins',
         'response',
         'results',
@@ -349,3 +348,55 @@ for i in [
 
 ]:
     _add_class_from_schema(i)
+
+
+class Analysis(BaseModel):
+    schema = 'analysis'
+
+    parameters = alias('prov:used')
+
+    @property
+    def params(self):
+        outdict = {}
+        outdict['algorithm'] = self.algorithm
+        for param in self.parameters:
+            outdict[param['name']] = param['value']
+        return outdict
+
+    @params.setter
+    def params(self, value):
+        for k, v in value.items():
+            for param in self.parameters:
+                if param.name == k:
+                    param.value = v
+                    break
+            else:
+                self.parameters.append(Parameter(name=k, value=v))
+
+    @property
+    def algorithm(self):
+        return self['prov:wasAssociatedWith']
+
+    @property
+    def plugin(self):
+        return self._plugin
+
+    @plugin.setter
+    def plugin(self, value):
+        self._plugin = value
+        self['prov:wasAssociatedWith'] = value.id
+
+    def run(self, request):
+        return self.plugin.process(request, self.params)
+
+
+class Plugin(BaseModel):
+    schema = 'plugin'
+
+    def activity(self, parameters):
+        '''Generate a prov:Activity from this plugin and the '''
+        a = Analysis()
+        a.plugin = self
+        a.params = parameters
+        return a
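The new Analysis model records provenance in PROV terms: the plugin is the associated agent ('prov:wasAssociatedWith') and the call parameters are what the activity 'prov:used'. A rough usage sketch, assuming some loaded plugin instance (names hypothetical):

    activity = some_plugin.activity({'language': 'en'})  # build a prov:Activity
    activity.algorithm     # -> some_plugin.id, via 'prov:wasAssociatedWith'
    activity.params        # -> {'algorithm': ..., 'language': 'en'}
    results = activity.run(request)  # delegates to some_plugin.process(request, params)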
@@ -1,7 +1,6 @@
 from future import standard_library
 standard_library.install_aliases()
 
-
 from future.utils import with_metaclass
 from functools import partial
 
@@ -10,7 +9,6 @@ import os
 import re
 import pickle
 import logging
-import copy
 import pprint
 
 import inspect
@@ -19,12 +17,12 @@ import subprocess
 import importlib
 import yaml
 import threading
-import nltk
+from nltk import download
 
 from .. import models, utils
 from .. import api
 from .. import gsitk_compat
+from .. import testing
 
 logger = logging.getLogger(__name__)
@@ -45,16 +43,19 @@ class PluginMeta(models.BaseMeta):
         if doc:
             attrs['description'] = doc
         else:
-            logger.warn(('Plugin {} does not have a description. '
-                         'Please, add a short summary to help other developers').format(name))
+            logger.warning(
+                ('Plugin {} does not have a description. '
+                 'Please, add a short summary to help other developers'
+                 ).format(name))
         cls = super(PluginMeta, mcs).__new__(mcs, name, bases, attrs)
 
         if alias in mcs._classes:
             if os.environ.get('SENPY_TESTING', ""):
-                raise Exception(('The type of plugin {} already exists. '
-                                 'Please, choose a different name').format(name))
+                raise Exception(
+                    ('The type of plugin {} already exists. '
+                     'Please, choose a different name').format(name))
             else:
-                logger.warn('Overloading plugin class: {}'.format(alias))
+                logger.warning('Overloading plugin class: {}'.format(alias))
             mcs._classes[alias] = cls
         return cls
@@ -86,10 +87,12 @@ class Plugin(with_metaclass(PluginMeta, models.Plugin)):
         if info:
             self.update(info)
         self.validate()
-        self.id = 'endpoint:plugins/{}_{}'.format(self['name'], self['version'])
+        self.id = 'endpoint:plugins/{}_{}'.format(self['name'],
+                                                  self['version'])
         self.is_activated = False
         self._lock = threading.Lock()
-        self._directory = os.path.abspath(os.path.dirname(inspect.getfile(self.__class__)))
+        self._directory = os.path.abspath(
+            os.path.dirname(inspect.getfile(self.__class__)))
 
         data_folder = data_folder or os.getcwd()
         subdir = os.path.join(data_folder, self.name)
@@ -117,7 +120,8 @@ class Plugin(with_metaclass(PluginMeta, models.Plugin)):
|
|||||||
if x not in self:
|
if x not in self:
|
||||||
missing.append(x)
|
missing.append(x)
|
||||||
if missing:
|
if missing:
|
||||||
raise models.Error('Missing configuration parameters: {}'.format(missing))
|
raise models.Error(
|
||||||
|
'Missing configuration parameters: {}'.format(missing))
|
||||||
|
|
||||||
def get_folder(self):
|
def get_folder(self):
|
||||||
return os.path.dirname(inspect.getfile(self.__class__))
|
return os.path.dirname(inspect.getfile(self.__class__))
|
||||||
@@ -128,35 +132,86 @@ class Plugin(with_metaclass(PluginMeta, models.Plugin)):
|
|||||||
def deactivate(self):
|
def deactivate(self):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
def process(self, request, parameters, **kwargs):
|
||||||
|
"""
|
||||||
|
An implemented plugin should override this method.
|
||||||
|
Here, we assume that a process_entries method exists."""
|
||||||
|
newentries = list(
|
||||||
|
self.process_entries(request.entries, parameters))
|
||||||
|
request.entries = newentries
|
||||||
|
return request
|
||||||
|
|
||||||
|
def process_entries(self, entries, parameters):
|
||||||
|
for entry in entries:
|
||||||
|
self.log.debug('Processing entry with plugin {}: {}'.format(
|
||||||
|
self, entry))
|
||||||
|
results = self.process_entry(entry, parameters)
|
||||||
|
if inspect.isgenerator(results):
|
||||||
|
for result in results:
|
||||||
|
yield result
|
||||||
|
else:
|
||||||
|
yield results
|
||||||
|
|
||||||
|
def process_entry(self, entry, parameters):
|
||||||
|
"""
|
||||||
|
This base method is here to adapt plugins which only
|
||||||
|
implement the *process* function.
|
||||||
|
Note that this method may yield an annotated entry or a list of
|
||||||
|
entries (e.g. in a tokenizer)
|
||||||
|
"""
|
||||||
|
raise NotImplementedError(
|
||||||
|
'You need to implement process, process_entries or process_entry in your plugin'
|
||||||
|
)
|
||||||
|
|
||||||
def test(self, test_cases=None):
|
def test(self, test_cases=None):
|
||||||
if not test_cases:
|
if not test_cases:
|
||||||
if not hasattr(self, 'test_cases'):
|
if not hasattr(self, 'test_cases'):
|
||||||
raise AttributeError(('Plugin {} [{}] does not have any defined '
|
raise AttributeError(
|
||||||
|
('Plugin {} [{}] does not have any defined '
|
||||||
'test cases').format(self.id,
|
'test cases').format(self.id,
|
||||||
inspect.getfile(self.__class__)))
|
inspect.getfile(self.__class__)))
|
||||||
test_cases = self.test_cases
|
test_cases = self.test_cases
|
||||||
for case in test_cases:
|
for case in test_cases:
|
||||||
try:
|
try:
|
||||||
self.test_case(case)
|
self.test_case(case)
|
||||||
self.log.debug('Test case passed:\n{}'.format(pprint.pformat(case)))
|
self.log.debug('Test case passed:\n{}'.format(
|
||||||
|
pprint.pformat(case)))
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
self.log.warn('Test case failed:\n{}'.format(pprint.pformat(case)))
|
self.log.warning('Test case failed:\n{}'.format(
|
||||||
|
pprint.pformat(case)))
|
||||||
raise
|
raise
|
||||||
|
|
||||||
def test_case(self, case):
|
def test_case(self, case, mock=testing.MOCK_REQUESTS):
|
||||||
|
if 'entry' not in case and 'input' in case:
|
||||||
|
entry = models.Entry(_auto_id=False)
|
||||||
|
entry.nif__isString = case['input']
|
||||||
|
case['entry'] = entry
|
||||||
entry = models.Entry(case['entry'])
|
entry = models.Entry(case['entry'])
|
||||||
given_parameters = case.get('params', case.get('parameters', {}))
|
given_parameters = case.get('params', case.get('parameters', {}))
|
||||||
expected = case.get('expected', None)
|
expected = case.get('expected', None)
|
||||||
should_fail = case.get('should_fail', False)
|
should_fail = case.get('should_fail', False)
|
||||||
|
responses = case.get('responses', [])
|
||||||
|
|
||||||
try:
|
try:
|
||||||
params = api.parse_params(given_parameters, self.extra_params)
|
request = models.Response()
|
||||||
res = list(self.analyse_entries([entry, ], params))
|
parameters = api.parse_params(given_parameters,
|
||||||
|
self.extra_params)
|
||||||
|
request.entries = [
|
||||||
|
entry,
|
||||||
|
]
|
||||||
|
|
||||||
|
method = partial(self.process, request, parameters)
|
||||||
|
|
||||||
|
if mock:
|
||||||
|
res = method()
|
||||||
|
else:
|
||||||
|
with testing.patch_all_requests(responses):
|
||||||
|
res = method()
|
||||||
|
|
||||||
if not isinstance(expected, list):
|
if not isinstance(expected, list):
|
||||||
expected = [expected]
|
expected = [expected]
|
||||||
utils.check_template(res, expected)
|
utils.check_template(res.entries, expected)
|
||||||
for r in res:
|
res.validate()
|
||||||
r.validate()
|
|
||||||
except models.Error:
|
except models.Error:
|
||||||
if should_fail:
|
if should_fail:
|
||||||
return
|
return
|
||||||
@@ -193,40 +248,26 @@ class Analysis(Plugin):
|
|||||||
A subclass of Plugin that analyses text and provides an annotation.
|
A subclass of Plugin that analyses text and provides an annotation.
|
||||||
'''
|
'''
|
||||||
|
|
||||||
def analyse(self, *args, **kwargs):
|
def analyse(self, request, parameters):
|
||||||
raise NotImplementedError(
|
return super(Analysis, self).process(request, parameters)
|
||||||
'Your plugin should implement either analyse or analyse_entry')
|
|
||||||
|
|
||||||
def analyse_entry(self, entry, parameters):
|
|
||||||
""" An implemented plugin should override this method.
|
|
||||||
This base method is here to adapt old style plugins which only
|
|
||||||
implement the *analyse* function.
|
|
||||||
Note that this method may yield an annotated entry or a list of
|
|
||||||
entries (e.g. in a tokenizer)
|
|
||||||
"""
|
|
||||||
text = entry['nif:isString']
|
|
||||||
params = copy.copy(parameters)
|
|
||||||
params['input'] = text
|
|
||||||
results = self.analyse(**params)
|
|
||||||
for i in results.entries:
|
|
||||||
yield i
|
|
||||||
|
|
||||||
def analyse_entries(self, entries, parameters):
|
def analyse_entries(self, entries, parameters):
|
||||||
for entry in entries:
|
for i in super(Analysis, self).process_entries(entries, parameters):
|
||||||
self.log.debug('Analysing entry with plugin {}: {}'.format(self, entry))
|
yield i
|
||||||
results = self.analyse_entry(entry, parameters)
|
|
||||||
if inspect.isgenerator(results):
|
|
||||||
for result in results:
|
|
||||||
yield result
|
|
||||||
else:
|
|
||||||
yield results
|
|
||||||
|
|
||||||
def test_case(self, case):
|
def process(self, request, parameters, **kwargs):
|
||||||
if 'entry' not in case and 'input' in case:
|
return self.analyse(request, parameters)
|
||||||
entry = models.Entry(_auto_id=False)
|
|
||||||
entry.nif__isString = case['input']
|
def process_entries(self, entries, parameters):
|
||||||
case['entry'] = entry
|
for i in self.analyse_entries(entries, parameters):
|
||||||
super(Analysis, self).test_case(case)
|
yield i
|
||||||
|
|
||||||
|
def process_entry(self, entry, parameters, **kwargs):
|
||||||
|
if hasattr(self, 'analyse_entry'):
|
||||||
|
for i in self.analyse_entry(entry, parameters):
|
||||||
|
yield i
|
||||||
|
else:
|
||||||
|
super(Analysis, self).process_entry(entry, parameters, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
AnalysisPlugin = Analysis
|
AnalysisPlugin = Analysis
|
||||||
@@ -237,7 +278,20 @@ class Conversion(Plugin):
|
|||||||
A subclass of Plugins that convert between different annotation models.
|
A subclass of Plugins that convert between different annotation models.
|
||||||
e.g. a conversion of emotion models, or normalization of sentiment values.
|
e.g. a conversion of emotion models, or normalization of sentiment values.
|
||||||
'''
|
'''
|
||||||
pass
|
|
||||||
|
def process(self, response, parameters, plugins=None, **kwargs):
|
||||||
|
plugins = plugins or []
|
||||||
|
newentries = []
|
||||||
|
for entry in response.entries:
|
||||||
|
newentries.append(
|
||||||
|
self.convert_entry(entry, parameters, plugins))
|
||||||
|
response.entries = newentries
|
||||||
|
return response
|
||||||
|
|
||||||
|
def convert_entry(self, entry, parameters, conversions_applied):
|
||||||
|
raise NotImplementedError(
|
||||||
|
'You should implement a way to convert each entry, or a custom process method'
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
ConversionPlugin = Conversion
|
ConversionPlugin = Conversion
|
||||||
@@ -274,12 +328,28 @@ class EmotionConversion(Conversion):
|
|||||||
'''
|
'''
|
||||||
A subclass of Conversion that converts emotion annotations using different models
|
A subclass of Conversion that converts emotion annotations using different models
|
||||||
'''
|
'''
|
||||||
pass
|
|
||||||
|
def can_convert(self, fromModel, toModel):
|
||||||
|
'''
|
||||||
|
Whether this plugin can convert from fromModel to toModel.
|
||||||
|
If fromModel is None, it is interpreted as "any Model"
|
||||||
|
'''
|
||||||
|
for pair in self.onyx__doesConversion:
|
||||||
|
if (pair['onyx:conversionTo'] == toModel) and \
|
||||||
|
((fromModel is None) or (pair['onyx:conversionFrom'] == fromModel)):
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
EmotionConversionPlugin = EmotionConversion
|
EmotionConversionPlugin = EmotionConversion
|
||||||
|
|
||||||
|
|
||||||
|
class PostProcessing(Plugin):
|
||||||
|
def check(self, request, plugins):
|
||||||
|
'''Should this plugin be run for this request?'''
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
class Box(AnalysisPlugin):
|
class Box(AnalysisPlugin):
|
||||||
'''
|
'''
|
||||||
Black box plugins delegate analysis to a function.
|
Black box plugins delegate analysis to a function.
|
||||||
@@ -304,9 +374,10 @@ class Box(AnalysisPlugin):
|
|||||||
return output
|
return output
|
||||||
|
|
||||||
def predict_one(self, input):
|
def predict_one(self, input):
|
||||||
raise NotImplementedError('You should define the behavior of this plugin')
|
raise NotImplementedError(
|
||||||
|
'You should define the behavior of this plugin')
|
||||||
|
|
||||||
def analyse_entries(self, entries, params):
|
def process_entries(self, entries, params):
|
||||||
for entry in entries:
|
for entry in entries:
|
||||||
input = self.input(entry=entry, params=params)
|
input = self.input(entry=entry, params=params)
|
||||||
results = self.predict_one(input=input)
|
results = self.predict_one(input=input)
|
||||||
@@ -375,7 +446,6 @@ class EmotionBox(TextBox, EmotionPlugin):
|
|||||||
|
|
||||||
|
|
||||||
class MappingMixin(object):
|
class MappingMixin(object):
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def mappings(self):
|
def mappings(self):
|
||||||
return self._mappings
|
return self._mappings
|
||||||
@@ -385,11 +455,10 @@ class MappingMixin(object):
|
|||||||
self._mappings = value
|
self._mappings = value
|
||||||
|
|
||||||
def output(self, output, entry, params):
|
def output(self, output, entry, params):
|
||||||
output = self.mappings.get(output,
|
output = self.mappings.get(output, self.mappings.get(
|
||||||
self.mappings.get('default', output))
|
'default', output))
|
||||||
return super(MappingMixin, self).output(output=output,
|
return super(MappingMixin, self).output(
|
||||||
entry=entry,
|
output=output, entry=entry, params=params)
|
||||||
params=params)
|
|
||||||
|
|
||||||
|
|
||||||
class ShelfMixin(object):
|
class ShelfMixin(object):
|
||||||
@@ -402,7 +471,8 @@ class ShelfMixin(object):
|
|||||||
with self.open(self.shelf_file, 'rb') as p:
|
with self.open(self.shelf_file, 'rb') as p:
|
||||||
self._sh = pickle.load(p)
|
self._sh = pickle.load(p)
|
||||||
except (IndexError, EOFError, pickle.UnpicklingError):
|
except (IndexError, EOFError, pickle.UnpicklingError):
|
||||||
self.log.warning('Corrupted shelf file: {}'.format(self.shelf_file))
|
self.log.warning('Corrupted shelf file: {}'.format(
|
||||||
|
self.shelf_file))
|
||||||
if not self.get('force_shelf', False):
|
if not self.get('force_shelf', False):
|
||||||
raise
|
raise
|
||||||
return self._sh
|
return self._sh
|
||||||
@@ -450,8 +520,7 @@ def pfilter(plugins, plugin_type=Analysis, **kwargs):
|
|||||||
plugin_type = plugin_type[0].upper() + plugin_type[1:]
|
plugin_type = plugin_type[0].upper() + plugin_type[1:]
|
||||||
pclass = globals()[plugin_type]
|
pclass = globals()[plugin_type]
|
||||||
logger.debug('Class: {}'.format(pclass))
|
logger.debug('Class: {}'.format(pclass))
|
||||||
candidates = filter(lambda x: isinstance(x, pclass),
|
candidates = filter(lambda x: isinstance(x, pclass), plugins)
|
||||||
plugins)
|
|
||||||
except KeyError:
|
except KeyError:
|
||||||
raise models.Error('{} is not a valid type'.format(plugin_type))
|
raise models.Error('{} is not a valid type'.format(plugin_type))
|
||||||
else:
|
else:
|
||||||
@@ -461,8 +530,7 @@ def pfilter(plugins, plugin_type=Analysis, **kwargs):
|
|||||||
|
|
||||||
def matches(plug):
|
def matches(plug):
|
||||||
res = all(getattr(plug, k, None) == v for (k, v) in kwargs.items())
|
res = all(getattr(plug, k, None) == v for (k, v) in kwargs.items())
|
||||||
logger.debug(
|
logger.debug("matching {} with {}: {}".format(plug.name, kwargs, res))
|
||||||
"matching {} with {}: {}".format(plug.name, kwargs, res))
|
|
||||||
return res
|
return res
|
||||||
|
|
||||||
if kwargs:
|
if kwargs:
|
||||||
@@ -496,17 +564,17 @@ def install_deps(*plugins):
|
|||||||
for req in requirements:
|
for req in requirements:
|
||||||
pip_args.append(req)
|
pip_args.append(req)
|
||||||
logger.info('Installing requirements: ' + str(requirements))
|
logger.info('Installing requirements: ' + str(requirements))
|
||||||
process = subprocess.Popen(pip_args,
|
process = subprocess.Popen(
|
||||||
stdout=subprocess.PIPE,
|
pip_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||||
stderr=subprocess.PIPE)
|
|
||||||
_log_subprocess_output(process)
|
_log_subprocess_output(process)
|
||||||
exitcode = process.wait()
|
exitcode = process.wait()
|
||||||
installed = True
|
installed = True
|
||||||
if exitcode != 0:
|
if exitcode != 0:
|
||||||
raise models.Error("Dependencies not properly installed: {}".format(pip_args))
|
raise models.Error(
|
||||||
|
"Dependencies not properly installed: {}".format(pip_args))
|
||||||
nltk_resources |= set(info.get('nltk_resources', []))
|
nltk_resources |= set(info.get('nltk_resources', []))
|
||||||
|
|
||||||
installed |= nltk.download(list(nltk_resources))
|
installed |= download(list(nltk_resources))
|
||||||
return installed
|
return installed
|
||||||
|
|
||||||
|
|
||||||
@@ -583,7 +651,8 @@ def one_from_module(module, root, info, **kwargs):
|
|||||||
if '@type' in info:
|
if '@type' in info:
|
||||||
cls = PluginMeta.from_type(info['@type'])
|
cls = PluginMeta.from_type(info['@type'])
|
||||||
return cls(info=info, **kwargs)
|
return cls(info=info, **kwargs)
|
||||||
instance = next(from_module(module=module, root=root, info=info, **kwargs), None)
|
instance = next(
|
||||||
|
from_module(module=module, root=root, info=info, **kwargs), None)
|
||||||
if not instance:
|
if not instance:
|
||||||
raise Exception("No valid plugin for: {}".format(module))
|
raise Exception("No valid plugin for: {}".format(module))
|
||||||
return instance
|
return instance
|
||||||
@@ -607,7 +676,8 @@ def _instances_in_module(module):
|
|||||||
|
|
||||||
def _from_module_name(module, root, info=None, **kwargs):
|
def _from_module_name(module, root, info=None, **kwargs):
|
||||||
module = load_module(module, root)
|
module = load_module(module, root)
|
||||||
for plugin in _from_loaded_module(module=module, root=root, info=info, **kwargs):
|
for plugin in _from_loaded_module(
|
||||||
|
module=module, root=root, info=info, **kwargs):
|
||||||
yield plugin
|
yield plugin
|
||||||
|
|
||||||
|
|
||||||
@@ -619,7 +689,8 @@ def _from_loaded_module(module, info=None, **kwargs):
|
|||||||
|
|
||||||
|
|
||||||
def evaluate(plugins, datasets, **kwargs):
|
def evaluate(plugins, datasets, **kwargs):
|
||||||
ev = gsitk_compat.Eval(tuples=None,
|
ev = gsitk_compat.Eval(
|
||||||
|
tuples=None,
|
||||||
datasets=datasets,
|
datasets=datasets,
|
||||||
pipelines=[plugin.as_pipe() for plugin in plugins])
|
pipelines=[plugin.as_pipe() for plugin in plugins])
|
||||||
ev.evaluate()
|
ev.evaluate()
|
||||||
|
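The refactor above replaces analyse/analyse_entry with process/process_entries/process_entry as the canonical entry points. A minimal sketch of a plugin written directly against the new API (hypothetical plugin, for illustration only):

    from senpy import AnalysisPlugin, easy_test

    class UppercaseExample(AnalysisPlugin):
        '''Hypothetical example: uppercases the text of each entry'''
        author = '@example'
        version = '0.1'

        def process_entry(self, entry, parameters):
            # process_entry may yield one or more annotated entries
            entry['nif:isString'] = entry['nif:isString'].upper()
            yield entry

    if __name__ == '__main__':
        easy_test()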
@@ -9,7 +9,7 @@ class Split(AnalysisPlugin):
     '''description: A sample plugin that chunks input text'''

     author = ["@militarpancho", '@balkian']
-    version = '0.2'
+    version = '0.3'
     url = "https://github.com/gsi-upm/senpy"

     extra_params = {
@@ -33,12 +33,15 @@ class Split(AnalysisPlugin):
         if chunker_type == "paragraph":
             tokenizer = LineTokenizer()
         chars = list(tokenizer.span_tokenize(original_text))
-        for i, chunk in enumerate(tokenizer.tokenize(original_text)):
-            print(chunk)
+        if len(chars) == 1:
+            # This sentence was already split
+            return
+        for i, chunk in enumerate(chars):
+            start, end = chunk
             e = Entry()
-            e['nif:isString'] = chunk
+            e['nif:isString'] = original_text[start:end]
             if entry.id:
-                e.id = entry.id + "#char={},{}".format(chars[i][0], chars[i][1])
+                e.id = entry.id + "#char={},{}".format(start, end)
             yield e

     test_cases = [
@@ -1,6 +1,6 @@
 ---
 name: Ekman2FSRE
-module: senpy.plugins.conversion.emotion.centroids
+module: senpy.plugins.postprocessing.emotion.centroids
 description: Plugin to convert emotion sets from Ekman to VAD
 version: 0.2
 # No need to specify onyx:doesConversion because centroids.py adds it automatically from centroids_direction
@@ -1,6 +1,6 @@
 ---
 name: Ekman2PAD
-module: senpy.plugins.conversion.emotion.centroids
+module: senpy.plugins.postprocessing.emotion.centroids
 description: Plugin to convert emotion sets from Ekman to VAD
 version: 0.2
 # No need to specify onyx:doesConversion because centroids.py adds it automatically from centroids_direction
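The split fix above relies on span_tokenize returning (start, end) offsets rather than token strings, so each chunk is sliced from the original text and gets a stable #char= identifier. Roughly (standard NLTK behaviour, shown here as an assumption):

    from nltk.tokenize import LineTokenizer

    chars = list(LineTokenizer().span_tokenize('First line\nSecond line'))
    # chars == [(0, 10), (11, 22)], and each pair slices the original:
    # 'First line\nSecond line'[0:10] == 'First line'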
senpy/plugins/postprocessing/emotion/maxEmotion_plugin.py (new file, 196 lines)
@@ -0,0 +1,196 @@
+from senpy import PostProcessing, easy_test
+
+
+class MaxEmotion(PostProcessing):
+    '''Plugin to extract the emotion with highest value from an EmotionSet'''
+    author = '@dsuarezsouto'
+    version = '0.1'
+
+    def process_entry(self, entry, params):
+        if len(entry.emotions) < 1:
+            yield entry
+            return
+
+        set_emotions = entry.emotions[0]['onyx:hasEmotion']
+
+        # If there is only one emotion, do not modify it
+        if len(set_emotions) < 2:
+            yield entry
+            return
+
+        max_emotion = set_emotions[0]
+
+        # Extract max emotion from the set emotions (emotion with highest intensity)
+        for tmp_emotion in set_emotions:
+            if tmp_emotion['onyx:hasEmotionIntensity'] > max_emotion[
+                    'onyx:hasEmotionIntensity']:
+                max_emotion = tmp_emotion
+
+        if max_emotion['onyx:hasEmotionIntensity'] == 0:
+            max_emotion['onyx:hasEmotionCategory'] = "neutral"
+            max_emotion['onyx:hasEmotionIntensity'] = 1.0
+
+        entry.emotions[0]['onyx:hasEmotion'] = [max_emotion]
+        entry.emotions[0]['prov:wasGeneratedBy'] = "maxSentiment"
+        yield entry
+
+    def check(self, request, plugins):
+        return 'maxemotion' in request.parameters and self not in plugins
+
+    # Test Cases:
+    # 1 Normal Situation.
+    # 2 Case to return a Neutral Emotion.
+    test_cases = [
+        {
+            "name": "If there are several emotions within an emotion set, reduce it to one.",
+            "entry": {
+                "@type": "entry",
+                "emotions": [{
+                    "@id": "Emotions0",
+                    "@type": "emotionSet",
+                    "onyx:hasEmotion": [
+                        {"@id": "_:Emotion_1538121033.74", "@type": "emotion",
+                         "onyx:hasEmotionCategory": "anger",
+                         "onyx:hasEmotionIntensity": 0},
+                        {"@id": "_:Emotion_1538121033.74", "@type": "emotion",
+                         "onyx:hasEmotionCategory": "joy",
+                         "onyx:hasEmotionIntensity": 0.3333333333333333},
+                        {"@id": "_:Emotion_1538121033.74", "@type": "emotion",
+                         "onyx:hasEmotionCategory": "negative-fear",
+                         "onyx:hasEmotionIntensity": 0},
+                        {"@id": "_:Emotion_1538121033.74", "@type": "emotion",
+                         "onyx:hasEmotionCategory": "sadness",
+                         "onyx:hasEmotionIntensity": 0},
+                        {"@id": "_:Emotion_1538121033.74", "@type": "emotion",
+                         "onyx:hasEmotionCategory": "disgust",
+                         "onyx:hasEmotionIntensity": 0}
+                    ]
+                }],
+                "nif:isString": "Test"
+            },
+            'expected': {
+                "@type": "entry",
+                "emotions": [{
+                    "@id": "Emotions0",
+                    "@type": "emotionSet",
+                    "onyx:hasEmotion": [
+                        {"@id": "_:Emotion_1538121033.74", "@type": "emotion",
+                         "onyx:hasEmotionCategory": "joy",
+                         "onyx:hasEmotionIntensity": 0.3333333333333333}
+                    ],
+                    "prov:wasGeneratedBy": 'maxSentiment'
+                }],
+                "nif:isString": "Test"
+            }
+        },
+        {
+            "name": "If the maximum emotion has an intensity of 0, return a neutral emotion.",
+            "entry": {
+                "@type": "entry",
+                "emotions": [{
+                    "@id": "Emotions0",
+                    "@type": "emotionSet",
+                    "onyx:hasEmotion": [
+                        {"@id": "_:Emotion_1538121033.74", "@type": "emotion",
+                         "onyx:hasEmotionCategory": "anger",
+                         "onyx:hasEmotionIntensity": 0},
+                        {"@id": "_:Emotion_1538121033.74", "@type": "emotion",
+                         "onyx:hasEmotionCategory": "joy",
+                         "onyx:hasEmotionIntensity": 0},
+                        {"@id": "_:Emotion_1538121033.74", "@type": "emotion",
+                         "onyx:hasEmotionCategory": "negative-fear",
+                         "onyx:hasEmotionIntensity": 0},
+                        {"@id": "_:Emotion_1538121033.74", "@type": "emotion",
+                         "onyx:hasEmotionCategory": "sadness",
+                         "onyx:hasEmotionIntensity": 0},
+                        {"@id": "_:Emotion_1538121033.74", "@type": "emotion",
+                         "onyx:hasEmotionCategory": "disgust",
+                         "onyx:hasEmotionIntensity": 0}
+                    ]
+                }],
+                "nif:isString": "Test"
+            },
+            'expected': {
+                "@type": "entry",
+                "emotions": [{
+                    "@id": "Emotions0",
+                    "@type": "emotionSet",
+                    "onyx:hasEmotion": [{
+                        "@id": "_:Emotion_1538121033.74",
+                        "@type": "emotion",
+                        "onyx:hasEmotionCategory": "neutral",
+                        "onyx:hasEmotionIntensity": 1
+                    }],
+                    "prov:wasGeneratedBy": 'maxSentiment'
+                }],
+                "nif:isString": "Test"
+            }
+        }
+    ]
+
+
+if __name__ == '__main__':
+    easy_test()
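A rough usage sketch of the post-processor above (the entry object is hypothetical; the plugin is only triggered when the request carries the 'maxemotion' parameter, per its check method):

    plugin = MaxEmotion()
    processed = next(plugin.process_entry(entry, params={}))
    # processed.emotions[0]['onyx:hasEmotion'] now holds a single emotion:
    # the one with the highest intensity, or 'neutral' if all intensities were 0.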
@@ -4,6 +4,8 @@ import json
 from senpy.plugins import SentimentPlugin
 from senpy.models import Sentiment

+ENDPOINT = 'http://www.sentiment140.com/api/bulkClassifyJson'
+

 class Sentiment140(SentimentPlugin):
     '''Connects to the sentiment140 free API: http://sentiment140.com'''
@@ -26,7 +28,7 @@ class Sentiment140(SentimentPlugin):

     def analyse_entry(self, entry, params):
         lang = params["language"]
-        res = requests.post("http://www.sentiment140.com/api/bulkClassifyJson",
+        res = requests.post(ENDPOINT,
                             json.dumps({
                                 "language": lang,
                                 "data": [{
@@ -52,18 +54,6 @@ class Sentiment140(SentimentPlugin):
         entry.language = lang
         yield entry

-    def test(self, *args, **kwargs):
-        '''
-        To avoid calling the sentiment140 API, we will mock the results
-        from requests.
-        '''
-        from senpy.testing import patch_requests
-        expected = {"data": [{"polarity": 4}]}
-        with patch_requests(expected) as (request, response):
-            super(Sentiment140, self).test(*args, **kwargs)
-        assert request.called
-        assert response.json.called
-
     test_cases = [
         {
             'entry': {
@@ -77,6 +67,9 @@ class Sentiment140(SentimentPlugin):
                 'marl:hasPolarity': 'marl:Positive',
             }
         ]
-    }
+    },
+    'responses': [{'url': ENDPOINT,
+                   'method': 'POST',
+                   'json': {'data': [{'polarity': 4}]}}]
     }
 ]
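The removed test method patched requests by hand; each test case now declares its mocked HTTP traffic in a 'responses' list, which test_case forwards to testing.patch_all_requests. Roughly, one such item is registered like this (a sketch, assuming the responses library):

    import responses

    mock = responses.RequestsMock()
    mock.add(method=responses.POST, url=ENDPOINT,
             json={'data': [{'polarity': 4}]})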
@@ -9,7 +9,20 @@
         "@type": {
             "type": "string",
             "description": "Type of the analysis. e.g. marl:SentimentAnalysis"
+        },
+        "prov:wasAssociatedWith": {
+            "@type": "string",
+            "description": "Algorithm/plugin that was used"
+        },
+        "prov:used": {
+            "description": "Parameters of the algorithm",
+            "@type": "array",
+            "default": [],
+            "type": "array",
+            "items": {
+                "$ref": "parameter.json"
+            }
         }
     },
-    "required": ["@id", "@type"]
+    "required": ["@type", "prov:wasAssociatedWith"]
 }
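Under the updated schema, a minimal valid analysis object now looks roughly like this (illustrative values):

    analysis = {
        "@type": "marl:SentimentAnalysis",
        "prov:wasAssociatedWith": "endpoint:plugins/sentiment140_0.2",
        "prov:used": [{"name": "language", "prov:value": "en"}],
    }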
@@ -41,7 +41,7 @@
         "@container": "@set"
     },
     "analysis": {
-        "@id": "AnalysisInvolved",
+        "@id": "prov:wasInformedBy",
         "@type": "@id",
         "@container": "@set"
     },
@@ -20,5 +20,5 @@
             "description": "The ID of the analysis that generated this Emotion. The full object should be included in the \"analysis\" property of the root object"
         }
     },
-    "required": ["@id", "prov:wasGeneratedBy", "onyx:hasEmotion"]
+    "required": ["prov:wasGeneratedBy", "onyx:hasEmotion"]
 }
@@ -35,5 +35,5 @@
             "default": []
         }
     },
-    "required": ["@id", "nif:isString"]
+    "required": ["nif:isString"]
 }
senpy/schemas/parameter.json (new file, 16 lines)
@@ -0,0 +1,16 @@
+{
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "description": "Parameters for a senpy analysis",
+    "type": "object",
+    "properties": {
+        "name": {
+            "type": "string",
+            "description": "Name of the parameter"
+        },
+        "prov:value": {
+            "@type": "any",
+            "description": "Value of the parameter"
+        }
+    },
+    "required": ["name", "prov:value"]
+}
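Each element of the new prov:used array must validate against this schema, e.g. (illustrative):

    param = {"name": "language", "prov:value": "en"}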
@@ -21,13 +21,7 @@
             "default": [],
             "type": "array",
             "items": {
-                "anyOf": [
-                    {
-                        "$ref": "analysis.json"
-                    },{
-                        "type": "string"
-                    }
-                ]
+                "$ref": "analysis.json"
             }
         },
         "entries": {
@@ -19,5 +19,5 @@
             "description": "The ID of the analysis that generated this Sentiment. The full object should be included in the \"analysis\" property of the root object"
         }
     },
-    "required": ["@id", "prov:wasGeneratedBy"]
+    "required": ["prov:wasGeneratedBy"]
 }
@@ -1,36 +1,31 @@
-try:
-    from unittest.mock import patch, MagicMock
-except ImportError:
-    from mock import patch, MagicMock
-
 from past.builtins import basestring

-import json
-from contextlib import contextmanager
+import os
+import responses as requestmock

 from .models import BaseModel


-@contextmanager
-def patch_requests(value, code=200):
-    success = MagicMock()
-    if isinstance(value, BaseModel):
-        value = value.jsonld()
-    if not isinstance(value, basestring):
-        data = json.dumps(value)
-    else:
-        data = value
-    success.json.return_value = value
-    success.status_code = code
-    success.content = data
-    success.text = data
-
-    method_mocker = MagicMock()
-    method_mocker.return_value = success
-    with patch.multiple('requests', request=method_mocker,
-                        get=method_mocker, post=method_mocker):
-        yield method_mocker, success
-        assert method_mocker.called
+MOCK_REQUESTS = os.environ.get('MOCK_REQUESTS', '').lower() in ['no', 'false']
+
+
+def patch_all_requests(responses):
+    patched = requestmock.RequestsMock()
+    for response in responses or []:
+        args = response.copy()
+        if 'json' in args and isinstance(args['json'], BaseModel):
+            args['json'] = args['json'].jsonld()
+        args['method'] = getattr(requestmock, args.get('method', 'GET'))
+        patched.add(**args)
+    return patched
+
+
+def patch_requests(url, response, method='GET', status=200):
+    args = {'url': url, 'method': method, 'status': status}
+    if isinstance(response, basestring):
+        args['body'] = response
+    else:
+        args['json'] = response
+    return patch_all_requests([args])
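The old helper yielded MagicMock objects; the new one returns a responses.RequestsMock, which callers use as a context manager. A short usage sketch:

    from senpy.testing import patch_requests
    from senpy.models import Results

    with patch_requests('http://dummy/', Results()):
        # any HTTP GET to http://dummy/ inside this block receives
        # the mocked JSON payload instead of hitting the network
        ...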
@@ -12,6 +12,7 @@ max-line-length = 100
 universal=1
 [tool:pytest]
 addopts = --cov=senpy --cov-report term-missing
+filterwarnings =
+    ignore:the matrix subclass:PendingDeprecationWarning
 [coverage:report]
 omit = senpy/__main__.py
@@ -3,8 +3,9 @@ import logging
 logger = logging.getLogger(__name__)

 from unittest import TestCase
-from senpy.api import parse_params, API_PARAMS, NIF_PARAMS, WEB_PARAMS
-from senpy.models import Error
+from senpy.api import (boolean, parse_params, get_extra_params, parse_analysis,
+                       API_PARAMS, NIF_PARAMS, WEB_PARAMS)
+from senpy.models import Error, Plugin


 class APITest(TestCase):
@@ -89,3 +90,156 @@ class APITest(TestCase):
         assert "Dummy" in p['algorithm']
         assert 'input' in p
         assert p['input'] == 'Aloha my friend'
+
+    def test_parse_analysis(self):
+        '''The API should parse user parameters and return them in a format that plugins can use'''
+        plugins = [
+            Plugin({
+                'name': 'plugin1',
+                'extra_params': {
+                    # Incompatible parameter
+                    'param0': {
+                        'aliases': ['p1', 'parameter1'],
+                        'options': ['option1', 'option2'],
+                        'default': 'option1',
+                        'required': True
+                    },
+                    'param1': {
+                        'aliases': ['p1', 'parameter1'],
+                        'options': ['en', 'es'],
+                        'default': 'en',
+                        'required': False
+                    },
+                    'param2': {
+                        'aliases': ['p2', 'parameter2'],
+                        'required': False,
+                        'options': ['value2_1', 'value2_2', 'value3_3']
+                    }
+                }
+            }), Plugin({
+                'name': 'plugin2',
+                'extra_params': {
+                    'param0': {
+                        'aliases': ['parameter1'],
+                        'options': ['new option', 'new option2'],
+                        'default': 'new option',
+                        'required': False
+                    },
+                    'param1': {
+                        'aliases': ['myparam1', 'p1'],
+                        'options': ['en', 'de', 'auto'],
+                        'default': 'de',
+                        'required': True
+                    },
+                    'param3': {
+                        'aliases': ['p3', 'parameter3'],
+                        'options': boolean,
+                        'default': True
+                    }
+                }
+            })
+        ]
+        call = {
+            'param1': 'en',
+            '0.param0': 'option1',
+            '0.param1': 'en',
+            'param2': 'value2_1',
+            'param0': 'new option',
+            '1.param1': 'de',
+            'param3': False,
+        }
+        expected = [
+            {
+                'param0': 'option1',
+                'param1': 'en',
+                'param2': 'value2_1',
+            }, {
+                'param0': 'new option',
+                'param1': 'de',
+                'param3': False,
+            }
+        ]
+        p = parse_analysis(call, plugins)
+        for i, arg in enumerate(expected):
+            params = p[i].params
+            for k, v in arg.items():
+                assert params[k] == v
+
+    def test_get_extra_params(self):
+        '''The API should return the list of valid parameters for a set of plugins'''
+        plugins = [
+            Plugin({
+                'name': 'plugin1',
+                'extra_params': {
+                    # Incompatible parameter
+                    'param0': {
+                        'aliases': ['p1', 'parameter1'],
+                        'options': ['option1', 'option2'],
+                        'default': 'option1',
+                        'required': True
+                    },
+                    'param1': {
+                        'aliases': ['p1', 'parameter1'],
+                        'options': ['en', 'es'],
+                        'default': 'en',
+                        'required': False
+                    },
+                    'param2': {
+                        'aliases': ['p2', 'parameter2'],
+                        'required': False,
+                        'options': ['value2_1', 'value2_2', 'value3_3']
+                    }
+                }
+            }), Plugin({
+                'name': 'plugin2',
+                'extra_params': {
+                    'param0': {
+                        'aliases': ['parameter1'],
+                        'options': ['new option', 'new option2'],
+                        'default': 'new option',
+                        'required': False
+                    },
+                    'param1': {
+                        'aliases': ['myparam1', 'p1'],
+                        'options': ['en', 'de', 'auto'],
+                        'default': 'de',
+                        'required': True
+                    },
+                    'param3': {
+                        'aliases': ['p3', 'parameter3'],
+                        'options': boolean,
+                        'default': True
+                    }
+                }
+            })
+        ]
+
+        expected = {
+            # Overlapping parameters
+            'plugin1.param0': plugins[0]['extra_params']['param0'],
+            'plugin1.param1': plugins[0]['extra_params']['param1'],
+            'plugin2.param0': plugins[1]['extra_params']['param0'],
+            'plugin2.param1': plugins[1]['extra_params']['param1'],
+
+            # Non-overlapping parameters
+            'param2': plugins[0]['extra_params']['param2'],
+            'param3': plugins[1]['extra_params']['param3'],
+
+            # Intersection of overlapping parameters
+            'param1': {
+                'aliases': ['p1'],
+                'options': ['en'],
+                'default': None,
+                'required': True
+            }
+        }
+
+        result = get_extra_params(plugins)
+
+        for ik, iv in expected.items():
+            assert ik in result
+            for jk, jv in iv.items():
+                assert jk in result[ik]
+                assert expected[ik][jk] == result[ik][jk]
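The tests above exercise scoped parameters: a bare name applies to every plugin in the chain that accepts it, while an index prefix such as '0.param1' targets a single step. A sketch of the call shape parse_analysis consumes:

    call = {
        'param1': 'en',    # any plugin in the chain with a 'param1'
        '0.param1': 'en',  # only the first plugin
        '1.param1': 'de',  # only the second plugin
    }
    activities = parse_analysis(call, plugins)
    # one Analysis per plugin, each carrying only its own parameters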
@@ -26,8 +26,7 @@ class BlueprintsTest(TestCase):
         cls.senpy.init_app(cls.app)
         cls.dir = os.path.join(os.path.dirname(__file__), "..")
         cls.senpy.add_folder(cls.dir)
-        cls.senpy.activate_plugin("Dummy", sync=True)
-        cls.senpy.activate_plugin("DummyRequired", sync=True)
+        cls.senpy.activate_all()
         cls.senpy.default_plugin = 'Dummy'

     def setUp(self):
@@ -67,10 +66,25 @@ class BlueprintsTest(TestCase):
         logging.debug("Got response: %s", js)
         assert "@context" in js
         assert "entries" in js
+        assert len(js['analysis']) == 1
+
+    def test_analysis_post(self):
+        """
+        The results for a POST request should be the same as for a GET request.
+        """
+        resp = self.client.post("/api/", data={'i': 'My aloha mohame',
+                                               'algorithm': 'rand',
+                                               'with_parameters': True})
+        self.assertCode(resp, 200)
+        js = parse_resp(resp)
+        logging.debug("Got response: %s", js)
+        assert "@context" in js
+        assert "entries" in js
+        assert len(js['analysis']) == 1

     def test_analysis_extra(self):
         """
-        Extra params that have a default should
+        Extra params that have a default should use it
         """
         resp = self.client.get("/api/?i=My aloha mohame&algo=Dummy&with_parameters=true")
         self.assertCode(resp, 200)
@@ -92,9 +106,111 @@ class BlueprintsTest(TestCase):
         assert isinstance(js, models.Error)
         resp = self.client.get("/api/?i=My aloha mohame&algo=DummyRequired&example=notvalid")
         self.assertCode(resp, 400)
+        self.app.config['TESTING'] = True
         resp = self.client.get("/api/?i=My aloha mohame&algo=DummyRequired&example=a")
         self.assertCode(resp, 200)
+
+    def test_analysis_url(self):
+        """
+        The algorithm can also be specified as part of the URL
+        """
+        self.app.config['TESTING'] = False  # Errors are expected in this case
+        resp = self.client.get("/api/DummyRequired?i=My aloha mohame")
+        self.assertCode(resp, 400)
+        js = parse_resp(resp)
+        logging.debug("Got response: %s", js)
+        assert isinstance(js, models.Error)
+        resp = self.client.get("/api/DummyRequired?i=My aloha mohame&example=notvalid")
+        self.assertCode(resp, 400)
+        resp = self.client.get("/api/DummyRequired?i=My aloha mohame&example=a")
+        self.assertCode(resp, 200)
+
+    def test_analysis_chain(self):
+        """
+        More than one algorithm can be specified. Plugins will then be chained
+        """
+        resp = self.client.get("/api/Dummy?i=My aloha mohame")
+        js = parse_resp(resp)
+        assert len(js['analysis']) == 1
+        assert js['entries'][0]['nif:isString'] == 'My aloha mohame'[::-1]
+
+        resp = self.client.get("/api/Dummy/Dummy?i=My aloha mohame")
+        # Calling dummy twice, should return the same string
+        self.assertCode(resp, 200)
+        js = parse_resp(resp)
+        assert len(js['analysis']) == 2
+        assert js['entries'][0]['nif:isString'] == 'My aloha mohame'
+
+        resp = self.client.get("/api/Dummy+Dummy?i=My aloha mohame")
+        # Same with pluses instead of slashes
+        self.assertCode(resp, 200)
+        js = parse_resp(resp)
+        assert len(js['analysis']) == 2
+        assert js['entries'][0]['nif:isString'] == 'My aloha mohame'
+
+    def test_analysis_chain_required(self):
+        """
+        If a parameter is required and duplicated (because two plugins require it), specifying
+        it once should suffice
+        """
+        resp = self.client.get("/api/DummyRequired/DummyRequired?i=My aloha mohame&example=a")
+        js = parse_resp(resp)
+        assert len(js['analysis']) == 2
+        assert js['entries'][0]['nif:isString'] == 'My aloha mohame'
+        assert js['entries'][0]['reversed'] == 2
+
+    def test_requirements_chain_help(self):
+        '''The extra parameters of each plugin should be merged if they are in a chain '''
+        resp = self.client.get("/api/split/DummyRequired?help=true")
+        self.assertCode(resp, 200)
+        js = parse_resp(resp)
+        assert 'valid_parameters' in js
+        vp = js['valid_parameters']
+        assert 'example' in vp
+        assert 'delimiter' in vp
+
+    def test_requirements_chain_repeat_help(self):
+        '''
+        If a plugin appears several times in a chain, there should be a way to set different
+        parameters for each.
+        '''
+        resp = self.client.get("/api/split/split?help=true")
+        self.assertCode(resp, 200)
+        js = parse_resp(resp)
+        assert 'valid_parameters' in js
+        vp = js['valid_parameters']
+        assert 'delimiter' in vp
+
+        resp = self.client.get("/api/split/split?help=true&verbose=false")
+        js = parse_resp(resp)
+        vp = js['valid_parameters']
+        assert len(vp.keys()) == 1
+
+    def test_requirements_chain(self):
+        """
+        It should be possible to specify different parameters for each step in the chain.
+        """
+        # First, we split by sentence twice. Each call should generate 3 additional entries
+        # (one per sentence in the original).
+        resp = self.client.get('/api/split/split?i=The first sentence. The second sentence.'
+                               '\nA new paragraph&delimiter=sentence')
+        js = parse_resp(resp)
+        assert len(js['analysis']) == 2
+        assert len(js['entries']) == 7
+
+        # Now, we split by sentence. This produces 3 additional entries.
+        # Then, we split by paragraph. This should create 2 additional entries (One per paragraph
+        # in the original text)
+        resp = self.client.get('/api/split/split?i=The first sentence. The second sentence.'
+                               '\nA new paragraph&0.delimiter=sentence&1.delimiter=paragraph')
+        # Calling dummy twice, should return the same string
+        self.assertCode(resp, 200)
+        js = parse_resp(resp)
+        assert len(js['analysis']) == 2
+        assert len(js['entries']) == 6

     def test_error(self):
         """
         The dummy plugin returns an empty response,\
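As the chaining tests show, the pipeline is driven entirely by the URL path; a rough summary of the request shapes involved:

    # /api/Dummy/Dummy?i=text   -> two analyses; Dummy applied twice
    # /api/Dummy+Dummy?i=text   -> the same chain, with '+' instead of '/'
    # /api/split/split?i=...&0.delimiter=sentence&1.delimiter=paragraph
    #                           -> each step receives its own delimiter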
@@ -14,22 +14,28 @@ class ModelsTest(TestCase):
     def test_client(self):
         endpoint = 'http://dummy/'
         client = Client(endpoint)
-        with patch_requests(Results()) as (request, response):
+        with patch_requests('http://dummy/', Results()):
             resp = client.analyse('hello')
             assert isinstance(resp, Results)
-        request.assert_called_with(
-            url=endpoint + '/', method='GET', params={'input': 'hello'})
-        with patch_requests(Error('Nothing')) as (request, response):
+        with patch_requests('http://dummy/', Error('Nothing')):
             try:
                 client.analyse(input='hello', algorithm='NONEXISTENT')
                 raise Exception('Exceptions should be raised. This is not golang')
             except Error:
                 pass
-        request.assert_called_with(
-            url=endpoint + '/',
-            method='GET',
-            params={'input': 'hello',
-                    'algorithm': 'NONEXISTENT'})
+
+    def test_client_post(self):
+        endpoint = 'http://dummy/'
+        client = Client(endpoint)
+        with patch_requests('http://dummy/', Results()):
+            resp = client.analyse('hello')
+            assert isinstance(resp, Results)
+        with patch_requests('http://dummy/', Error('Nothing'), method='POST'):
+            try:
+                client.analyse(input='hello', method='POST', algorithm='NONEXISTENT')
+                raise Exception('Exceptions should be raised. This is not golang')
+            except Error:
+                pass

     def test_plugins(self):
         endpoint = 'http://dummy/'
@@ -37,11 +43,8 @@ class ModelsTest(TestCase):
         plugins = Plugins()
         p1 = AnalysisPlugin({'name': 'AnalysisP1', 'version': 0, 'description': 'No'})
         plugins.plugins = [p1, ]
-        with patch_requests(plugins) as (request, response):
+        with patch_requests('http://dummy/plugins', plugins):
             response = client.plugins()
             assert isinstance(response, dict)
             assert len(response) == 1
             assert "AnalysisP1" in response
-        request.assert_called_with(
-            url=endpoint + '/plugins', method='GET',
-            params={})
@@ -11,14 +11,15 @@ except ImportError:
 from functools import partial
 from senpy.extensions import Senpy
 from senpy import plugins
-from senpy.models import Error, Results, Entry, EmotionSet, Emotion, Plugin
+from senpy.models import Analysis, Error, Results, Entry, EmotionSet, Emotion, Plugin
 from senpy import api
 from flask import Flask
 from unittest import TestCase


 def analyse(instance, **kwargs):
-    request = api.parse_call(kwargs)
+    basic = api.parse_params(kwargs, api.API_PARAMS)
+    request = api.parse_call(basic)
     return instance.analyse(request)
@@ -49,9 +50,9 @@ class ExtensionsTest(TestCase):
         '''Should be able to add and delete new plugins. '''
         new = plugins.Analysis(name='new', description='new', version=0)
         self.senpy.add_plugin(new)
-        assert new in self.senpy.plugins()
+        assert new in self.senpy.plugins(is_activated=False)
         self.senpy.delete_plugin(new)
-        assert new not in self.senpy.plugins()
+        assert new not in self.senpy.plugins(is_activated=False)

     def test_adding_folder(self):
         """ It should be possible for senpy to look for plugins in more folders. """
@@ -60,7 +61,7 @@ class ExtensionsTest(TestCase):
                       default_plugins=False)
         assert not senpy.analysis_plugins
         senpy.add_folder(self.examples_dir)
-        assert senpy.analysis_plugins
+        assert senpy.plugins(plugin_type=plugins.AnalysisPlugin, is_activated=False)
         self.assertRaises(AttributeError, senpy.add_folder, 'DOES NOT EXIST')

     def test_installing(self):
@@ -121,8 +122,8 @@ class ExtensionsTest(TestCase):
         # Leaf (defaultdict with __setattr__ and __getattr__.
         r1 = analyse(self.senpy, algorithm="Dummy", input="tupni", output="tuptuo")
         r2 = analyse(self.senpy, input="tupni", output="tuptuo")
-        assert r1.analysis[0] == "endpoint:plugins/Dummy_0.1"
-        assert r2.analysis[0] == "endpoint:plugins/Dummy_0.1"
+        assert r1.analysis[0].algorithm == "endpoint:plugins/Dummy_0.1"
+        assert r2.analysis[0].algorithm == "endpoint:plugins/Dummy_0.1"
         assert r1.entries[0]['nif:isString'] == 'input'

     def test_analyse_empty(self):
@@ -130,7 +131,7 @@ class ExtensionsTest(TestCase):
         senpy = Senpy(plugin_folder=None,
                       app=self.app,
                       default_plugins=False)
-        self.assertRaises(Error, senpy.analyse, Results())
+        self.assertRaises(Error, senpy.analyse, Results(), [])

     def test_analyse_wrong(self):
         """ Trying to analyse with a non-existent plugin should raise an error."""
@@ -156,30 +157,32 @@ class ExtensionsTest(TestCase):
         r2 = analyse(self.senpy,
                      input="tupni",
                      output="tuptuo")
-        assert r1.analysis[0] == "endpoint:plugins/Dummy_0.1"
-        assert r2.analysis[0] == "endpoint:plugins/Dummy_0.1"
+        assert r1.analysis[0].algorithm == "endpoint:plugins/Dummy_0.1"
+        assert r2.analysis[0].algorithm == "endpoint:plugins/Dummy_0.1"
         assert r1.entries[0]['nif:isString'] == 'input'

     def test_analyse_error(self):
-        mm = mock.MagicMock()
-        mm.id = 'magic_mock'
-        mm.name = 'mock'
-        mm.is_activated = True
-        mm.analyse_entries.side_effect = Error('error in analysis', status=500)
-        self.senpy.add_plugin(mm)
+        class ErrorPlugin(plugins.Analysis):
+            author = 'nobody'
+            version = 0
+            ex = Error()
+
+            def process(self, *args, **kwargs):
+                raise self.ex
+
+        m = ErrorPlugin(ex=Error('error in analysis', status=500))
+        self.senpy.add_plugin(m)
         try:
-            analyse(self.senpy, input='nothing', algorithm='MOCK')
+            analyse(self.senpy, input='nothing', algorithm='ErrorPlugin')
             assert False
         except Error as ex:
             assert 'error in analysis' in ex['message']
             assert ex['status'] == 500

-        ex = Exception('generic exception on analysis')
-        mm.analyse.side_effect = ex
-        mm.analyse_entries.side_effect = ex
+        m.ex = Exception('generic exception on analysis')

         try:
-            analyse(self.senpy, input='nothing', algorithm='MOCK')
+            analyse(self.senpy, input='nothing', algorithm='ErrorPlugin')
             assert False
         except Exception as ex:
             assert 'generic exception on analysis' in str(ex)
@@ -195,7 +198,7 @@ class ExtensionsTest(TestCase):

     def test_load_default_plugins(self):
         senpy = Senpy(plugin_folder=self.examples_dir, default_plugins=True)
-        assert len(senpy.plugins()) > 1
+        assert len(senpy.plugins(is_activated=False)) > 1

     def test_convert_emotions(self):
         self.senpy.activate_all(sync=True)
@@ -211,27 +214,28 @@ class ExtensionsTest(TestCase):
             'emoml:valence': 0
         }))
         response = Results({
-            'analysis': [{'plugin': plugin}],
+            'analysis': [plugin],
             'entries': [Entry({
                 'nif:isString': 'much ado about nothing',
                 'emotions': [eSet1]
             })]
         })
         params = {'emotionModel': 'emoml:big6',
+                  'algorithm': ['conversion'],
                   'conversion': 'full'}
         r1 = deepcopy(response)
         r1.parameters = params
-        self.senpy.convert_emotions(r1)
+        self.senpy.analyse(r1)
         assert len(r1.entries[0].emotions) == 2
         params['conversion'] = 'nested'
         r2 = deepcopy(response)
         r2.parameters = params
-        self.senpy.convert_emotions(r2)
+        self.senpy.analyse(r2)
         assert len(r2.entries[0].emotions) == 1
         assert r2.entries[0].emotions[0]['prov:wasDerivedFrom'] == eSet1
         params['conversion'] = 'filtered'
         r3 = deepcopy(response)
         r3.parameters = params
-        self.senpy.convert_emotions(r3)
+        self.senpy.analyse(r3)
         assert len(r3.entries[0].emotions) == 1
         r3.jsonld()
@@ -5,7 +5,8 @@ import jsonschema
 import json
 import rdflib
 from unittest import TestCase
-from senpy.models import (Emotion,
+from senpy.models import (Analysis,
+                          Emotion,
                           EmotionAnalysis,
                           EmotionSet,
                           Entry,
@@ -61,7 +62,7 @@ class ModelsTest(TestCase):
     def test_id(self):
         """ Adding the id after creation should overwrite the automatic ID
         """
-        r = Entry()
+        r = Entry(_auto_id=True)
         j = r.jsonld()
         assert '@id' in j
         r.id = "test"
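Note: the hunk stops right after r.id is assigned, but the docstring states
the intended behavior: an explicit id replaces the auto-generated one. A
hedged sketch of that check (the final assertion is assumed, since the diff
does not show it):

    from senpy.models import Entry

    r = Entry(_auto_id=True)
    assert '@id' in r.jsonld()           # an automatic id is generated
    r.id = "test"
    assert r.jsonld()['@id'] == 'test'   # assumed: the explicit id wins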
@@ -189,6 +190,19 @@ class ModelsTest(TestCase):
         assert isinstance(js['plugins'], list)
         assert js['plugins'][0]['@type'] == 'sentimentPlugin'
 
+    def test_parameters(self):
+        '''An Analysis should contain the algorithm and the list of parameters to be used'''
+        a = Analysis()
+        a.params = {'param1': 1, 'param2': 2}
+        assert len(a.parameters) == 2
+        for param in a.parameters:
+            if param.name == 'param1':
+                assert param.value == 1
+            elif param.name == 'param2':
+                assert param.value == 2
+            else:
+                raise Exception('Unknown value %s' % param)
+
     def test_from_string(self):
         results = {
             '@type': 'results',
@@ -1,15 +1,15 @@
 #!/bin/env python
 
 import os
-import sys
 import pickle
 import shutil
 import tempfile
 
-from unittest import TestCase
+from unittest import TestCase, skipIf
 from senpy.models import Results, Entry, EmotionSet, Emotion, Plugins
 from senpy import plugins
-from senpy.plugins.conversion.emotion.centroids import CentroidConversion
+from senpy.plugins.postprocessing.emotion.centroids import CentroidConversion
+from senpy.gsitk_compat import GSITK_AVAILABLE
 
 import pandas as pd
 
@@ -346,13 +346,15 @@ class PluginsTest(TestCase):
         smart_metrics = results[0].metrics[0]
         assert abs(smart_metrics['accuracy'] - 1) < 0.01
 
+    @skipIf(not GSITK_AVAILABLE, "GSITK is not available")
     def test_evaluation(self):
-        if sys.version_info < (3, 0):
-            with self.assertRaises(Exception) as context:
-                self._test_evaluation()
-            self.assertTrue('GSITK ' in str(context.exception))
-        else:
-            self._test_evaluation()
+        self._test_evaluation()
+
+    @skipIf(GSITK_AVAILABLE, "GSITK is available")
+    def test_evaluation_unavailable(self):
+        with self.assertRaises(Exception) as context:
+            self._test_evaluation()
+        self.assertTrue('GSITK ' in str(context.exception))
 
 
 def make_mini_test(fpath):
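Note: the evaluation tests are now gated on dependency availability instead of
the Python version. The senpy.gsitk_compat module presumably derives
GSITK_AVAILABLE from an import probe; a generic sketch of the same unittest
pattern (everything except skipIf itself is illustrative):

    from unittest import TestCase, skipIf

    try:
        import gsitk  # noqa: F401
        GSITK_AVAILABLE = True
    except ImportError:
        GSITK_AVAILABLE = False

    class EvaluationTest(TestCase):
        @skipIf(not GSITK_AVAILABLE, "GSITK is not available")
        def test_with_gsitk(self):
            pass  # runs only when the optional dependency is installed

        @skipIf(GSITK_AVAILABLE, "GSITK is available")
        def test_without_gsitk(self):
            pass  # runs only when it is missing, e.g. to check the error path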
@@ -5,28 +5,29 @@ import json
 from senpy.testing import patch_requests
 from senpy.models import Results
 
+ENDPOINT = 'http://example.com'
+
 
 class TestTest(TestCase):
     def test_patch_text(self):
-        with patch_requests('hello'):
-            r = requests.get('http://example.com')
+        with patch_requests(ENDPOINT, 'hello'):
+            r = requests.get(ENDPOINT)
         assert r.text == 'hello'
-        assert r.content == 'hello'
 
     def test_patch_json(self):
         r = Results()
-        with patch_requests(r):
-            res = requests.get('http://example.com')
-        assert res.content == json.dumps(r.jsonld())
+        with patch_requests(ENDPOINT, r):
+            res = requests.get(ENDPOINT)
+        assert res.text == json.dumps(r.jsonld())
         js = res.json()
         assert js
         assert js['@type'] == r['@type']
 
     def test_patch_dict(self):
         r = {'nothing': 'new'}
-        with patch_requests(r):
-            res = requests.get('http://example.com')
-        assert res.content == json.dumps(r)
+        with patch_requests(ENDPOINT, r):
+            res = requests.get(ENDPOINT)
+        assert res.text == json.dumps(r)
         js = res.json()
         assert js
         assert js['nothing'] == 'new'
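Note: patch_requests now names the endpoint it mocks as its first argument,
and the tests compare JSON payloads through res.text rather than res.content.
A hedged usage sketch assembled only from calls that appear in the diff:

    import requests
    from senpy.models import Results
    from senpy.testing import patch_requests

    ENDPOINT = 'http://example.com'

    # Any GET to ENDPOINT inside the block returns the serialized Results.
    with patch_requests(ENDPOINT, Results()):
        res = requests.get(ENDPOINT)
        assert res.json()['@type'] == 'results'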