diff --git a/.gitmodules b/.gitmodules index 6648a0a..1679e11 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,15 +1,9 @@ -[submodule "sentiment-meaningCloud"] - path = sentiment-meaningCloud - url = https://lab.cluster.gsi.dit.upm.es/senpy/sentiment-meaningCloud/ -[submodule "sentiment-vader"] - path = sentiment-vader - url = https://lab.cluster.gsi.dit.upm.es/senpy/sentiment-vader/ -[submodule "emotion-wnaffect"] - path = emotion-wnaffect - url = https://lab.cluster.gsi.dit.upm.es/senpy/emotion-wnaffect -[submodule "emotion-anew"] - path = emotion-anew - url = https://lab.cluster.gsi.dit.upm.es/senpy/emotion-anew -[submodule "sentiment-basic"] - path = sentiment-basic - url = https://lab.cluster.gsi.dit.upm.es/senpy/sentiment-basic +[submodule "emotion-anew/data"] + path = emotion-anew/data + url = https://lab.cluster.gsi.dit.upm.es/senpy/data/emotion-anew.git +[submodule "emotion-wnaffect/data"] + path = emotion-wnaffect/data + url = https://lab.cluster.gsi.dit.upm.es/senpy/data/emotion-wnaffect.git +[submodule "sentiment-basic/data"] + path = sentiment-basic/data + url = https://lab.cluster.gsi.dit.upm.es/senpy/data/sentiment-basic.git diff --git a/Dockerfile b/Dockerfile index 4c80456..f272ce2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,9 +1,10 @@ -from gsiupm/senpy:0.8.7-python2.7 +from gsiupm/senpy:0.10.5-python2.7 RUN python -m nltk.downloader stopwords RUN python -m nltk.downloader punkt RUN python -m nltk.downloader maxent_treebank_pos_tagger RUN python -m nltk.downloader wordnet +RUN python -m nltk.downloader omw ADD . 
/senpy-plugins diff --git a/Makefile b/Makefile index 9c33135..852f12f 100644 --- a/Makefile +++ b/Makefile @@ -3,6 +3,11 @@ NAME=senpycommunity REPO=gsiupm VERSION=test PLUGINS= $(filter %/, $(wildcard */)) +DOCKER_FLAGS= + +ifdef SENPY_FOLDER + DOCKER_FLAGS+=-v $(realpath $(SENPY_FOLDER)):/usr/src/app/ + endif all: build run @@ -11,15 +16,15 @@ build: clean Dockerfile docker build -t '$(REPO)/$(NAME):$(VERSION)-python$(PYVERSION)' -f Dockerfile .; test-%: - docker run -v $$PWD/$*:/senpy-plugins/ --rm --entrypoint=/usr/local/bin/py.test -ti '$(REPO)/$(NAME):$(VERSION)-python$(PYVERSION)' test.py + docker run $(DOCKER_FLAGS) -v $$PWD/$*:/senpy-plugins/ --rm -ti '$(REPO)/$(NAME):$(VERSION)-python$(PYVERSION)' --only-test $(TEST_FLAGS) -test: $(addprefix test-,$(PLUGINS)) +test: test-. clean: @docker ps -a | awk '/$(REPO)\/$(NAME)/{ split($$2, vers, "-"); if(vers[1] != "${VERSION}"){ print $$1;}}' | xargs docker rm 2>/dev/null|| true @docker images | awk '/$(REPO)\/$(NAME)/{ split($$2, vers, "-"); if(vers[1] != "${VERSION}"){ print $$1":"$$2;}}' | xargs docker rmi 2>/dev/null|| true run: build - docker run --rm -p 5000:5000 -ti '$(REPO)/$(NAME):$(VERSION)-python$(PYMAIN)' + docker run $(DOCKER_FLAGS) --rm -p 5000:5000 -ti '$(REPO)/$(NAME):$(VERSION)-python$(PYMAIN)' .PHONY: test test-% build-% build test test_pip run clean diff --git a/emotion-anew/.gitmodules b/emotion-anew/.gitmodules deleted file mode 100644 index e7aadbd..0000000 --- a/emotion-anew/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "data"] - path = data - url = ../data/emotion-anew diff --git a/emotion-anew/emotion-anew.py b/emotion-anew/emotion-anew.py index 1ff42f4..a91e8fa 100644 --- a/emotion-anew/emotion-anew.py +++ b/emotion-anew/emotion-anew.py @@ -23,12 +23,81 @@ from senpy.plugins import SentimentPlugin, SenpyPlugin from senpy.models import Results, EmotionSet, Entry, Emotion -class EmotionTextPlugin(SentimentPlugin): +class ANEW(SentimentPlugin): + description = "This plugin consists 
of an emotion classifier using ANEW lexicon dictionary to calculate VAD (valence-arousal-dominance) of the sentence and determine which emotion is closest to this value. Each emotion has a centroid, calculated according to this article: http://www.aclweb.org/anthology/W10-0208. The plugin is going to look for the words in the sentence that appear in the ANEW dictionary and calculate the average VAD score for the sentence. Once this score is calculated, it is going to seek the emotion that is closest to this value." + author = "@icorcuera" + version = "0.5.1" + name = "emotion-anew" + + extra_params = { + "language": { + "aliases": ["language", "l"], + "required": True, + "options": ["es","en"], + "default": "en" + } + } + + anew_path_es = "Dictionary/Redondo(2007).csv" + anew_path_en = "Dictionary/ANEW2010All.txt" + centroids = { + "anger": { + "A": 6.95, + "D": 5.1, + "V": 2.7 + }, + "disgust": { + "A": 5.3, + "D": 8.05, + "V": 2.7 + }, + "fear": { + "A": 6.5, + "D": 3.6, + "V": 3.2 + }, + "joy": { + "A": 7.22, + "D": 6.28, + "V": 8.6 + }, + "sadness": { + "A": 5.21, + "D": 2.82, + "V": 2.21 + } + } + emotions_ontology = { + "anger": "http://gsi.dit.upm.es/ontologies/wnaffect/ns#anger", + "disgust": "http://gsi.dit.upm.es/ontologies/wnaffect/ns#disgust", + "fear": "http://gsi.dit.upm.es/ontologies/wnaffect/ns#negative-fear", + "joy": "http://gsi.dit.upm.es/ontologies/wnaffect/ns#joy", + "neutral": "http://gsi.dit.upm.es/ontologies/wnaffect/ns#neutral-emotion", + "sadness": "http://gsi.dit.upm.es/ontologies/wnaffect/ns#sadness" + } + onyx__usesEmotionModel = "emoml:big6" + nltk_resources = ['stopwords'] def activate(self, *args, **kwargs): - nltk.download('stopwords') self._stopwords = stopwords.words('english') - self._local_path=os.path.dirname(os.path.abspath(__file__)) + dictionary={} + dictionary['es'] = {} + with self.open(self.anew_path_es,'rb') as tabfile: + reader = csv.reader(tabfile, delimiter='\t') + for row in reader: + dictionary['es'][row[2]]={} + 
dictionary['es'][row[2]]['V']=row[3] + dictionary['es'][row[2]]['A']=row[5] + dictionary['es'][row[2]]['D']=row[7] + dictionary['en'] = {} + with self.open(self.anew_path_en,'rb') as tabfile: + reader = csv.reader(tabfile, delimiter='\t') + for row in reader: + dictionary['en'][row[0]]={} + dictionary['en'][row[0]]['V']=row[2] + dictionary['en'][row[0]]['A']=row[4] + dictionary['en'][row[0]]['D']=row[6] + self._dictionary = dictionary def _my_preprocessor(self, text): @@ -63,8 +132,8 @@ class EmotionTextPlugin(SentimentPlugin): for token in sentence: if token[0].lower() not in self._stopwords: unigrams_words.append(token[0].lower()) - unigrams_lemmas.append(token[4]) - pos_tagged.append(token[1]) + unigrams_lemmas.append(token[4]) + pos_tagged.append(token[1]) return unigrams_lemmas,unigrams_words,pos_tagged @@ -83,7 +152,7 @@ class EmotionTextPlugin(SentimentPlugin): value=new_value emotion=state return emotion - + def _extract_features(self, tweet,dictionary,lang): feature_set={} ngrams_lemmas,ngrams_words,pos_tagged = self._extract_ngrams(tweet,lang) @@ -116,41 +185,76 @@ class EmotionTextPlugin(SentimentPlugin): def analyse_entry(self, entry, params): - text_input = entry.get("text", None) + text_input = entry.text - text= self._my_preprocessor(text_input) - dictionary={} - lang = params.get("language", "auto") - if lang == 'es': - with open(self._local_path + self.anew_path_es,'rb') as tabfile: - reader = csv.reader(tabfile, delimiter='\t') - for row in reader: - dictionary[row[2]]={} - dictionary[row[2]]['V']=row[3] - dictionary[row[2]]['A']=row[5] - dictionary[row[2]]['D']=row[7] - else: - with open(self._local_path + self.anew_path_en,'rb') as tabfile: - reader = csv.reader(tabfile, delimiter='\t') - for row in reader: - dictionary[row[0]]={} - dictionary[row[0]]['V']=row[2] - dictionary[row[0]]['A']=row[4] - dictionary[row[0]]['D']=row[6] + text = self._my_preprocessor(text_input) + dictionary = self._dictionary[params['language']] - 
feature_set=self._extract_features(text,dictionary,lang) + feature_set=self._extract_features(text, dictionary, params['language']) emotions = EmotionSet() emotions.id = "Emotions0" emotion1 = Emotion(id="Emotion0") - emotion1["onyx:hasEmotionCategory"] = self.emotions_ontology[feature_set['emotion']] emotion1["http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#valence"] = feature_set['V'] emotion1["http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal"] = feature_set['A'] emotion1["http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance"] = feature_set['D'] + emotion1.prov(self) + emotions.prov(self) + emotions.onyx__hasEmotion.append(emotion1) - entry.emotions = [emotions,] + entry.emotions = [emotions, ] yield entry + + ontology = "http://gsi.dit.upm.es/ontologies/wnaffect/ns#" + test_cases = [ + { + 'input': 'I hate you', + 'expected': { + 'emotions': [{ + 'onyx:hasEmotion': [{ + 'onyx:hasEmotionCategory': ontology + 'anger', + }] + }] + } + }, { + 'input': 'i am sad', + 'expected': { + 'emotions': [{ + 'onyx:hasEmotion': [{ + 'onyx:hasEmotionCategory': ontology + 'sadness', + }] + }] + } + }, { + 'input': 'i am happy with my marks', + 'expected': { + 'emotions': [{ + 'onyx:hasEmotion': [{ + 'onyx:hasEmotionCategory': ontology + 'joy', + }] + }] + } + }, { + 'input': 'This movie is scary', + 'expected': { + 'emotions': [{ + 'onyx:hasEmotion': [{ + 'onyx:hasEmotionCategory': ontology + 'negative-fear', + }] + }] + } + }, { + 'input': 'this cake is disgusting' , + 'expected': { + 'emotions': [{ + 'onyx:hasEmotion': [{ + 'onyx:hasEmotionCategory': ontology + 'negative-fear', + }] + }] + } + } + ] diff --git a/emotion-anew/emotion-anew.pyc b/emotion-anew/emotion-anew.pyc index a3e0427..c5cbdf5 100644 Binary files a/emotion-anew/emotion-anew.pyc and b/emotion-anew/emotion-anew.pyc differ diff --git a/emotion-anew/emotion-anew.senpy b/emotion-anew/emotion-anew.senpy index 5bce13e..3bfde1b 100644 --- 
a/emotion-anew/emotion-anew.senpy +++ b/emotion-anew/emotion-anew.senpy @@ -1,64 +1,12 @@ -{ - "name": "emotion-anew", - "module": "emotion-anew", - "description": "This plugin consists on an emotion classifier using ANEW lexicon dictionary to calculate VAD (valence-arousal-dominance) of the sentence and determinate which emotion is closer to this value. Each emotion has a centroid, calculated according to this article: http://www.aclweb.org/anthology/W10-0208. The plugin is going to look for the words in the sentence that appear in the ANEW dictionary and calculate the average VAD score for the sentence. Once this score is calculated, it is going to seek the emotion that is closest to this value.", - "author": "@icorcuera", - "version": "0.5", - "extra_params": { - "language": { - "aliases": ["language", "l"], - "required": true, - "options": ["es","en"], - "default": "en" - } - }, - "requirements": {}, - "anew_path_es": "/data/Dictionary/Redondo(2007).csv", - "anew_path_en": "/data/Dictionary/ANEW2010All.txt", - "centroids": { - "anger": { - "A": 6.95, - "D": 5.1, - "V": 2.7 - }, - "disgust": { - "A": 5.3, - "D": 8.05, - "V": 2.7 - }, - "fear": { - "A": 6.5, - "D": 3.6, - "V": 3.2 - }, - "joy": { - "A": 7.22, - "D": 6.28, - "V": 8.6 - }, - "sadness": { - "A": 5.21, - "D": 2.82, - "V": 2.21 - } - }, - "emotions_ontology": { - "anger": "http://gsi.dit.upm.es/ontologies/wnaffect/ns#anger", - "disgust": "http://gsi.dit.upm.es/ontologies/wnaffect/ns#disgust", - "fear": "http://gsi.dit.upm.es/ontologies/wnaffect/ns#negative-fear", - "joy": "http://gsi.dit.upm.es/ontologies/wnaffect/ns#joy", - "neutral": "http://gsi.dit.upm.es/ontologies/wnaffect/ns#neutral-emotion", - "sadness": "http://gsi.dit.upm.es/ontologies/wnaffect/ns#sadness" - }, - "requirements": [ - "numpy", - "pandas", - "nltk", - "scipy", - "scikit-learn", - "textblob", - "pattern", - "lxml" - ], - "onyx:usesEmotionModel": "emoml:big6", -} +--- +module: emotion-anew +requirements: +- numpy +- pandas +- nltk 
+- scipy +- scikit-learn +- textblob +- pattern +- lxml +onyx:usesEmotionModel: "emoml:big6" diff --git a/emotion-anew/test.py b/emotion-anew/test.py deleted file mode 100644 index cdfdf9d..0000000 --- a/emotion-anew/test.py +++ /dev/null @@ -1,45 +0,0 @@ -import os -import logging -logging.basicConfig() -try: - import unittest.mock as mock -except ImportError: - import mock -from senpy.extensions import Senpy -from flask import Flask -import unittest -import re - -class emoTextANEWTest(unittest.TestCase): - - def setUp(self): - self.app = Flask("test_plugin") - self.dir = os.path.join(os.path.dirname(__file__)) - self.senpy = Senpy(plugin_folder=self.dir, default_plugins=False) - self.senpy.init_app(self.app) - - def tearDown(self): - self.senpy.deactivate_plugin("EmoTextANEW", sync=True) - - def test_analyse(self): - plugin = self.senpy.plugins["EmoTextANEW"] - plugin.activate() - - ontology = "http://gsi.dit.upm.es/ontologies/wnaffect/ns#" - texts = {'I hate you': 'anger', - 'i am sad': 'sadness', - 'i am happy with my marks': 'joy', - 'This movie is scary': 'negative-fear', - 'this cake is disgusting' : 'negative-fear'} - - for text in texts: - response = plugin.analyse(input=text) - expected = texts[text] - emotionSet = response.entries[0].emotions[0] - - assert emotionSet['onyx:hasEmotion'][0]['onyx:hasEmotionCategory'] == ontology+expected - - plugin.deactivate() - -if __name__ == '__main__': - unittest.main() diff --git a/emotion-wnaffect/.gitmodules b/emotion-wnaffect/.gitmodules deleted file mode 100644 index 309bd76..0000000 --- a/emotion-wnaffect/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "data"] - path = data - url = ../data/emotion-wnaffect diff --git a/emotion-wnaffect/emotion-wnaffect.py b/emotion-wnaffect/emotion-wnaffect.py index 638640e..0a59b7b 100644 --- a/emotion-wnaffect/emotion-wnaffect.py +++ b/emotion-wnaffect/emotion-wnaffect.py @@ -2,7 +2,6 @@ from __future__ import division import re import nltk -import logging import os 
import string import xml.etree.ElementTree as ET @@ -14,11 +13,29 @@ from senpy.plugins import EmotionPlugin, AnalysisPlugin, ShelfMixin from senpy.models import Results, EmotionSet, Entry, Emotion -class EmotionTextPlugin(EmotionPlugin, ShelfMixin): - '''Emotion classifier using WordNet-Affect to calculate the percentage - of each emotion. This plugin classifies among 6 emotions: anger,fear,disgust,joy,sadness - or neutral. The only available language is English (en) - ''' +class WNAffect(EmotionPlugin, ShelfMixin): + ''' + Emotion classifier using WordNet-Affect to calculate the percentage + of each emotion. This plugin classifies among 6 emotions: anger,fear,disgust,joy,sadness + or neutral. The only available language is English (en) + ''' + name = 'emotion-wnaffect' + author = ["@icorcuera", "@balkian"] + version = '0.2' + extra_params = { + 'language': { + "@id": 'lang_wnaffect', + 'aliases': ['language', 'l'], + 'required': True, + 'options': ['en',] + } + } + synsets_path = "a-synsets.xml" + hierarchy_path = "a-hierarchy.xml" + wn16_path = "wordnet1.6/dict" + onyx__usesEmotionModel = "emoml:big6" + nltk_resources = ['stopwords', 'averaged_perceptron_tagger', 'wordnet'] + def _load_synsets(self, synsets_path): """Returns a dictionary POS tag -> synset offset -> emotion (str -> int -> str).""" tree = ET.parse(synsets_path) @@ -56,7 +73,6 @@ class EmotionTextPlugin(EmotionPlugin, ShelfMixin): def activate(self, *args, **kwargs): - nltk.download(['stopwords', 'averaged_perceptron_tagger', 'wordnet']) self._stopwords = stopwords.words('english') self._wnlemma = wordnet.WordNetLemmatizer() self._syntactics = {'N': 'n', 'V': 'v', 'J': 'a', 'S': 's', 'R': 'r'} @@ -87,16 +103,16 @@ class EmotionTextPlugin(EmotionPlugin, ShelfMixin): 'sadness': 'sadness' } - self._load_emotions(local_path + self.hierarchy_path) + self._load_emotions(self.find_file(self.hierarchy_path)) if 'total_synsets' not in self.sh: - total_synsets = self._load_synsets(local_path + 
self.synsets_path) + total_synsets = self._load_synsets(self.find_file(self.synsets_path)) self.sh['total_synsets'] = total_synsets self._total_synsets = self.sh['total_synsets'] self._wn16_path = self.wn16_path - self._wn16 = WordNetCorpusReader(os.path.abspath("{0}".format(local_path + self._wn16_path)), nltk.data.find(local_path + self._wn16_path)) + self._wn16 = WordNetCorpusReader(self.find_file(self._wn16_path), nltk.data.find(self.find_file(self._wn16_path))) def deactivate(self, *args, **kwargs): diff --git a/emotion-wnaffect/emotion-wnaffect.senpy b/emotion-wnaffect/emotion-wnaffect.senpy index e769205..54eea36 100644 --- a/emotion-wnaffect/emotion-wnaffect.senpy +++ b/emotion-wnaffect/emotion-wnaffect.senpy @@ -1,24 +1,5 @@ --- -name: emotion-wnaffect module: emotion-wnaffect -description: 'Emotion classifier using WordNet-Affect to calculate the percentage - of each emotion. This plugin classifies among 6 emotions: anger,fear,disgust,joy,sadness - or neutral. The only available language is English (en)' -author: "@icorcuera @balkian" -version: '0.2' -extra_params: - language: - "@id": lang_wnaffect - aliases: - - language - - l - required: false - options: - - en -synsets_path: "/a-synsets.xml" -hierarchy_path: "/a-hierarchy.xml" -wn16_path: "/wordnet1.6/dict" -onyx:usesEmotionModel: emoml:big6 requirements: - nltk>=3.0.5 - lxml>=3.4.2 diff --git a/emotion-wnaffect/wnaffect.py b/emotion-wnaffect/wnaffect.py index 29cf64d..e6e726c 100644 --- a/emotion-wnaffect/wnaffect.py +++ b/emotion-wnaffect/wnaffect.py @@ -17,7 +17,9 @@ from nltk.corpus import WordNetCorpusReader import xml.etree.ElementTree as ET class WNAffect: - """WordNet-Affect ressource.""" + """WordNet-Affect resource.""" + + nltk_resources = ['averaged_perceptron_tagger'] def __init__(self, wordnet16_dir, wn_domains_dir): """Initializes the WordNet-Affect object.""" diff --git a/example-plugin/example.py b/example-plugin/example.py deleted file mode 100644 index 4e5f293..0000000 --- 
a/example-plugin/example.py +++ /dev/null @@ -1,19 +0,0 @@ -from senpy.plugins import SentimentPlugin -from senpy.models import Response, Entry - -import logging - -logger = logging.getLogger(__name__) - -class ExamplePlugin(SentimentPlugin): - - def analyse(self, *args, **kwargs): - logger.warn('Analysing with the example.') - logger.warn('The answer to this response is: %s.' % kwargs['parameter']) - resp = Response() - ent = Entry(kwargs['input']) - ent['example:reversed'] = kwargs['input'][::-1] - ent['example:the_answer'] = kwargs['parameter'] - resp.entries.append(ent) - - return resp diff --git a/example-plugin/example.senpy b/example-plugin/example.senpy deleted file mode 100644 index 6c667f4..0000000 --- a/example-plugin/example.senpy +++ /dev/null @@ -1,17 +0,0 @@ -{ - "name": "ExamplePlugin", - "module": "example", - "description": "I am just an example", - "author": "@balkian", - "version": "0.1", - "extra_params": { - "parameter": { - "@id": "parameter", - "aliases": ["parameter", "param"], - "required": true, - "default": 42 - } - }, - "requirements": ["noop"], - "custom_attribute": "42" -} diff --git a/example-plugin/example_plugin.py b/example-plugin/example_plugin.py new file mode 100644 index 0000000..3ce1233 --- /dev/null +++ b/example-plugin/example_plugin.py @@ -0,0 +1,37 @@ +from senpy.plugins import Analysis +from senpy.models import Response, Entry + +import logging + +logger = logging.getLogger(__name__) + +class ExamplePlugin(Analysis): + '''A *VERY* simple plugin that exemplifies the development of Senpy Plugins''' + name = "example-plugin" + author = "@balkian" + version = "0.1" + extra_params = { + "parameter": { + "@id": "parameter", + "aliases": ["parameter", "param"], + "required": True, + "default": 42 + } + } + custom_attribute = "42" + + def analyse_entry(self, entry, params): + logger.debug('Analysing with the example.') + logger.debug('The answer to this response is: %s.' 
% params['parameter']) + resp = Response() + entry['example:reversed'] = entry.text[::-1] + entry['example:the_answer'] = params['parameter'] + + yield entry + + test_cases = [{ + 'input': 'hello', + 'expected': { + 'example:reversed': 'olleh' + } + }] diff --git a/example-plugin/test_example.py b/example-plugin/test_example.py deleted file mode 100644 index f6dafc5..0000000 --- a/example-plugin/test_example.py +++ /dev/null @@ -1,23 +0,0 @@ -import unittest -from flask import Flask -import os - -from senpy.extensions import Senpy - -class emoTextWAFTest(unittest.TestCase): - - def setUp(self): - self.app = Flask("Example") - self.dir = os.path.join(os.path.dirname(__file__)) - self.senpy = Senpy(plugin_folder=self.dir, default_plugins=False) - self.senpy.init_app(self.app) - - def tearDown(self): - self.senpy.deactivate_plugin("ExamplePlugin", sync=True) - - def test_analyse(self): - assert len(self.senpy.plugins.keys()) == 1 - assert True - -if __name__ == '__main__': - unittest.main() diff --git a/sentiment-basic/.gitmodules b/sentiment-basic/.gitmodules deleted file mode 100644 index 3f87a17..0000000 --- a/sentiment-basic/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "data"] - path = data - url = ../data/sentiment-basic diff --git a/sentiment-basic/sentiment-basic.py b/sentiment-basic/sentiment-basic.py index 9dbe4a9..c62e4f4 100644 --- a/sentiment-basic/sentiment-basic.py +++ b/sentiment-basic/sentiment-basic.py @@ -1,5 +1,6 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- import os -import logging import string import nltk import pickle @@ -13,24 +14,40 @@ from os import path from senpy.plugins import SentimentPlugin, SenpyPlugin from senpy.models import Results, Entry, Sentiment -logger = logging.getLogger(__name__) - -class SentiTextPlugin(SentimentPlugin): +class SentimentBasic(SentimentPlugin): + ''' + Sentiment classifier using rule-based classification for Spanish. Based on english to spanish translation and SentiWordNet sentiment knowledge. 
This is a demo plugin that uses only some features from the TASS 2015 classifier. To use the entirely functional classifier you can use the service in: http://senpy.cluster.gsi.dit.upm.es. + ''' + name = "sentiment-basic" + author = "github.com/nachtkatze" + version = "0.1.1" + extra_params = { + "language": { + "aliases": ["language", "l"], + "required": True, + "options": ["en","es", "it", "fr", "auto"], + "default": "auto" + } + } + sentiword_path = "SentiWordNet_3.0.txt" + pos_path = "unigram_spanish.pickle" + maxPolarityValue = 1 + minPolarityValue = -1 + nltk_resources = ['punkt','wordnet'] def _load_swn(self): - self.swn_path = path.join(path.abspath(path.dirname(__file__)), self.sentiword_path) + self.swn_path = self.find_file(self.sentiword_path) swn = SentiWordNet(self.swn_path) return swn def _load_pos_tagger(self): - self.pos_path = path.join(path.abspath(path.dirname(__file__)), self.pos_path) + self.pos_path = self.find_file(self.pos_path) with open(self.pos_path, 'r') as f: tagger = pickle.load(f) return tagger def activate(self, *args, **kwargs): - nltk.download(['punkt','wordnet']) self._swn = self._load_swn() self._pos_tagger = self._load_pos_tagger() @@ -54,11 +71,6 @@ class SentiTextPlugin(SentimentPlugin): tokens[i]['tokens'] = self._pos_tagger.tag(tokens[i]['tokens']) return tokens - # def _stopwords(sentences, lang='english'): - # for i in sentences: - # sentences[i]['tokens'] = [t for t in sentences[i]['tokens'] if t not in nltk.corpus.stopwords.words(lang)] - # return sentences - def _compare_synsets(self, synsets, tokens, i): for synset in synsets: for word in tokens[i]['lemmas']: @@ -71,7 +83,7 @@ class SentiTextPlugin(SentimentPlugin): def analyse_entry(self, entry, params): language = params.get("language") - text = entry.get("text", None) + text = entry.text tokens = self._tokenize(text) tokens = self._pos(tokens) sufixes = {'es':'spa','en':'eng','it':'ita','fr':'fra'} @@ -130,19 +142,41 @@ class SentiTextPlugin(SentimentPlugin): 
except: if n_pos == 0 and n_neg == 0: g_score = 0.5 - polarity = 'marl:Neutral' - polarity_value = 0 - if g_score > 0.5: + if g_score >= 0.5: polarity = 'marl:Positive' polarity_value = 1 elif g_score < 0.5: polarity = 'marl:Negative' polarity_value = -1 + else: + polarity = 'marl:Neutral' + polarity_value = 0 opinion = Sentiment(id="Opinion0"+'_'+str(i), marl__hasPolarity=polarity, marl__polarityValue=polarity_value) - + opinion.prov(self) entry.sentiments.append(opinion) yield entry + + test_cases = [ + { + 'input': u'Odio ir al cine', + 'params': {'language': 'es'}, + 'polarity': 'marl:Negative' + + }, + { + 'input': u'El cielo está nublado', + 'params': {'language': 'es'}, + 'polarity': 'marl:Positive' + + }, + { + 'input': u'Esta tarta está muy buena', + 'params': {'language': 'es'}, + 'polarity': 'marl:Negative' + + } + ] diff --git a/sentiment-basic/sentiment-basic.senpy b/sentiment-basic/sentiment-basic.senpy index 13b2429..fee2f7c 100644 --- a/sentiment-basic/sentiment-basic.senpy +++ b/sentiment-basic/sentiment-basic.senpy @@ -1,24 +1,7 @@ -{ - "name": "sentiment-basic", - "module": "sentiment-basic", - "description": "Sentiment classifier using rule-based classification for Spanish. Based on english to spanish translation and SentiWordNet sentiment knowledge. This is a demo plugin that uses only some features from the TASS 2015 classifier. 
To use the entirely functional classifier you can use the service in: http://senpy.cluster.gsi.dit.upm.es.", - "author": "github.com/nachtkatze", - "version": "0.1", - "requirements": [ - "nltk>=3.0.5", - "scipy>=0.14.0", - "textblob" - ], - "extra_params": { - "language": { - "aliases": ["language", "l"], - "required": true, - "options": ["en","es", "it", "fr", "auto"], - "default": "auto" - }, - }, - "sentiword_path": "data/SentiWordNet_3.0.txt", - "pos_path": "data/unigram_spanish.pickle", - "maxPolarityValue": "1", - "minPolarityValue": "-1" -} +--- +module: sentiment-basic +requirements: +- nltk>=3.0.5 +- scipy>=0.14.0 +- textblob + diff --git a/sentiment-basic/sentiwn.py b/sentiment-basic/sentiwn.py index 42631e5..0c08c31 100644 --- a/sentiment-basic/sentiwn.py +++ b/sentiment-basic/sentiwn.py @@ -46,7 +46,7 @@ class SentiWordNet(object): pos,syn_set_id,pos_score,neg_score,syn_set_score,\ gloss = fields except: - print "Found data without all details" + print("Found data without all details") pass if pos and syn_set_score: @@ -67,4 +67,4 @@ class SentiWordNet(object): senti_scores.append({"pos":pos_val,"neg":neg_val,\ "obj": 1.0 - (pos_val - neg_val),'synset':synset}) - return senti_scores \ No newline at end of file + return senti_scores diff --git a/sentiment-basic/test.py b/sentiment-basic/test.py deleted file mode 100644 index 1bb0c75..0000000 --- a/sentiment-basic/test.py +++ /dev/null @@ -1,42 +0,0 @@ -import os -import logging -logging.basicConfig() -try: - import unittest.mock as mock -except ImportError: - import mock -from senpy.extensions import Senpy -from flask import Flask -import unittest - -class SentiTextTest(unittest.TestCase): - - def setUp(self): - self.app = Flask("test_plugin") - self.dir = os.path.join(os.path.dirname(__file__)) - self.senpy = Senpy(plugin_folder=self.dir, default_plugins=False) - self.senpy.init_app(self.app) - - def tearDown(self): - self.senpy.deactivate_plugin("SentiText", sync=True) - - def test_analyse(self): - 
plugin = self.senpy.plugins["SentiText"] - plugin.activate() - - texts = {'Odio ir al cine' : 'marl:Neutral', - 'El cielo esta nublado' : 'marl:Positive', - 'Esta tarta esta muy buena' : 'marl:Neutral'} - - for text in texts: - response = plugin.analyse(input=text) - sentimentSet = response.entries[0].sentiments[0] - print sentimentSet - expected = texts[text] - - assert sentimentSet['marl:hasPolarity'] == expected - - plugin.deactivate() - -if __name__ == '__main__': - unittest.main() \ No newline at end of file diff --git a/sentiment-meaningCloud/sentiment_meaningcloud.py b/sentiment-meaningCloud/meaningcloud_plugin.py similarity index 61% rename from sentiment-meaningCloud/sentiment_meaningcloud.py rename to sentiment-meaningCloud/meaningcloud_plugin.py index 7f27fc9..a6a84c5 100644 --- a/sentiment-meaningCloud/sentiment_meaningcloud.py +++ b/sentiment-meaningCloud/meaningcloud_plugin.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- import time -import logging import requests import json import string @@ -9,20 +8,33 @@ import os from os import path import time from senpy.plugins import SentimentPlugin -from senpy.models import Results, Entry, Sentiment, Error +from senpy.models import Results, Entry, Entity, Topic, Sentiment, Error +from senpy.utils import check_template -import mocked_request +from mocked_request import mocked_requests_post try: from unittest import mock except ImportError: import mock -logger = logging.getLogger(__name__) - class MeaningCloudPlugin(SentimentPlugin): - version = "0.1" + ''' + Sentiment analysis with meaningCloud service. + To use this plugin, you need to obtain an API key from meaningCloud signing up here: + https://www.meaningcloud.com/developer/login + + When you had obtained the meaningCloud API Key, you have to provide it to the plugin, using param apiKey. + Example request: + + http://senpy.cluster.gsi.dit.upm.es/api/?algo=meaningCloud&language=en&apiKey=&input=I%20love%20Madrid. 
+ ''' + name = 'sentiment-meaningcloud' + author = 'GSI UPM' + version = "1.1" + maxPolarityValue = 1 + minPolarityValue = -1 extra_params = { "language": { @@ -37,7 +49,6 @@ class MeaningCloudPlugin(SentimentPlugin): } } - """MeaningCloud plugin uses API from Meaning Cloud to perform sentiment analysis.""" def _polarity(self, value): if 'NONE' in value: @@ -81,7 +92,7 @@ class MeaningCloudPlugin(SentimentPlugin): if not api_response.get('score_tag'): raise Error(r.json()) entry['language_detected'] = lang - logger.info(api_response) + self.log.debug(api_response) agg_polarity, agg_polarityValue = self._polarity( api_response.get('score_tag', None)) agg_opinion = Sentiment( @@ -89,13 +100,14 @@ class MeaningCloudPlugin(SentimentPlugin): marl__hasPolarity=agg_polarity, marl__polarityValue=agg_polarityValue, marl__opinionCount=len(api_response['sentence_list'])) + agg_opinion.prov(self) entry.sentiments.append(agg_opinion) - logger.info(api_response['sentence_list']) + self.log.debug(api_response['sentence_list']) count = 1 for sentence in api_response['sentence_list']: for nopinion in sentence['segment_list']: - logger.info(nopinion) + self.log.debug(nopinion) polarity, polarityValue = self._polarity( nopinion.get('score_tag', None)) opinion = Sentiment( @@ -107,64 +119,63 @@ class MeaningCloudPlugin(SentimentPlugin): nif__beginIndex=nopinion.get('inip', None), nif__endIndex=nopinion.get('endp', None)) count += 1 + opinion.prov(self) entry.sentiments.append(opinion) mapper = {'es': 'es.', 'en': '', 'ca': 'es.', 'it':'it.', 'fr':'fr.', 'pt':'pt.'} for sent_entity in api_response_topics['entity_list']: - resource = "_".join(sent_entity.get('form', None).split()) - entity = Sentiment( + entity = Entity( id="Entity{}".format(sent_entity.get('id')), - marl__describesObject="http://{}dbpedia.org/resource/{}".format( + itsrdf__taIdentRef="http://{}dbpedia.org/resource/{}".format( mapper[lang], resource), nif__anchorOf=sent_entity.get('form', None), 
nif__beginIndex=sent_entity['variant_list'][0].get('inip', None), nif__endIndex=sent_entity['variant_list'][0].get('endp', None)) - entity[ - '@type'] = "ODENTITY_{}".format( - sent_entity['sementity'].get('type', None).split(">")[-1]) + sementity = sent_entity['sementity'].get('type', None).split(">")[-1] + entity['@type'] = "ODENTITY_{}".format(sementity) + entity.prov(self) entry.entities.append(entity) for topic in api_response_topics['concept_list']: if 'semtheme_list' in topic: for theme in topic['semtheme_list']: - concept = Sentiment( - id="Topic{}".format(topic.get('id')), - prov__wasDerivedFrom="http://dbpedia.org/resource/{}". - format(theme['type'].split('>')[-1])) - concept[ - '@type'] = "ODTHEME_{}".format( - theme['type'].split(">")[-1]) + concept = Topic() + concept.id = "Topic{}".format(topic.get('id')) + concept['@type'] = "ODTHEME_{}".format(theme['type'].split(">")[-1]) + concept['fam:topic-reference'] = "http://dbpedia.org/resource/{}".format(theme['type'].split('>')[-1]) + entry.prov(self) entry.topics.append(concept) yield entry - @mock.patch('requests.post', side_effect=mocked_request.mocked_requests_post) + @mock.patch('requests.post', side_effect=mocked_requests_post) def test(self, *args, **kwargs): results = list() - params = {'algo': 'sentiment-meaningCloud', - 'intype': 'direct', - 'expanded-jsonld': 0, - 'informat': 'text', - 'prefix': '', - 'plugin_type': 'analysisPlugin', - 'urischeme': 'RFC5147String', - 'outformat': 'json-ld', - 'i': 'Hello World', - 'input': 'Hello World', - 'conversion': 'full', + params = {'algo': 'sentiment-meaningCloud', + 'intype': 'direct', + 'expanded-jsonld': 0, + 'informat': 'text', + 'prefix': '', + 'plugin_type': 'analysisPlugin', + 'urischeme': 'RFC5147String', + 'outformat': 'json-ld', + 'i': 'Hello World', + 'input': 'Hello World', + 'conversion': 'full', 'language': 'en', - 'apikey': '00000', + 'apikey': '00000', 'algorithm': 'sentiment-meaningCloud'} - for i in range(100): - res = 
next(self.analyse_entry(Entry(nif__isString="Hello World Obama"), params)) - results.append(res.sentiments[0]['marl:hasPolarity']) - results.append(res.topics[0]['prov:wasDerivedFrom']) - results.append(res.entities[0]['prov:wasDerivedFrom']) + res = next(self.analyse_entry(Entry(nif__isString="Hello World Obama"), params)) - assert 'marl:Neutral' in results - assert 'http://dbpedia.org/resource/Astronomy' in results - assert 'http://dbpedia.org/resource/Obama' in results + check_template(res, + {'sentiments': [ + {'marl:hasPolarity': 'marl:Neutral'}], + 'entities': [ + {'itsrdf:taIdentRef': 'http://dbpedia.org/resource/Obama'}], + 'topics': [ + {'fam:topic-reference': 'http://dbpedia.org/resource/Astronomy'}] + }) if __name__ == '__main__': from senpy import easy_test diff --git a/sentiment-meaningCloud/mocked_request.py b/sentiment-meaningCloud/mocked_request.py index 113ef2e..ae5abce 100644 --- a/sentiment-meaningCloud/mocked_request.py +++ b/sentiment-meaningCloud/mocked_request.py @@ -7,7 +7,6 @@ def mocked_requests_post(*args, **kwargs): def json(self): return self.json_data - print("Mocking request") if args[0] == 'http://api.meaningcloud.com/sentiment-2.1': return MockResponse({ 'model': 'general_en', diff --git a/sentiment-meaningCloud/sentiment_meaningcloud.senpy b/sentiment-meaningCloud/sentiment_meaningcloud.senpy deleted file mode 100644 index eaa417b..0000000 --- a/sentiment-meaningCloud/sentiment_meaningcloud.senpy +++ /dev/null @@ -1,11 +0,0 @@ -{ - "name": "sentiment-meaningcloud", - "module": "sentiment_meaningcloud", - "description": "Sentiment analysis with meaningCloud service. To use this plugin, you need to obtain an API key from meaningCloud signing up here: https://www.meaningcloud.com/developer/login. When you had obtained the meaningCloud API Key, you have to provide it to the plugin, using param apiKey. 
Example request: http://senpy.cluster.gsi.dit.upm.es/api/?algo=meaningCloud&language=en&apiKey=&input=I%20love%20Madrid.", - "author": "GSI UPM", - "version": "1.0", - "requirements": {}, - "maxPolarityValue": "1", - "minPolarityValue": "-1" - -} diff --git a/sentiment-vader/README.md b/sentiment-vader/README.md index c97f3b7..e47109a 100644 --- a/sentiment-vader/README.md +++ b/sentiment-vader/README.md @@ -1,10 +1,12 @@ # Sentimet-vader plugin -========= - Vader is a plugin developed at GSI UPM for sentiment analysis. +The response of this plugin uses [Marl ontology](https://www.gsi.dit.upm.es/ontologies/marl/) developed at GSI UPM for semantic web. + +## Acknowledgements + +This plugin uses the vaderSentiment module underneath, which is described in the paper: -For developing this plugin, it has been used the module vaderSentiment, which is described in the paper: VADER: A Parsimonious Rule-based Model for Sentiment Analysis of Social Media Text C.J. Hutto and Eric Gilbert Eighth International Conference on Weblogs and Social Media (ICWSM-14). Ann Arbor, MI, June 2014. @@ -15,16 +17,16 @@ For more information about the functionality, check the official repository https://github.com/cjhutto/vaderSentiment -The response of this plugin uses [Marl ontology](https://www.gsi.dit.upm.es/ontologies/marl/) developed at GSI UPM for semantic web. - ## Usage -Params accepted: +Parameters: + - Language: es (Spanish), en(English). - Input: Text to analyse. Example request: + ``` http://senpy.cluster.gsi.dit.upm.es/api/?algo=sentiment-vader&language=en&input=I%20love%20Madrid ``` diff --git a/sentiment-vader/README.txt b/sentiment-vader/README.txt deleted file mode 100644 index bf89160..0000000 --- a/sentiment-vader/README.txt +++ /dev/null @@ -1,16 +0,0 @@ -========== - -This README file describes the plugin vaderSentiment. 
- -For developing this plugin, it has been used the module vaderSentiment, which is described in the paper: - VADER: A Parsimonious Rule-based Model for Sentiment Analysis of Social Media Text - C.J. Hutto and Eric Gilbert - Eighth International Conference on Weblogs and Social Media (ICWSM-14). Ann Arbor, MI, June 2014. - -If you use this plugin in your research, please cite the above paper - -For more information about the functionality, check the official repository - -https://github.com/cjhutto/vaderSentiment - -======== \ No newline at end of file diff --git a/sentiment-vader/sentiment-vader.senpy b/sentiment-vader/sentiment-vader.senpy deleted file mode 100644 index be3ba15..0000000 --- a/sentiment-vader/sentiment-vader.senpy +++ /dev/null @@ -1,25 +0,0 @@ -{ - "name": "sentiment-vader", - "module": "sentiment-vader", - "description": "Sentiment classifier using vaderSentiment module. Params accepted: Language: {en, es}. The output uses Marl ontology developed at GSI UPM for semantic web.", - "author": "@icorcuera", - "version": "0.1", - "extra_params": { - "language": { - "@id": "lang_rand", - "aliases": ["language", "l"], - "required": false, - "options": ["es", "en", "auto"] - }, - - "aggregate": { - "aliases": ["aggregate","agg"], - "options": ["true", "false"], - "required": false, - "default": false - - } - - }, - "requirements": {} -} diff --git a/sentiment-vader/test.py b/sentiment-vader/test.py deleted file mode 100644 index b85c976..0000000 --- a/sentiment-vader/test.py +++ /dev/null @@ -1,44 +0,0 @@ -import os -import logging -logging.basicConfig() -try: - import unittest.mock as mock -except ImportError: - import mock -from senpy.extensions import Senpy -from flask import Flask -from flask.ext.testing import TestCase -import unittest - -class vaderTest(unittest.TestCase): - - def setUp(self): - self.app = Flask("test_plugin") - self.dir = os.path.join(os.path.dirname(__file__)) - self.senpy = Senpy(plugin_folder=self.dir, default_plugins=False) - 
self.senpy.init_app(self.app) - - def tearDown(self): - self.senpy.deactivate_plugin("vaderSentiment", sync=True) - - def test_analyse(self): - plugin = self.senpy.plugins["vaderSentiment"] - plugin.activate() - - texts = {'I am tired :(' : 'marl:Negative', - 'I love pizza' : 'marl:Positive', - 'I like going to the cinema :)' : 'marl:Positive', - 'This cake is disgusting' : 'marl:Negative'} - - for text in texts: - response = plugin.analyse(input=text) - expected = texts[text] - sentimentSet = response.entries[0].sentiments - - max_sentiment = max(sentimentSet, key=lambda x: x['marl:polarityValue']) - assert max_sentiment['marl:hasPolarity'] == expected - - plugin.deactivate() - -if __name__ == '__main__': - unittest.main() diff --git a/sentiment-vader/sentiment-vader.py b/sentiment-vader/vader_plugin.py similarity index 50% rename from sentiment-vader/sentiment-vader.py rename to sentiment-vader/vader_plugin.py index 5bec3e8..ec66017 100644 --- a/sentiment-vader/sentiment-vader.py +++ b/sentiment-vader/vader_plugin.py @@ -5,15 +5,37 @@ from senpy.plugins import SentimentPlugin, SenpyPlugin from senpy.models import Results, Sentiment, Entry import logging -logger = logging.getLogger(__name__) -class vaderSentimentPlugin(SentimentPlugin): +class VaderSentimentPlugin(SentimentPlugin): + ''' + Sentiment classifier using vaderSentiment module. Params accepted: Language: {en, es}. The output uses Marl ontology developed at GSI UPM for semantic web. 
+ ''' + name = "sentiment-vader" + module = "sentiment-vader" + author = "@icorcuera" + version = "0.1.1" + extra_params = { + "language": { + "@id": "lang_rand", + "aliases": ["language", "l"], + "default": "auto", + "options": ["es", "en", "auto"] + }, - def analyse_entry(self,entry,params): + "aggregate": { + "aliases": ["aggregate","agg"], + "options": ["true", "false"], + "default": False + } - logger.debug("Analysing with params {}".format(params)) + } + requirements = {} - text_input = entry.get("text", None) + def analyse_entry(self, entry, params): + + self.log.debug("Analysing with params {}".format(params)) + + text_input = entry.text aggregate = params['aggregate'] score = sentiment(text_input) @@ -22,15 +44,18 @@ class vaderSentimentPlugin(SentimentPlugin): marl__hasPolarity= "marl:Positive", marl__algorithmConfidence= score['pos'] ) + opinion0.prov(self) opinion1 = Sentiment(id= "Opinion_negative", marl__hasPolarity= "marl:Negative", marl__algorithmConfidence= score['neg'] ) + opinion1.prov(self) opinion2 = Sentiment(id= "Opinion_neutral", marl__hasPolarity = "marl:Neutral", marl__algorithmConfidence = score['neu'] ) - + opinion2.prov(self) + if aggregate == 'true': res = None confident = max(score['neg'],score['neu'],score['pos']) @@ -47,3 +72,25 @@ class vaderSentimentPlugin(SentimentPlugin): entry.sentiments.append(opinion2) yield entry + + test_cases = [] + + test_cases = [ + { + 'input': 'I am tired :(', + 'polarity': 'marl:Negative' + }, + { + 'input': 'I love pizza :(', + 'polarity': 'marl:Positive' + }, + { + 'input': 'I enjoy going to the cinema :)', + 'polarity': 'marl:Negative' + }, + { + 'input': 'This cake is disgusting', + 'polarity': 'marl:Negative' + }, + + ]