Compare commits


164 Commits

Author SHA1 Message Date
J. Fernando Sánchez 297e9e8106 readthedocs: remove pdf output 7 months ago
J. Fernando Sánchez 1eb8e432af add readthedocs config file 7 months ago
J. Fernando Sánchez 8236569818 k8s: add latest-senpy.gsi.upm.es 7 months ago
J. Fernando Sánchez 98d368dd9a k8s: fix volume mount 7 months ago
J. Fernando Sánchez 9747140b54 explicit KUBECONFIG in kubectl version 7 months ago
J. Fernando Sánchez e915766449 ignore uninitialized plugin tests when strict=false 7 months ago
J. Fernando Sánchez b33a70620b use default strict for extension tests 7 months ago
J. Fernando Sánchez e324c730e2 use strict=false in blueprint tests 7 months ago
J. Fernando Sánchez a5c135faac add noop to test-requirements 7 months ago
J. Fernando Sánchez 894942b3ab move nltk data volume 7 months ago
J. Fernando Sánchez 9bb980f6b4 make noop plugin optional 7 months ago
J. Fernando Sánchez 66371c1cd8 add pandas for testing 7 months ago
J. Fernando Sánchez f3d4415ffb Modify dependencies to allow for 3.7 compatibility
Some dependencies are not available for python 3.7 anymore. Instead
of trying to support different versions of the libraries, we opt to
focus on the latest python version, and allow for CORE functionality
for earlier versions.
7 months ago
J. Fernando Sánchez 3f227986f3 relax pandas dependency 7 months ago
J. Fernando Sánchez f2f28644a1 remove duplicated panda requirement 7 months ago
J. Fernando Sánchez 82a456705c remove duplicated requirements 7 months ago
J. Fernando Sánchez 3c35b4ac91 add requirements for community plugins 7 months ago
J. Fernando Sánchez 268d2a4848 adapt deployment 7 months ago
J. Fernando Sánchez 5330ae93fc remove senticnet: API is down 7 months ago
J. Fernando Sánchez 4f95fbcbd1 update to pass tests with community plugins 7 months ago
J. Fernando Sánchez 5b28b6d1b4 merge community plugins 8 months ago
J. Fernando Sánchez e1d888ebd6 Add 'community-plugins/' from commit '4c73797246c6aff8d055abfef73d3f0d34b933a8'
git-subtree-dir: community-plugins
git-subtree-mainline: 7f712952be
git-subtree-split: 4c73797246
8 months ago
J. Fernando Sánchez 7f712952be version 1.0.6 2 years ago
J. Fernando Sánchez 07348de59a Add enable-cors to deployment 2 years ago
J. Fernando Sánchez 39123cea8a Fix typo latest docker image 2 years ago
J. Fernando Sánchez 8a6ef3852a Renamed senpy-deploymebt.yaml 2 years ago
J. Fernando Sánchez 99c8782a92 Add version to latest docker image 2 years ago
J. Fernando Sánchez efa93f5456 Fix docker latest image 2 years ago
J. Fernando Sánchez 55e5ce3a66 Fix docker build and k8s svc 2 years ago
J. Fernando Sánchez 92b654b36c Update k8s files 2 years ago
J. Fernando Sánchez 7febb4d673 Update k8s files 2 years ago
J. Fernando Sánchez 7ca5057705 k8s deploy from raw KUBECONFIG 2 years ago
J. Fernando Sánchez 8489457370 Refine k8s deploy 2 years ago
J. Fernando Sánchez e9266af924 Add k8s deployment 2 years ago
J. Fernando Sánchez a7849bb029 Fix bug docker build /cache 2 years ago
J. Fernando Sánchez bd083a0e55 Fix bug DOCKERHUB gitlab-ci 2 years ago
J. Fernando Sánchez a390f51097 Fix bug gitlab-ci kaniko auth 2 years ago
J. Fernando Sánchez adbcd7d196 Version 1.0.5 2 years ago
J. Fernando Sánchez 4b1eecd1c2 Version 1.0.4 2 years ago
J. Fernando Sánchez c1e4e092a7 Version 1.0.3
Fix fonts mixed-content
Fixed deprecation error collections.MutableMapping (python 3.10)
2 years ago
J. Fernando Sánchez a0abbede49 Version 1.0.2
Update RDFlib to 6.1.1 (removed rdflib-jsonld, as it is deprecated)
Bumped minimum python version: 3.7 (as a result of RDFLIB 6)
Added ProxyFix to run behind nginx (Added --no-proxy to run without the fix)
Replaced http media links to protocol-agnostic links in playground
Enable CORS (via --enable-cors)
Update old urls (replaced *.cluster.gsi.dit.upm.es with *.gsi.upm.es)
2 years ago
J. Fernando Sánchez 4c73797246 update emotion-anew description 5 years ago
J. Fernando Sánchez 228eb6321b update emotion-anew description 5 years ago
J. Fernando Sánchez d575220712 update senpy version 5 years ago
J. Fernando Sánchez bf2feb9839 Add senticnet plugin 5 years ago
J. Fernando Sánchez 5c98326acf Clean up emotion-anew 5 years ago
J. Fernando Sánchez d961d8ac5b Fix URI emotion-anew 5 years ago
J. Fernando Sánchez 96ec10d791 Fix pipeline 5 years ago
J. Fernando Sánchez 6858a139ed Move Taiger to a separate repository 5 years ago
J. Fernando Sánchez 4f286057c9 Update to senpy 0.20 5 years ago
J. Fernando Sánchez fa993c6e2a Add default plugins 5 years ago
J. Fernando Sánchez 238f76442c Add senpy.gsi.upm.es 5 years ago
J. Fernando Sánchez a015ee81f7 Revert to not adding data folder to image 5 years ago
J. Fernando Sánchez d665017154 Compose for taiger plugin 5 years ago
J. Fernando Sánchez 00832e2e1c Add data in image 5 years ago
J. Fernando Sánchez 4ecabadae9 remove unnecessary import 5 years ago
J. Fernando Sánchez bb6f9ee367 tweaks for py2/py3 compatibility 5 years ago
Oscar Araque 80acb9307c Merge branch 'master' of ssh://lab.gsi.upm.es:2200/senpy/senpy-plugins-community 5 years ago
Oscar Araque 94394af20b depechemood updated 5 years ago
J. Fernando Sánchez d5f9ef88b2 Add new taiger plugin 5 years ago
J. Fernando Sánchez 675a905ab4 Add depeche mood 5 years ago
J. Fernando Sánchez 4507449266 Bump senpy version to 0.11.4 6 years ago
J. Fernando Sánchez 2e91a83eb6 Bump senpy version to 0.11.3 6 years ago
J. Fernando Sánchez d8c47220b1 Modify default TAIGER endpoint 6 years ago
J. Fernando Sánchez 0e4146ed8d Merge branch 'taiger' into 'master'
Taiger

Closes #12

See merge request senpy/senpy-plugins-community!1
6 years ago
J. Fernando Sánchez 6d3fc6f861 Taiger 6 years ago
J. Fernando Sánchez 666632a032 Update Makefile to avoid CI build errors 6 years ago
J. Fernando Sánchez 9dbe22b81f Adapt to new mocking of requests 6 years ago
J. Fernando Sánchez 9355d27e71 Bump senpy version to 0.10.9 6 years ago
J. Fernando Sánchez 0ed434ef0c Do not bind port in docker 6 years ago
J. Fernando Sánchez dbc238989b Fix resources sentiment-basic 6 years ago
J. Fernando Sánchez 48ba936a7b Improved docs, docker-compose and dockerfile 6 years ago
J. Fernando Sánchez bbe91e1924 Upgrade to senpy 0.10.7 6 years ago
J. Fernando Sánchez c7091e6323 Force building before pushing 6 years ago
J. Fernando Sánchez f11439d944 Unify data folders 6 years ago
J. Fernando Sánchez b15a0d7dbe Fix problems with echo and newlines
printf is more portable
6 years ago
J. Fernando Sánchez f92617d147 Change submodules to relative URIs 6 years ago
J. Fernando Sánchez 2a773d45aa Fix image name in tests 6 years ago
J. Fernando Sánchez e4e1a74971 Build before testing! 6 years ago
J. Fernando Sánchez 1659285f0b Remove TTY from docker test 6 years ago
J. Fernando Sánchez 57016e1380 Add clean stage 6 years ago
J. Fernando Sánchez 54da48b548 Add CI/CD and k8s 6 years ago
J. Fernando Sánchez 62142482dc Updated makefiles from senpy-plugins-community 6 years ago
J. Fernando Sánchez 982baa04cf Add '.makefiles/' from commit 'a75ba6994d93ca027b6f3ba0b08b75dd60d3aa78'
git-subtree-dir: .makefiles
git-subtree-mainline: c52a894017
git-subtree-split: a75ba6994d
6 years ago
J. Fernando Sánchez c52a894017 Merged into monorepo 6 years ago
J. Fernando Sánchez e51b659030 Merge commit '7c959aace896e9d318497a417e0eec8f78b62314' as 'sentiment-basic' 6 years ago
J. Fernando Sánchez 2a4cc96905 Removed sentiment-basic submodule 6 years ago
J. Fernando Sánchez 7c959aace8 Squashed 'sentiment-basic/' content from commit beb8e31
git-subtree-dir: sentiment-basic
git-subtree-split: beb8e311619059a0c660411edef1cf95b3826c0a
6 years ago
J. Fernando Sánchez 15ac26428a Merge commit '98ec4817cff3abd06f961fbbdb5c860aeb887bca' as 'emotion-anew' 6 years ago
J. Fernando Sánchez 402b49f43f Removed emotion-anew submodule 6 years ago
J. Fernando Sánchez 98ec4817cf Squashed 'emotion-anew/' content from commit e8a3c83
git-subtree-dir: emotion-anew
git-subtree-split: e8a3c837e3543a5f5f19086e1fcaa34b22be639e
6 years ago
J. Fernando Sánchez 08c1b4ce79 Merge commit '23c6cdd58dd3071fe5f707d904afacde6bd1a870' as 'emotion-wnaffect' 6 years ago
J. Fernando Sánchez 50a0599597 Removed emotion-wnaffect submodule 6 years ago
J. Fernando Sánchez 23c6cdd58d Squashed 'emotion-wnaffect/' content from commit 74c40d7
git-subtree-dir: emotion-wnaffect
git-subtree-split: 74c40d7e97d54d3c3e30739a85cf9322c92d5a87
6 years ago
J. Fernando Sánchez 7825802341 Merge commit '4a0b6c1bf4ec7213ad2b5538eb737a27dc28faa8' as 'sentiment-vader' 6 years ago
J. Fernando Sánchez 4a0b6c1bf4 Squashed 'sentiment-vader/' content from commit ddb7432
git-subtree-dir: sentiment-vader
git-subtree-split: ddb7432d260fd2d8fca719f1b3ee46117019f475
6 years ago
J. Fernando Sánchez cd73cd3fc6 Removed sentiment-vader submodule 6 years ago
J. Fernando Sánchez 704aba2ff0 Merge commit '1eec6ecbad039b946c0d7b690335f2bb4ea8f320' as 'sentiment-meaningCloud' 6 years ago
J. Fernando Sánchez bf67422f2f Removed sentiment-meaningCloud submodule 6 years ago
J. Fernando Sánchez 1eec6ecbad Squashed 'sentiment-meaningCloud/' content from commit 2a5d212
git-subtree-dir: sentiment-meaningCloud
git-subtree-split: 2a5d212833fac38efe69b9d90588c1f0a27ff390
6 years ago
J. Fernando Sánchez bec22e44a0 Removed enterprise/unnecessary modules 6 years ago
Manuel Garcia Amado f3961378e0 Add submodule in README 6 years ago
Manuel Garcia Amado fbde8a9462 Add plugins as submodules 6 years ago
Manuel Garcia Amado 582ae8a340 Adding tutorial to submodules 6 years ago
J. Fernando Sánchez a75ba6994d Merge branch 'meaningcloud' into 'master'
Meaningcloud

See merge request docs/templates/makefiles!8
7 years ago
J. Fernando Sánchez 919c4a07a2 Update base.mk 7 years ago
J. Fernando Sánchez 42224e343c Updated makefiles from meaningcloud
Version was "unknown" due to a bug
7 years ago
militarpancho f0c211c00a PYVERSION changed 7 years ago
J. Fernando Sánchez 24d85b18bb Merge branch 'meaningcloud' into 'master'
Updated makefiles from meaningcloud

See merge request docs/templates/makefiles!7
7 years ago
J. Fernando Sánchez d150321741 Updated makefiles from meaningcloud
* Fixed some python+docker variables
* Improved defaults for docker image names
7 years ago
J. Fernando Sánchez 4f88009bd7 Merge branch 'senpy' into 'master'
Senpy

See merge request docs/templates/makefiles!6
7 years ago
J. Fernando Sánchez 1f0703d535 Fixed typo in .gitlab-ci 7 years ago
J. Fernando Sánchez b20982cae1 Merge branch 'senpy' into 'master'
Senpy

See merge request docs/templates/makefiles!5
7 years ago
J. Fernando Sánchez c23f7986b4 Trying to fix push to github 7 years ago
J. Fernando Sánchez 8fe7616bae Updated makefiles from senpy 7 years ago
J. Fernando Sánchez 1543f5550e Updated makefiles from senpy 7 years ago
J. Fernando Sánchez f04cbeeddb Testing new k8s mk 7 years ago
militarpancho b671ff51f9 Add support for py3 in emotion-wnaffect
Normalize polarity values in sentiment-basic and sentiment-140
7 years ago
militarpancho dee007eacf Fixed bug in meaningCloud plugin. Now retrieves Neutral sentiment 7 years ago
J. Fernando Sánchez 1ef9dac86a Made ANEW paths absolute 7 years ago
militarpancho 18486aa3e0 Fixed vader tab error 7 years ago
militarpancho 86fdd8678a Add aggregate sentiment. This closes senpy/senpy-plugins-community#10 7 years ago
militarpancho 1b8a24c530 Fixed ANEW Readme 7 years ago
militarpancho 7f765d004f updated links in readme for obtaining resources 7 years ago
militarpancho b22ac843b6 Updated anew and wnaffect README explaining how to obtain resources 7 years ago
militarpancho e88ca98438 Readme updated 7 years ago
militarpancho 65bb517fd2 Readme updated 7 years ago
militarpancho 23d15e9274 Added emotion-anew and sentiment-vader 7 years ago
militarpancho 23a5595d18 Added language information in emotion wn-affect 7 years ago
militarpancho e341cc82fa Fixed sentiment-basic plugin that only retrieved Neutral sentiment. This closes senpy/senpy-plugins-community#6. Also added nltk download for some plugins. 7 years ago
militarpancho 85db4db01d added timeout message to finally fix #5 7 years ago
militarpancho 9f7a0e6907 Added timeout meaningCloud. This should fix #5 7 years ago
militarpancho 241f478a68 Added suffixes dictionary for wordnet lemmatizer. This closes #4 7 years ago
militarpancho 5427b02a1a Regarding #4: English bug with sentiment-basic. 7 years ago
militarpancho df2dc17ac0 This should fix #3 7 years ago
militarpancho cc112c5ac5 Adapted plugins to senpy 0.8.2 7 years ago
militarpancho 65b8873092 changes 7 years ago
militarpancho 9ea177a780 Added shelfmixin to emotion-wnaffect. This closes #2 7 years ago
militarpancho bb0b0fadc2 Added affect description 7 years ago
militarpancho 076813cb1a Added sentiment-140 7 years ago
militarpancho 51b4737c43 Change paths to wordnet files. Deleted loading from local path to load dictionaries from absolute paths 7 years ago
militarpancho 140ea9c159 Added some documentation and changes to .senpy files. 7 years ago
militarpancho 7250f56f17 Plugins names updated 7 years ago
militarpancho b40ac19130 Documentation more improved meaningCloud 7 years ago
militarpancho 37b130abaa Documentation improved meaningCloud 7 years ago
militarpancho 99e13f32e5 meaningCloud documentation improved 7 years ago
militarpancho 71d976acb2 Added documentation to meaningCloud plugin 7 years ago
militarpancho c57cfff0cc Added polarityValue to meaningCloud plugin 7 years ago
militarpancho f9c4e4bd59 Fixed some bugs 7 years ago
militarpancho 7b583f504c Added apikey alias to Affect plugin 7 years ago
militarpancho 6f3c08f8aa Fixed syntax error 7 years ago
militarpancho 67ff20440a Change module name 7 years ago
militarpancho 82e3062a6b Added meaningCloud to affect 7 years ago
militarpancho 864ca75b8f affect plugin added 7 years ago
militarpancho ac5ac2d06b Readme fixed 7 years ago
militarpancho 1f5188c251 Readme to use plugin 7 years ago
militarpancho 90b55a4b27 Added meaningCloud plugin 7 years ago
J. Fernando Sánchez aa628518ec Added Travis CI 8 years ago
J. Fernando Sánchez 5e8bc717a8 Added WordNet-Affect plugin and Makefile 8 years ago
NachoCP 0e9db7081c Compatibility with senpy 0.5 8 years ago
Oscar Araque 17976d85b1 Added SentiText plugin (for Spanish) 9 years ago
J. Fernando Sánchez 94d82238b8 Added entry to example plugin 9 years ago
J. Fernando Sánchez ed22679e7c Example plugin and README file 9 years ago
J. Fernando Sánchez 6561201cc2 Initial commit 9 years ago

.gitignore (vendored): 3 changed lines

@@ -7,4 +7,5 @@ README.html
__pycache__
VERSION
Dockerfile-*
Dockerfile
Dockerfile
senpy_data

@@ -4,100 +4,130 @@
# - docker:dind
# When using dind, it's wise to use the overlayfs driver for
# improved performance.
stages:
- test
- push
- publish
- test_image
- deploy
- clean
before_script:
- make -e login
.test: &test_definition
stage: test
script:
- make -e test-$PYTHON_VERSION
except:
- tags # Avoid unnecessary double testing
test-3.6:
<<: *test_definition
variables:
PYTHON_VERSION: "3.6"
variables:
KUBENS: senpy
LATEST_IMAGE: "${HUB_REPO}:${CI_COMMIT_SHORT_SHA}"
SENPY_DATA: "/senpy-data/" # This is configured in the CI job
NLTK_DATA: "/senpy-data/nltk_data" # Store NLTK downloaded data
test-3.7:
<<: *test_definition
allow_failure: true
docker:
stage: publish
image:
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
variables:
PYTHON_VERSION: "3.7"
push:
stage: push
PYTHON_VERSION: "3.10"
tags:
- docker
script:
- make -e push
- echo $CI_COMMIT_TAG > senpy/VERSION
- sed "s/{{PYVERSION}}/$PYTHON_VERSION/" Dockerfile.template > Dockerfile
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"},\"https://index.docker.io/v1/\":{\"auth\":\"$HUB_AUTH\"}}}" > /kaniko/.docker/config.json
# The skip-tls-verify flag is there because our registry certificate is self signed
- /kaniko/executor --context $CI_PROJECT_DIR --skip-tls-verify --dockerfile $CI_PROJECT_DIR/Dockerfile --destination $CI_REGISTRY_IMAGE:$CI_COMMIT_TAG --destination $HUB_REPO:$CI_COMMIT_TAG
only:
- tags
- triggers
- fix-makefiles
push-latest:
stage: push
docker-latest:
stage: publish
image:
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
variables:
PYTHON_VERSION: "3.10"
tags:
- docker
script:
- make -e push-latest
- echo git.${CI_COMMIT_SHORT_SHA} > senpy/VERSION
- sed "s/{{PYVERSION}}/$PYTHON_VERSION/" Dockerfile.template > Dockerfile
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"},\"https://index.docker.io/v1/\":{\"auth\":\"$HUB_AUTH\"}}}" > /kaniko/.docker/config.json
# The skip-tls-verify flag is there because our registry certificate is self signed
- /kaniko/executor --context $CI_PROJECT_DIR --skip-tls-verify --dockerfile $CI_PROJECT_DIR/Dockerfile --destination $LATEST_IMAGE --destination "${HUB_REPO}:latest"
only:
- master
- triggers
- fix-makefiles
refs:
- master
push-github:
stage: deploy
script:
- make -e push-github
testimage:
only:
- master
- triggers
- fix-makefiles
- tags
tags:
- docker
stage: test_image
image: "$CI_REGISTRY_IMAGE:$CI_COMMIT_TAG"
script:
- python -m senpy --no-run --test
deploy_pypi:
stage: deploy
script: # Configure the PyPI credentials, then push the package, and cleanup the creds.
- echo "[server-login]" >> ~/.pypirc
- echo "repository=https://upload.pypi.org/legacy/" >> ~/.pypirc
- echo "username=" ${PYPI_USER} >> ~/.pypirc
- echo "password=" ${PYPI_PASSWORD} >> ~/.pypirc
- make pip_upload
- echo "" > ~/.pypirc && rm ~/.pypirc # If the above fails, this won't run.
only:
- /^v?\d+\.\d+\.\d+([abc]\d*)?$/ # PEP-440 compliant version (tags)
except:
- branches
testpy37:
tags:
- docker
variables:
SENPY_STRICT: "false"
image: python:3.7
stage: test
script:
- pip install -r requirements.txt -r test-requirements.txt
- python setup.py test
deploy:
stage: deploy
environment: test
testpy310:
tags:
- docker
variables:
SENPY_STRICT: "true"
image: python:3.10
stage: test
script:
- make -e deploy
- pip install -r requirements.txt -r test-requirements.txt -r extra-requirements.txt
- python setup.py test
push_pypi:
only:
- master
- fix-makefiles
- tags
tags:
- docker
image: python:3.10
stage: publish
script:
- echo $CI_COMMIT_TAG > senpy/VERSION
- pip install twine
- python setup.py sdist bdist_wheel
- TWINE_PASSWORD=$PYPI_PASSWORD TWINE_USERNAME=$PYPI_USERNAME python -m twine upload dist/*
push-github:
check_pypi:
only:
- tags
tags:
- docker
image: python:3.10
stage: deploy
script:
- make -e push-github
only:
- master
- triggers
- pip install senpy==$CI_COMMIT_TAG
# Allow PYPI to update its index before we try to install
when: delayed
start_in: 10 minutes
clean :
stage: clean
latest-demo:
only:
refs:
- master
tags:
- docker
image: alpine/k8s:1.22.6
stage: deploy
environment: production
variables:
KUBECONFIG: "/kubeconfig"
# Same image as docker-latest
IMAGEWTAG: "${LATEST_IMAGE}"
KUBEAPP: "senpy"
script:
- make -e clean
when: manual
cleanup_py:
stage: clean
when: always # this is important; run even if preceding stages failed.
script:
- rm -vf ~/.pypirc # we don't want to leave these around, but GitLab may clean up anyway.
- echo "${KUBECONFIG_RAW}" > $KUBECONFIG
- kubectl --kubeconfig $KUBECONFIG version
- cd k8s/
- cat *.yaml *.tmpl 2>/dev/null | envsubst | kubectl --kubeconfig $KUBECONFIG apply --namespace ${KUBENS:-default} -f -
- kubectl --kubeconfig $KUBECONFIG get all,ing -l app=${KUBEAPP} --namespace=${KUBENS:-default}
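The latest-demo job above renders every manifest in k8s/ through envsubst before piping the result to kubectl apply. A rough Python equivalent of that templating step (an illustrative sketch, not code from this repository):

```python
import os
import string

def envsubst(text: str) -> str:
    # Substitute $VAR / ${VAR} references with environment values, like
    # envsubst(1), except unset variables are left in place instead of blanked.
    return string.Template(text).safe_substitute(os.environ)

# Usage sketch (hypothetical file name):
# manifest = envsubst(open("k8s/senpy.yaml.tmpl").read())
```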

@@ -2,7 +2,7 @@ These makefiles are recipes for several common tasks in different types of projects.
To add them to your project, simply do:
```
git remote add makefiles ssh://git@lab.cluster.gsi.dit.upm.es:2200/docs/templates/makefiles.git
git remote add makefiles ssh://git@lab.gsi.upm.es:2200/docs/templates/makefiles.git
git subtree add --prefix=.makefiles/ makefiles master
touch Makefile
echo "include .makefiles/base.mk" >> Makefile
@@ -16,7 +16,7 @@ include .makefiles/python.mk
```
You may need to set special variables like the name of your project or the python versions you're targeting.
Take a look at each specific `.mk` file for more information, and the `Makefile` in the [senpy](https://lab.cluster.gsi.dit.upm.es/senpy/senpy) project for a real use case.
Take a look at each specific `.mk` file for more information, and the `Makefile` in the [senpy](https://lab.gsi.upm.es/senpy/senpy) project for a real use case.
If you update the makefiles from your repository, make sure to push the changes for review in upstream (this repository):

@@ -1,5 +1,5 @@
makefiles-remote:
git ls-remote --exit-code makefiles 2> /dev/null || git remote add makefiles ssh://git@lab.cluster.gsi.dit.upm.es:2200/docs/templates/makefiles.git
git ls-remote --exit-code makefiles 2> /dev/null || git remote add makefiles ssh://git@lab.gsi.upm.es:2200/docs/templates/makefiles.git
makefiles-commit: makefiles-remote
git add -f .makefiles

@@ -0,0 +1,22 @@
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
version: 2
# Set the OS, Python version and other tools you might need
build:
os: ubuntu-22.04
tools:
python: "3.10"
# Build documentation in the "docs/" directory with Sphinx
sphinx:
configuration: docs/conf.py
# formats:
# - pdf
# - epub
python:
install:
- requirements: docs/requirements.txt

@@ -4,6 +4,20 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
### Added
* The code of many senpy community plugins has been included by default. However, additional files (e.g., licensed data) and/or installing additional dependencies may be necessary for some plugins. Read each plugin's documentation for more information.
* `--strict` flag, to fail and not start when a plugin fails to load or activate
* `optional` attribute in plugins. Optional plugins may fail to load or activate, but the server will start regardless, unless running in strict mode (see the sketch after this changelog excerpt)
* Option in shelf plugins to ignore pickling errors
### Removed
* `--only-install`, `--only-test` and `--only-list` flags were removed in favor of `--no-run` + `--install`/`--test`/`--dependencies`
### Changed
* data directory selection logic is slightly modified, and will choose one of the following (in this order): `data_folder` (argument), `$SENPY_DATA` or `$CWD` (see the sketch after the Dockerfile diff below)
## [1.0.6]
### Fixed
* Plugins now get activated for testing
## [1.0.1]
### Added
* License headers
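The `--strict` flag and the `optional` plugin attribute listed under [Unreleased] interact as follows: a failing plugin aborts startup only if it is non-optional or strict mode is on. A minimal sketch of that behaviour, using a hypothetical helper name rather than senpy's actual loading code:

```python
def activate_all(plugins, strict=False):
    # Hypothetical helper, for illustration only.
    for plugin in plugins:
        try:
            plugin.activate()
        except Exception:
            # Strict mode, or a required (non-optional) plugin: abort startup.
            if strict or not getattr(plugin, "optional", False):
                raise
            # Optional plugin in non-strict mode: skip it and keep serving.
```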

@@ -6,21 +6,20 @@ RUN apt-get update && apt-get install -y \
libblas-dev liblapack-dev liblapacke-dev gfortran \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir /cache/ /senpy-plugins /data/
RUN mkdir -p /cache/ /senpy-plugins /data/
VOLUME /data/
ENV PIP_CACHE_DIR=/cache/ SENPY_DATA=/data
ONBUILD COPY . /senpy-plugins/
ONBUILD RUN python -m senpy --only-install -f /senpy-plugins
ONBUILD WORKDIR /senpy-plugins/
WORKDIR /usr/src/app
COPY test-requirements.txt requirements.txt extra-requirements.txt /usr/src/app/
RUN pip install --no-cache-dir -r test-requirements.txt -r requirements.txt -r extra-requirements.txt
COPY . /usr/src/app/
RUN pip install --no-cache-dir --no-index --no-deps --editable .
ONBUILD COPY . /senpy-plugins/
ONBUILD RUN python -m senpy -i --no-run -f /senpy-plugins
ONBUILD WORKDIR /senpy-plugins/
ENTRYPOINT ["python", "-m", "senpy", "-f", "/senpy-plugins/", "--host", "0.0.0.0"]
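The image sets `SENPY_DATA=/data`, which feeds the data-folder selection order described in the changelog above: an explicit `data_folder` argument wins, then `$SENPY_DATA`, then the current working directory. A minimal sketch of that precedence (hypothetical helper name, not senpy's actual code):

```python
import os

def resolve_data_folder(data_folder=None):
    # Precedence: explicit argument > $SENPY_DATA > current working directory.
    return data_folder or os.environ.get("SENPY_DATA") or os.getcwd()
```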

@@ -5,7 +5,7 @@ IMAGENAME=gsiupm/senpy
# The first version is the main one (used for quick builds)
# See .makefiles/python.mk for more info
PYVERSIONS=3.6 3.7
PYVERSIONS ?= 3.10 3.7
DEVPORT=5000

@@ -13,7 +13,7 @@
Senpy lets you create sentiment analysis web services easily, fast and using a well known API.
As a bonus, Senpy services use semantic vocabularies (e.g. `NIF <http://persistence.uni-leipzig.org/nlp2rdf/>`_, `Marl <http://www.gsi.dit.upm.es/ontologies/marl>`_, `Onyx <http://www.gsi.dit.upm.es/ontologies/onyx>`_) and formats (turtle, JSON-LD, xml-rdf).
As a bonus, Senpy services use semantic vocabularies (e.g. `NIF <http://persistence.uni-leipzig.org/nlp2rdf/>`_, `Marl <http://www.gsi.upm.es/ontologies/marl>`_, `Onyx <http://www.gsi.upm.es/ontologies/onyx>`_) and formats (turtle, JSON-LD, xml-rdf).
Have you ever wanted to turn your sentiment analysis algorithms into a service?
With Senpy, now you can.
@@ -58,7 +58,7 @@ Its code is pure Python, and the only limitations are imposed by its dependencies.
Currently, the CI/CD pipeline tests the code on:
* GNU/Linux with Python versions 3.4, 3.5, 3.6 and 3.7
* GNU/Linux with Python versions 3.7+ (3.10+ recommended for all plugins)
* MacOS and homebrew's python3
* Windows 10 and chocolatey's python3

@@ -1275,7 +1275,7 @@
" <span class=\"nd\">&quot;@id&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;endpoint:plugins/sentiment-basic_0.1.1&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nd\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;SentimentPlugin&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;author&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;github.com/nachtkatze&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;description&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;\\nSentiment classifier using rule-based classification for Spanish. Based on english to spanish translation and SentiWordNet sentiment knowledge. This is a demo plugin that uses only some features from the TASS 2015 classifier. To use the entirely functional classifier you can use the service in: http://senpy.cluster.gsi.dit.upm.es.\\n&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;description&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;\\nSentiment classifier using rule-based classification for Spanish. Based on english to spanish translation and SentiWordNet sentiment knowledge. This is a demo plugin that uses only some features from the TASS 2015 classifier. To use the entirely functional classifier you can use the service in: http://senpy.gsi.upm.es.\\n&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;extra_params&quot;</span><span class=\"p\">:</span> <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;language&quot;</span><span class=\"p\">:</span> <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;aliases&quot;</span><span class=\"p\">:</span> <span class=\"p\">[</span>\n",
@@ -1303,7 +1303,7 @@
" <span class=\"nd\">&quot;@id&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;endpoint:plugins/sentiment-meaningcloud_1.1&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nd\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;SentimentPlugin&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;author&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;GSI UPM&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;description&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;\\nSentiment analysis with meaningCloud service.\\nTo use this plugin, you need to obtain an API key from meaningCloud signing up here:\\nhttps://www.meaningcloud.com/developer/login\\n\\nWhen you had obtained the meaningCloud API Key, you have to provide it to the plugin, using param apiKey.\\nExample request:\\n\\nhttp://senpy.cluster.gsi.dit.upm.es/api/?algo=meaningCloud&amp;language=en&amp;apiKey=YOUR_API_KEY&amp;input=I%20love%20Madrid.\\n&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;description&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;\\nSentiment analysis with meaningCloud service.\\nTo use this plugin, you need to obtain an API key from meaningCloud signing up here:\\nhttps://www.meaningcloud.com/developer/login\\n\\nWhen you had obtained the meaningCloud API Key, you have to provide it to the plugin, using param apiKey.\\nExample request:\\n\\nhttp://senpy.gsi.upm.es/api/?algo=meaningCloud&amp;language=en&amp;apiKey=YOUR_API_KEY&amp;input=I%20love%20Madrid.\\n&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;extra_params&quot;</span><span class=\"p\">:</span> <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;apikey&quot;</span><span class=\"p\">:</span> <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;aliases&quot;</span><span class=\"p\">:</span> <span class=\"p\">[</span>\n",
@@ -1536,7 +1536,7 @@
" \\PY{n+nd}{\\PYZdq{}@id\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}endpoint:plugins/sentiment\\PYZhy{}basic\\PYZus{}0.1.1\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nd}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}SentimentPlugin\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}author\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}github.com/nachtkatze\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}description\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}\\PYZbs{}nSentiment classifier using rule\\PYZhy{}based classification for Spanish. Based on english to spanish translation and SentiWordNet sentiment knowledge. This is a demo plugin that uses only some features from the TASS 2015 classifier. To use the entirely functional classifier you can use the service in: http://senpy.cluster.gsi.dit.upm.es.\\PYZbs{}n\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}description\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}\\PYZbs{}nSentiment classifier using rule\\PYZhy{}based classification for Spanish. Based on english to spanish translation and SentiWordNet sentiment knowledge. This is a demo plugin that uses only some features from the TASS 2015 classifier. To use the entirely functional classifier you can use the service in: http://senpy.gsi.upm.es.\\PYZbs{}n\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}extra\\PYZus{}params\\PYZdq{}}\\PY{p}{:} \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}language\\PYZdq{}}\\PY{p}{:} \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}aliases\\PYZdq{}}\\PY{p}{:} \\PY{p}{[}\n",
@@ -1564,7 +1564,7 @@
" \\PY{n+nd}{\\PYZdq{}@id\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}endpoint:plugins/sentiment\\PYZhy{}meaningcloud\\PYZus{}1.1\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nd}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}SentimentPlugin\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}author\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}GSI UPM\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}description\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}\\PYZbs{}nSentiment analysis with meaningCloud service.\\PYZbs{}nTo use this plugin, you need to obtain an API key from meaningCloud signing up here:\\PYZbs{}nhttps://www.meaningcloud.com/developer/login\\PYZbs{}n\\PYZbs{}nWhen you had obtained the meaningCloud API Key, you have to provide it to the plugin, using param apiKey.\\PYZbs{}nExample request:\\PYZbs{}n\\PYZbs{}nhttp://senpy.cluster.gsi.dit.upm.es/api/?algo=meaningCloud\\PYZam{}language=en\\PYZam{}apiKey=YOUR\\PYZus{}API\\PYZus{}KEY\\PYZam{}input=I\\PYZpc{}20love\\PYZpc{}20Madrid.\\PYZbs{}n\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}description\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}\\PYZbs{}nSentiment analysis with meaningCloud service.\\PYZbs{}nTo use this plugin, you need to obtain an API key from meaningCloud signing up here:\\PYZbs{}nhttps://www.meaningcloud.com/developer/login\\PYZbs{}n\\PYZbs{}nWhen you had obtained the meaningCloud API Key, you have to provide it to the plugin, using param apiKey.\\PYZbs{}nExample request:\\PYZbs{}n\\PYZbs{}nhttp://senpy.gsi.upm.es/api/?algo=meaningCloud\\PYZam{}language=en\\PYZam{}apiKey=YOUR\\PYZus{}API\\PYZus{}KEY\\PYZam{}input=I\\PYZpc{}20love\\PYZpc{}20Madrid.\\PYZbs{}n\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}extra\\PYZus{}params\\PYZdq{}}\\PY{p}{:} \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}apikey\\PYZdq{}}\\PY{p}{:} \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}aliases\\PYZdq{}}\\PY{p}{:} \\PY{p}{[}\n",
@@ -1796,7 +1796,7 @@
" \"@id\": \"endpoint:plugins/sentiment-basic_0.1.1\",\n",
" \"@type\": \"SentimentPlugin\",\n",
" \"author\": \"github.com/nachtkatze\",\n",
" \"description\": \"\\nSentiment classifier using rule-based classification for Spanish. Based on english to spanish translation and SentiWordNet sentiment knowledge. This is a demo plugin that uses only some features from the TASS 2015 classifier. To use the entirely functional classifier you can use the service in: http://senpy.cluster.gsi.dit.upm.es.\\n\",\n",
" \"description\": \"\\nSentiment classifier using rule-based classification for Spanish. Based on english to spanish translation and SentiWordNet sentiment knowledge. This is a demo plugin that uses only some features from the TASS 2015 classifier. To use the entirely functional classifier you can use the service in: http://senpy.gsi.upm.es.\\n\",\n",
" \"extra_params\": {\n",
" \"language\": {\n",
" \"aliases\": [\n",
@@ -1824,7 +1824,7 @@
" \"@id\": \"endpoint:plugins/sentiment-meaningcloud_1.1\",\n",
" \"@type\": \"SentimentPlugin\",\n",
" \"author\": \"GSI UPM\",\n",
" \"description\": \"\\nSentiment analysis with meaningCloud service.\\nTo use this plugin, you need to obtain an API key from meaningCloud signing up here:\\nhttps://www.meaningcloud.com/developer/login\\n\\nWhen you had obtained the meaningCloud API Key, you have to provide it to the plugin, using param apiKey.\\nExample request:\\n\\nhttp://senpy.cluster.gsi.dit.upm.es/api/?algo=meaningCloud&language=en&apiKey=YOUR_API_KEY&input=I%20love%20Madrid.\\n\",\n",
" \"description\": \"\\nSentiment analysis with meaningCloud service.\\nTo use this plugin, you need to obtain an API key from meaningCloud signing up here:\\nhttps://www.meaningcloud.com/developer/login\\n\\nWhen you had obtained the meaningCloud API Key, you have to provide it to the plugin, using param apiKey.\\nExample request:\\n\\nhttp://senpy.gsi.upm.es/api/?algo=meaningCloud&language=en&apiKey=YOUR_API_KEY&input=I%20love%20Madrid.\\n\",\n",
" \"extra_params\": {\n",
" \"apikey\": {\n",
" \"aliases\": [\n",
@@ -2061,7 +2061,7 @@
" <span class=\"nd\">&quot;@id&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;endpoint:plugins/sentiment-basic_0.1.1&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nd\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;SentimentPlugin&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;author&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;github.com/nachtkatze&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;description&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;\\nSentiment classifier using rule-based classification for Spanish. Based on english to spanish translation and SentiWordNet sentiment knowledge. This is a demo plugin that uses only some features from the TASS 2015 classifier. To use the entirely functional classifier you can use the service in: http://senpy.cluster.gsi.dit.upm.es.\\n&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;description&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;\\nSentiment classifier using rule-based classification for Spanish. Based on english to spanish translation and SentiWordNet sentiment knowledge. This is a demo plugin that uses only some features from the TASS 2015 classifier. To use the entirely functional classifier you can use the service in: http://senpy.gsi.upm.es.\\n&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;extra_params&quot;</span><span class=\"p\">:</span> <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;language&quot;</span><span class=\"p\">:</span> <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;aliases&quot;</span><span class=\"p\">:</span> <span class=\"p\">[</span>\n",
@@ -2089,7 +2089,7 @@
" <span class=\"nd\">&quot;@id&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;endpoint:plugins/sentiment-meaningcloud_1.1&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nd\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;SentimentPlugin&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;author&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;GSI UPM&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;description&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;\\nSentiment analysis with meaningCloud service.\\nTo use this plugin, you need to obtain an API key from meaningCloud signing up here:\\nhttps://www.meaningcloud.com/developer/login\\n\\nWhen you had obtained the meaningCloud API Key, you have to provide it to the plugin, using param apiKey.\\nExample request:\\n\\nhttp://senpy.cluster.gsi.dit.upm.es/api/?algo=meaningCloud&amp;language=en&amp;apiKey=YOUR_API_KEY&amp;input=I%20love%20Madrid.\\n&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;description&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;\\nSentiment analysis with meaningCloud service.\\nTo use this plugin, you need to obtain an API key from meaningCloud signing up here:\\nhttps://www.meaningcloud.com/developer/login\\n\\nWhen you had obtained the meaningCloud API Key, you have to provide it to the plugin, using param apiKey.\\nExample request:\\n\\nhttp://senpy.gsi.upm.es/api/?algo=meaningCloud&amp;language=en&amp;apiKey=YOUR_API_KEY&amp;input=I%20love%20Madrid.\\n&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;extra_params&quot;</span><span class=\"p\">:</span> <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;apikey&quot;</span><span class=\"p\">:</span> <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;aliases&quot;</span><span class=\"p\">:</span> <span class=\"p\">[</span>\n",
@@ -2207,7 +2207,7 @@
" \\PY{n+nd}{\\PYZdq{}@id\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}endpoint:plugins/sentiment\\PYZhy{}basic\\PYZus{}0.1.1\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nd}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}SentimentPlugin\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}author\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}github.com/nachtkatze\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}description\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}\\PYZbs{}nSentiment classifier using rule\\PYZhy{}based classification for Spanish. Based on english to spanish translation and SentiWordNet sentiment knowledge. This is a demo plugin that uses only some features from the TASS 2015 classifier. To use the entirely functional classifier you can use the service in: http://senpy.cluster.gsi.dit.upm.es.\\PYZbs{}n\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}description\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}\\PYZbs{}nSentiment classifier using rule\\PYZhy{}based classification for Spanish. Based on english to spanish translation and SentiWordNet sentiment knowledge. This is a demo plugin that uses only some features from the TASS 2015 classifier. To use the entirely functional classifier you can use the service in: http://senpy.gsi.upm.es.\\PYZbs{}n\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}extra\\PYZus{}params\\PYZdq{}}\\PY{p}{:} \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}language\\PYZdq{}}\\PY{p}{:} \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}aliases\\PYZdq{}}\\PY{p}{:} \\PY{p}{[}\n",
@@ -2235,7 +2235,7 @@
" \\PY{n+nd}{\\PYZdq{}@id\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}endpoint:plugins/sentiment\\PYZhy{}meaningcloud\\PYZus{}1.1\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nd}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}SentimentPlugin\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}author\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}GSI UPM\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}description\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}\\PYZbs{}nSentiment analysis with meaningCloud service.\\PYZbs{}nTo use this plugin, you need to obtain an API key from meaningCloud signing up here:\\PYZbs{}nhttps://www.meaningcloud.com/developer/login\\PYZbs{}n\\PYZbs{}nWhen you had obtained the meaningCloud API Key, you have to provide it to the plugin, using param apiKey.\\PYZbs{}nExample request:\\PYZbs{}n\\PYZbs{}nhttp://senpy.cluster.gsi.dit.upm.es/api/?algo=meaningCloud\\PYZam{}language=en\\PYZam{}apiKey=YOUR\\PYZus{}API\\PYZus{}KEY\\PYZam{}input=I\\PYZpc{}20love\\PYZpc{}20Madrid.\\PYZbs{}n\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}description\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}\\PYZbs{}nSentiment analysis with meaningCloud service.\\PYZbs{}nTo use this plugin, you need to obtain an API key from meaningCloud signing up here:\\PYZbs{}nhttps://www.meaningcloud.com/developer/login\\PYZbs{}n\\PYZbs{}nWhen you had obtained the meaningCloud API Key, you have to provide it to the plugin, using param apiKey.\\PYZbs{}nExample request:\\PYZbs{}n\\PYZbs{}nhttp://senpy.gsi.upm.es/api/?algo=meaningCloud\\PYZam{}language=en\\PYZam{}apiKey=YOUR\\PYZus{}API\\PYZus{}KEY\\PYZam{}input=I\\PYZpc{}20love\\PYZpc{}20Madrid.\\PYZbs{}n\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}extra\\PYZus{}params\\PYZdq{}}\\PY{p}{:} \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}apikey\\PYZdq{}}\\PY{p}{:} \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}aliases\\PYZdq{}}\\PY{p}{:} \\PY{p}{[}\n",
@@ -2352,7 +2352,7 @@
" \"@id\": \"endpoint:plugins/sentiment-basic_0.1.1\",\n",
" \"@type\": \"SentimentPlugin\",\n",
" \"author\": \"github.com/nachtkatze\",\n",
" \"description\": \"\\nSentiment classifier using rule-based classification for Spanish. Based on english to spanish translation and SentiWordNet sentiment knowledge. This is a demo plugin that uses only some features from the TASS 2015 classifier. To use the entirely functional classifier you can use the service in: http://senpy.cluster.gsi.dit.upm.es.\\n\",\n",
" \"description\": \"\\nSentiment classifier using rule-based classification for Spanish. Based on english to spanish translation and SentiWordNet sentiment knowledge. This is a demo plugin that uses only some features from the TASS 2015 classifier. To use the entirely functional classifier you can use the service in: http://senpy.gsi.upm.es.\\n\",\n",
" \"extra_params\": {\n",
" \"language\": {\n",
" \"aliases\": [\n",
@@ -2380,7 +2380,7 @@
" \"@id\": \"endpoint:plugins/sentiment-meaningcloud_1.1\",\n",
" \"@type\": \"SentimentPlugin\",\n",
" \"author\": \"GSI UPM\",\n",
" \"description\": \"\\nSentiment analysis with meaningCloud service.\\nTo use this plugin, you need to obtain an API key from meaningCloud signing up here:\\nhttps://www.meaningcloud.com/developer/login\\n\\nWhen you had obtained the meaningCloud API Key, you have to provide it to the plugin, using param apiKey.\\nExample request:\\n\\nhttp://senpy.cluster.gsi.dit.upm.es/api/?algo=meaningCloud&language=en&apiKey=YOUR_API_KEY&input=I%20love%20Madrid.\\n\",\n",
" \"description\": \"\\nSentiment analysis with meaningCloud service.\\nTo use this plugin, you need to obtain an API key from meaningCloud signing up here:\\nhttps://www.meaningcloud.com/developer/login\\n\\nWhen you had obtained the meaningCloud API Key, you have to provide it to the plugin, using param apiKey.\\nExample request:\\n\\nhttp://senpy.gsi.upm.es/api/?algo=meaningCloud&language=en&apiKey=YOUR_API_KEY&input=I%20love%20Madrid.\\n\",\n",
" \"extra_params\": {\n",
" \"apikey\": {\n",
" \"aliases\": [\n",
@@ -4267,9 +4267,9 @@
" <span class=\"p\">{</span>\n",
" <span class=\"nd\">&quot;@id&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;Emotion0&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nd\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;Emotion&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal&quot;</span><span class=\"p\">:</span> <span class=\"mf\">4.22</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance&quot;</span><span class=\"p\">:</span> <span class=\"mf\">5.17</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#valence&quot;</span><span class=\"p\">:</span> <span class=\"mf\">5.2</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal&quot;</span><span class=\"p\">:</span> <span class=\"mf\">4.22</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance&quot;</span><span class=\"p\">:</span> <span class=\"mf\">5.17</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#valence&quot;</span><span class=\"p\">:</span> <span class=\"mf\">5.2</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;prov:wasGeneratedBy&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;prefix:Analysis_1563369541.631805&quot;</span>\n",
" <span class=\"p\">}</span>\n",
" <span class=\"p\">],</span>\n",
@@ -4299,9 +4299,9 @@
" \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nd}{\\PYZdq{}@id\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Emotion0\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nd}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Emotion\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}arousal\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{4.22}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}dominance\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{5.17}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}valence\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{5.2}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}arousal\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{4.22}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}dominance\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{5.17}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}valence\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{5.2}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}prov:wasGeneratedBy\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}prefix:Analysis\\PYZus{}1563369541.631805\\PYZdq{}}\n",
" \\PY{p}{\\PYZcb{}}\n",
" \\PY{p}{]}\\PY{p}{,}\n",
@@ -4330,9 +4330,9 @@
" {\n",
" \"@id\": \"Emotion0\",\n",
" \"@type\": \"Emotion\",\n",
" \"http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal\": 4.22,\n",
" \"http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance\": 5.17,\n",
" \"http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#valence\": 5.2,\n",
" \"http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal\": 4.22,\n",
" \"http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance\": 5.17,\n",
" \"http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#valence\": 5.2,\n",
" \"prov:wasGeneratedBy\": \"prefix:Analysis_1563369541.631805\"\n",
" }\n",
" ],\n",

@@ -333,18 +333,18 @@
".output_html .vm { color: #19177C } /* Name.Variable.Magic */\n",
".output_html .il { color: #666666 } /* Literal.Number.Integer.Long */</style><div class=\"highlight\"><pre><span></span><span class=\"k\">@prefix</span><span class=\"w\"> </span><span class=\"nn\">:</span><span class=\"w\"> </span><span class=\"nv\">&lt;http://www.gsi.upm.es/onto/senpy/ns#&gt;</span><span class=\"w\"> </span><span class=\"p\">.</span><span class=\"w\"></span>\n",
"<span class=\"k\">@prefix</span><span class=\"w\"> </span><span class=\"nn\">dc:</span><span class=\"w\"> </span><span class=\"nv\">&lt;http://dublincore.org/2012/06/14/dcelements#&gt;</span><span class=\"w\"> </span><span class=\"p\">.</span><span class=\"w\"></span>\n",
"<span class=\"k\">@prefix</span><span class=\"w\"> </span><span class=\"nn\">emoml:</span><span class=\"w\"> </span><span class=\"nv\">&lt;http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/emotionml/ns#&gt;</span><span class=\"w\"> </span><span class=\"p\">.</span><span class=\"w\"></span>\n",
"<span class=\"k\">@prefix</span><span class=\"w\"> </span><span class=\"nn\">emoml:</span><span class=\"w\"> </span><span class=\"nv\">&lt;http://www.gsi.upm.es/ontologies/onyx/vocabularies/emotionml/ns#&gt;</span><span class=\"w\"> </span><span class=\"p\">.</span><span class=\"w\"></span>\n",
"<span class=\"k\">@prefix</span><span class=\"w\"> </span><span class=\"nn\">endpoint:</span><span class=\"w\"> </span><span class=\"nv\">&lt;http://senpy.gsi.upm.es/api/&gt;</span><span class=\"w\"> </span><span class=\"p\">.</span><span class=\"w\"></span>\n",
"<span class=\"k\">@prefix</span><span class=\"w\"> </span><span class=\"nn\">fam:</span><span class=\"w\"> </span><span class=\"nv\">&lt;http://vocab.fusepool.info/fam#&gt;</span><span class=\"w\"> </span><span class=\"p\">.</span><span class=\"w\"></span>\n",
"<span class=\"k\">@prefix</span><span class=\"w\"> </span><span class=\"nn\">marl:</span><span class=\"w\"> </span><span class=\"nv\">&lt;http://www.gsi.dit.upm.es/ontologies/marl/ns#&gt;</span><span class=\"w\"> </span><span class=\"p\">.</span><span class=\"w\"></span>\n",
"<span class=\"k\">@prefix</span><span class=\"w\"> </span><span class=\"nn\">marl:</span><span class=\"w\"> </span><span class=\"nv\">&lt;http://www.gsi.upm.es/ontologies/marl/ns#&gt;</span><span class=\"w\"> </span><span class=\"p\">.</span><span class=\"w\"></span>\n",
"<span class=\"k\">@prefix</span><span class=\"w\"> </span><span class=\"nn\">nif:</span><span class=\"w\"> </span><span class=\"nv\">&lt;http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core#&gt;</span><span class=\"w\"> </span><span class=\"p\">.</span><span class=\"w\"></span>\n",
"<span class=\"k\">@prefix</span><span class=\"w\"> </span><span class=\"nn\">onyx:</span><span class=\"w\"> </span><span class=\"nv\">&lt;http://www.gsi.dit.upm.es/ontologies/onyx/ns#&gt;</span><span class=\"w\"> </span><span class=\"p\">.</span><span class=\"w\"></span>\n",
"<span class=\"k\">@prefix</span><span class=\"w\"> </span><span class=\"nn\">onyx:</span><span class=\"w\"> </span><span class=\"nv\">&lt;http://www.gsi.upm.es/ontologies/onyx/ns#&gt;</span><span class=\"w\"> </span><span class=\"p\">.</span><span class=\"w\"></span>\n",
"<span class=\"k\">@prefix</span><span class=\"w\"> </span><span class=\"nn\">prefix:</span><span class=\"w\"> </span><span class=\"nv\">&lt;http://senpy.invalid/&gt;</span><span class=\"w\"> </span><span class=\"p\">.</span><span class=\"w\"></span>\n",
"<span class=\"k\">@prefix</span><span class=\"w\"> </span><span class=\"nn\">prov:</span><span class=\"w\"> </span><span class=\"nv\">&lt;http://www.w3.org/ns/prov#&gt;</span><span class=\"w\"> </span><span class=\"p\">.</span><span class=\"w\"></span>\n",
"<span class=\"k\">@prefix</span><span class=\"w\"> </span><span class=\"nn\">rdf:</span><span class=\"w\"> </span><span class=\"nv\">&lt;http://www.w3.org/1999/02/22-rdf-syntax-ns#&gt;</span><span class=\"w\"> </span><span class=\"p\">.</span><span class=\"w\"></span>\n",
"<span class=\"k\">@prefix</span><span class=\"w\"> </span><span class=\"nn\">rdfs:</span><span class=\"w\"> </span><span class=\"nv\">&lt;http://www.w3.org/2000/01/rdf-schema#&gt;</span><span class=\"w\"> </span><span class=\"p\">.</span><span class=\"w\"></span>\n",
"<span class=\"k\">@prefix</span><span class=\"w\"> </span><span class=\"nn\">senpy:</span><span class=\"w\"> </span><span class=\"nv\">&lt;http://www.gsi.upm.es/onto/senpy/ns#&gt;</span><span class=\"w\"> </span><span class=\"p\">.</span><span class=\"w\"></span>\n",
"<span class=\"k\">@prefix</span><span class=\"w\"> </span><span class=\"nn\">wna:</span><span class=\"w\"> </span><span class=\"nv\">&lt;http://www.gsi.dit.upm.es/ontologies/wnaffect/ns#&gt;</span><span class=\"w\"> </span><span class=\"p\">.</span><span class=\"w\"></span>\n",
"<span class=\"k\">@prefix</span><span class=\"w\"> </span><span class=\"nn\">wna:</span><span class=\"w\"> </span><span class=\"nv\">&lt;http://www.gsi.upm.es/ontologies/wnaffect/ns#&gt;</span><span class=\"w\"> </span><span class=\"p\">.</span><span class=\"w\"></span>\n",
"<span class=\"k\">@prefix</span><span class=\"w\"> </span><span class=\"nn\">xml:</span><span class=\"w\"> </span><span class=\"nv\">&lt;http://www.w3.org/XML/1998/namespace&gt;</span><span class=\"w\"> </span><span class=\"p\">.</span><span class=\"w\"></span>\n",
"<span class=\"k\">@prefix</span><span class=\"w\"> </span><span class=\"nn\">xsd:</span><span class=\"w\"> </span><span class=\"nv\">&lt;http://www.w3.org/2001/XMLSchema#&gt;</span><span class=\"w\"> </span><span class=\"p\">.</span><span class=\"w\"></span>\n",
"\n",
@@ -362,18 +362,18 @@
"\\begin{Verbatim}[commandchars=\\\\\\{\\}]\n",
"\\PY{k}{@prefix}\\PY{+w}{ }\\PY{n+nn}{:}\\PY{+w}{ }\\PY{n+nv}{\\PYZlt{}http://www.gsi.upm.es/onto/senpy/ns\\PYZsh{}\\PYZgt{}}\\PY{+w}{ }\\PY{p}{.}\n",
"\\PY{k}{@prefix}\\PY{+w}{ }\\PY{n+nn}{dc:}\\PY{+w}{ }\\PY{n+nv}{\\PYZlt{}http://dublincore.org/2012/06/14/dcelements\\PYZsh{}\\PYZgt{}}\\PY{+w}{ }\\PY{p}{.}\n",
"\\PY{k}{@prefix}\\PY{+w}{ }\\PY{n+nn}{emoml:}\\PY{+w}{ }\\PY{n+nv}{\\PYZlt{}http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/emotionml/ns\\PYZsh{}\\PYZgt{}}\\PY{+w}{ }\\PY{p}{.}\n",
"\\PY{k}{@prefix}\\PY{+w}{ }\\PY{n+nn}{emoml:}\\PY{+w}{ }\\PY{n+nv}{\\PYZlt{}http://www.gsi.upm.es/ontologies/onyx/vocabularies/emotionml/ns\\PYZsh{}\\PYZgt{}}\\PY{+w}{ }\\PY{p}{.}\n",
"\\PY{k}{@prefix}\\PY{+w}{ }\\PY{n+nn}{endpoint:}\\PY{+w}{ }\\PY{n+nv}{\\PYZlt{}http://senpy.gsi.upm.es/api/\\PYZgt{}}\\PY{+w}{ }\\PY{p}{.}\n",
"\\PY{k}{@prefix}\\PY{+w}{ }\\PY{n+nn}{fam:}\\PY{+w}{ }\\PY{n+nv}{\\PYZlt{}http://vocab.fusepool.info/fam\\PYZsh{}\\PYZgt{}}\\PY{+w}{ }\\PY{p}{.}\n",
"\\PY{k}{@prefix}\\PY{+w}{ }\\PY{n+nn}{marl:}\\PY{+w}{ }\\PY{n+nv}{\\PYZlt{}http://www.gsi.dit.upm.es/ontologies/marl/ns\\PYZsh{}\\PYZgt{}}\\PY{+w}{ }\\PY{p}{.}\n",
"\\PY{k}{@prefix}\\PY{+w}{ }\\PY{n+nn}{marl:}\\PY{+w}{ }\\PY{n+nv}{\\PYZlt{}http://www.gsi.upm.es/ontologies/marl/ns\\PYZsh{}\\PYZgt{}}\\PY{+w}{ }\\PY{p}{.}\n",
"\\PY{k}{@prefix}\\PY{+w}{ }\\PY{n+nn}{nif:}\\PY{+w}{ }\\PY{n+nv}{\\PYZlt{}http://persistence.uni\\PYZhy{}leipzig.org/nlp2rdf/ontologies/nif\\PYZhy{}core\\PYZsh{}\\PYZgt{}}\\PY{+w}{ }\\PY{p}{.}\n",
"\\PY{k}{@prefix}\\PY{+w}{ }\\PY{n+nn}{onyx:}\\PY{+w}{ }\\PY{n+nv}{\\PYZlt{}http://www.gsi.dit.upm.es/ontologies/onyx/ns\\PYZsh{}\\PYZgt{}}\\PY{+w}{ }\\PY{p}{.}\n",
"\\PY{k}{@prefix}\\PY{+w}{ }\\PY{n+nn}{onyx:}\\PY{+w}{ }\\PY{n+nv}{\\PYZlt{}http://www.gsi.upm.es/ontologies/onyx/ns\\PYZsh{}\\PYZgt{}}\\PY{+w}{ }\\PY{p}{.}\n",
"\\PY{k}{@prefix}\\PY{+w}{ }\\PY{n+nn}{prefix:}\\PY{+w}{ }\\PY{n+nv}{\\PYZlt{}http://senpy.invalid/\\PYZgt{}}\\PY{+w}{ }\\PY{p}{.}\n",
"\\PY{k}{@prefix}\\PY{+w}{ }\\PY{n+nn}{prov:}\\PY{+w}{ }\\PY{n+nv}{\\PYZlt{}http://www.w3.org/ns/prov\\PYZsh{}\\PYZgt{}}\\PY{+w}{ }\\PY{p}{.}\n",
"\\PY{k}{@prefix}\\PY{+w}{ }\\PY{n+nn}{rdf:}\\PY{+w}{ }\\PY{n+nv}{\\PYZlt{}http://www.w3.org/1999/02/22\\PYZhy{}rdf\\PYZhy{}syntax\\PYZhy{}ns\\PYZsh{}\\PYZgt{}}\\PY{+w}{ }\\PY{p}{.}\n",
"\\PY{k}{@prefix}\\PY{+w}{ }\\PY{n+nn}{rdfs:}\\PY{+w}{ }\\PY{n+nv}{\\PYZlt{}http://www.w3.org/2000/01/rdf\\PYZhy{}schema\\PYZsh{}\\PYZgt{}}\\PY{+w}{ }\\PY{p}{.}\n",
"\\PY{k}{@prefix}\\PY{+w}{ }\\PY{n+nn}{senpy:}\\PY{+w}{ }\\PY{n+nv}{\\PYZlt{}http://www.gsi.upm.es/onto/senpy/ns\\PYZsh{}\\PYZgt{}}\\PY{+w}{ }\\PY{p}{.}\n",
"\\PY{k}{@prefix}\\PY{+w}{ }\\PY{n+nn}{wna:}\\PY{+w}{ }\\PY{n+nv}{\\PYZlt{}http://www.gsi.dit.upm.es/ontologies/wnaffect/ns\\PYZsh{}\\PYZgt{}}\\PY{+w}{ }\\PY{p}{.}\n",
"\\PY{k}{@prefix}\\PY{+w}{ }\\PY{n+nn}{wna:}\\PY{+w}{ }\\PY{n+nv}{\\PYZlt{}http://www.gsi.upm.es/ontologies/wnaffect/ns\\PYZsh{}\\PYZgt{}}\\PY{+w}{ }\\PY{p}{.}\n",
"\\PY{k}{@prefix}\\PY{+w}{ }\\PY{n+nn}{xml:}\\PY{+w}{ }\\PY{n+nv}{\\PYZlt{}http://www.w3.org/XML/1998/namespace\\PYZgt{}}\\PY{+w}{ }\\PY{p}{.}\n",
"\\PY{k}{@prefix}\\PY{+w}{ }\\PY{n+nn}{xsd:}\\PY{+w}{ }\\PY{n+nv}{\\PYZlt{}http://www.w3.org/2001/XMLSchema\\PYZsh{}\\PYZgt{}}\\PY{+w}{ }\\PY{p}{.}\n",
"\n",
@@ -390,18 +390,18 @@
"text/plain": [
"@prefix : <http://www.gsi.upm.es/onto/senpy/ns#> .\n",
"@prefix dc: <http://dublincore.org/2012/06/14/dcelements#> .\n",
"@prefix emoml: <http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/emotionml/ns#> .\n",
"@prefix emoml: <http://www.gsi.upm.es/ontologies/onyx/vocabularies/emotionml/ns#> .\n",
"@prefix endpoint: <http://senpy.gsi.upm.es/api/> .\n",
"@prefix fam: <http://vocab.fusepool.info/fam#> .\n",
"@prefix marl: <http://www.gsi.dit.upm.es/ontologies/marl/ns#> .\n",
"@prefix marl: <http://www.gsi.upm.es/ontologies/marl/ns#> .\n",
"@prefix nif: <http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core#> .\n",
"@prefix onyx: <http://www.gsi.dit.upm.es/ontologies/onyx/ns#> .\n",
"@prefix onyx: <http://www.gsi.upm.es/ontologies/onyx/ns#> .\n",
"@prefix prefix: <http://senpy.invalid/> .\n",
"@prefix prov: <http://www.w3.org/ns/prov#> .\n",
"@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\n",
"@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .\n",
"@prefix senpy: <http://www.gsi.upm.es/onto/senpy/ns#> .\n",
"@prefix wna: <http://www.gsi.dit.upm.es/ontologies/wnaffect/ns#> .\n",
"@prefix wna: <http://www.gsi.upm.es/ontologies/wnaffect/ns#> .\n",
"@prefix xml: <http://www.w3.org/XML/1998/namespace> .\n",
"@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .\n",
"\n",
@ -1187,9 +1187,9 @@
" <span class=\"p\">{</span>\n",
" <span class=\"nd\">&quot;@id&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;Emotion0&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nd\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;Emotion&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal&quot;</span><span class=\"p\">:</span> <span class=\"mf\">6.44</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance&quot;</span><span class=\"p\">:</span> <span class=\"mf\">7.11</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#valence&quot;</span><span class=\"p\">:</span> <span class=\"mf\">8.72</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal&quot;</span><span class=\"p\">:</span> <span class=\"mf\">6.44</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance&quot;</span><span class=\"p\">:</span> <span class=\"mf\">7.11</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#valence&quot;</span><span class=\"p\">:</span> <span class=\"mf\">8.72</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;prov:wasGeneratedBy&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;prefix:Analysis_1563372854.2822595&quot;</span>\n",
" <span class=\"p\">}</span>\n",
" <span class=\"p\">],</span>\n",
@ -1220,9 +1220,9 @@
" \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nd}{\\PYZdq{}@id\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Emotion0\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nd}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Emotion\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}arousal\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{6.44}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}dominance\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{7.11}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}valence\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{8.72}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}arousal\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{6.44}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}dominance\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{7.11}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}valence\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{8.72}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}prov:wasGeneratedBy\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}prefix:Analysis\\PYZus{}1563372854.2822595\\PYZdq{}}\n",
" \\PY{p}{\\PYZcb{}}\n",
" \\PY{p}{]}\\PY{p}{,}\n",
@ -1252,9 +1252,9 @@
" {\n",
" \"@id\": \"Emotion0\",\n",
" \"@type\": \"Emotion\",\n",
" \"http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal\": 6.44,\n",
" \"http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance\": 7.11,\n",
" \"http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#valence\": 8.72,\n",
" \"http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal\": 6.44,\n",
" \"http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance\": 7.11,\n",
" \"http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#valence\": 8.72,\n",
" \"prov:wasGeneratedBy\": \"prefix:Analysis_1563372854.2822595\"\n",
" }\n",
" ],\n",
@ -1376,9 +1376,9 @@
" <span class=\"p\">{</span>\n",
" <span class=\"nd\">&quot;@id&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;Emotion0&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nd\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;Emotion&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal&quot;</span><span class=\"p\">:</span> <span class=\"mf\">6.44</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance&quot;</span><span class=\"p\">:</span> <span class=\"mf\">7.11</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#valence&quot;</span><span class=\"p\">:</span> <span class=\"mf\">8.72</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal&quot;</span><span class=\"p\">:</span> <span class=\"mf\">6.44</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance&quot;</span><span class=\"p\">:</span> <span class=\"mf\">7.11</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#valence&quot;</span><span class=\"p\">:</span> <span class=\"mf\">8.72</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;prov:wasGeneratedBy&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;prefix:Analysis_1563372854.3354168&quot;</span>\n",
" <span class=\"p\">}</span>\n",
" <span class=\"p\">],</span>\n",
@ -1409,9 +1409,9 @@
" \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nd}{\\PYZdq{}@id\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Emotion0\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nd}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Emotion\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}arousal\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{6.44}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}dominance\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{7.11}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}valence\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{8.72}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}arousal\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{6.44}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}dominance\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{7.11}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}valence\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{8.72}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}prov:wasGeneratedBy\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}prefix:Analysis\\PYZus{}1563372854.3354168\\PYZdq{}}\n",
" \\PY{p}{\\PYZcb{}}\n",
" \\PY{p}{]}\\PY{p}{,}\n",
@ -1441,9 +1441,9 @@
" {\n",
" \"@id\": \"Emotion0\",\n",
" \"@type\": \"Emotion\",\n",
" \"http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal\": 6.44,\n",
" \"http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance\": 7.11,\n",
" \"http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#valence\": 8.72,\n",
" \"http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal\": 6.44,\n",
" \"http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance\": 7.11,\n",
" \"http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#valence\": 8.72,\n",
" \"prov:wasGeneratedBy\": \"prefix:Analysis_1563372854.3354168\"\n",
" }\n",
" ],\n",
@ -1572,9 +1572,9 @@
" <span class=\"p\">{</span>\n",
" <span class=\"nd\">&quot;@id&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;Emotion0&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nd\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;Emotion&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal&quot;</span><span class=\"p\">:</span> <span class=\"mf\">6.44</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance&quot;</span><span class=\"p\">:</span> <span class=\"mf\">7.11</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#valence&quot;</span><span class=\"p\">:</span> <span class=\"mf\">8.72</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal&quot;</span><span class=\"p\">:</span> <span class=\"mf\">6.44</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance&quot;</span><span class=\"p\">:</span> <span class=\"mf\">7.11</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#valence&quot;</span><span class=\"p\">:</span> <span class=\"mf\">8.72</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;prov:wasGeneratedBy&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;prefix:Analysis_1563372854.3876536&quot;</span>\n",
" <span class=\"p\">}</span>\n",
" <span class=\"p\">],</span>\n",
@ -1605,9 +1605,9 @@
" \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nd}{\\PYZdq{}@id\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Emotion0\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nd}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Emotion\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}arousal\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{6.44}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}dominance\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{7.11}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}valence\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{8.72}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}arousal\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{6.44}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}dominance\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{7.11}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns\\PYZsh{}valence\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{8.72}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}prov:wasGeneratedBy\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}prefix:Analysis\\PYZus{}1563372854.3876536\\PYZdq{}}\n",
" \\PY{p}{\\PYZcb{}}\n",
" \\PY{p}{]}\\PY{p}{,}\n",
@ -1637,9 +1637,9 @@
" {\n",
" \"@id\": \"Emotion0\",\n",
" \"@type\": \"Emotion\",\n",
" \"http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal\": 6.44,\n",
" \"http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance\": 7.11,\n",
" \"http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#valence\": 8.72,\n",
" \"http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal\": 6.44,\n",
" \"http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance\": 7.11,\n",
" \"http://www.gsi.upm.es/ontologies/onyx/vocabularies/anew/ns#valence\": 8.72,\n",
" \"prov:wasGeneratedBy\": \"prefix:Analysis_1563372854.3876536\"\n",
" }\n",
" ],\n",

@ -2,7 +2,7 @@
"@context": [
"http://mixedemotions-project.eu/ns/context.jsonld",
{
"emovoc": "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/emotionml/ns#"
"emovoc": "http://www.gsi.upm.es/ontologies/onyx/vocabularies/emotionml/ns#"
}
],
"@id": "me:Result1",

@ -67,7 +67,7 @@ You can analyze the same sentence using a different sentiment service (e.g. Vade
@prefix : <http://www.gsi.upm.es/onto/senpy/ns#> .
@prefix endpoint: <http://senpy.gsi.upm.es/api/> .
@prefix marl: <http://www.gsi.dit.upm.es/ontologies/marl/ns#> .
@prefix marl: <http://www.gsi.upm.es/ontologies/marl/ns#> .
@prefix nif: <http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core#> .
@prefix prefix: <http://senpy.invalid/> .
@prefix prov: <http://www.w3.org/ns/prov#> .

@ -49,7 +49,7 @@ To use your custom plugins, you can add a volume to the container:
.. code:: bash
docker run -ti -p 5000:5000 -v <PATH OF PLUGINS>:/plugins gsiupm/senpy --host 0.0.0.0 --plugins -f /plugins
docker run -ti -p 5000:5000 -v <PATH OF PLUGINS>:/plugins gsiupm/senpy --host 0.0.0.0 --plugins-folder /plugins
Alias

@ -2,7 +2,7 @@ Publications
============
And if you use Senpy in your research, please cite `Senpy: A Pragmatic Linked Sentiment Analysis Framework <http://gsi.dit.upm.es/index.php/es/investigacion/publicaciones?view=publication&task=show&id=417>`__ (`BibTex <http://gsi.dit.upm.es/index.php/es/investigacion/publicaciones?controller=publications&task=export&format=bibtex&id=417>`__):
And if you use Senpy in your research, please cite `Senpy: A Pragmatic Linked Sentiment Analysis Framework <http://gsi.upm.es/index.php/es/investigacion/publicaciones?view=publication&task=show&id=417>`__ (`BibTex <http://gsi.upm.es/index.php/es/investigacion/publicaciones?controller=publications&task=export&format=bibtex&id=417>`__):
.. code-block:: text

@ -10,8 +10,8 @@ The senpy server is launched via the `senpy` command:
usage: senpy [-h] [--level logging_level] [--log-format log_format] [--debug]
[--no-default-plugins] [--host HOST] [--port PORT]
[--plugins-folder PLUGINS_FOLDER] [--only-install] [--only-test]
[--test] [--only-list] [--data-folder DATA_FOLDER]
[--plugins-folder PLUGINS_FOLDER] [--install]
[--test] [--no-run] [--data-folder DATA_FOLDER]
[--no-threaded] [--no-deps] [--version] [--allow-fail]
Run a Senpy server
@ -28,10 +28,9 @@ The senpy server is launched via the `senpy` command:
--port PORT, -p PORT Port to listen on.
--plugins-folder PLUGINS_FOLDER, -f PLUGINS_FOLDER
Where to look for plugins.
--only-install, -i Do not run a server, only install plugin dependencies
--only-test Do not run a server, just test all plugins
--install, -i Install plugin dependencies before launching the server.
--test, -t Test all plugins before launching the server
--only-list, --list Do not run a server, only list plugins found
--no-run Do not launch the server
--data-folder DATA_FOLDER, --data DATA_FOLDER
Where to look for data. It can be set with the SENPY_DATA
environment variable as well.

@ -13,9 +13,9 @@ An overview of the vocabularies and their use can be found in [4].
[1] Guidelines for developing NIF-based NLP services, Final Community Group Report 22 December 2015 Available at: https://www.w3.org/2015/09/bpmlod-reports/nif-based-nlp-webservices/
[2] Marl Ontology Specification, available at http://www.gsi.dit.upm.es/ontologies/marl/
[2] Marl Ontology Specification, available at http://www.gsi.upm.es/ontologies/marl/
[3] Onyx Ontology Specification, available at http://www.gsi.dit.upm.es/ontologies/onyx/
[3] Onyx Ontology Specification, available at http://www.gsi.upm.es/ontologies/onyx/
[4] Iglesias, C. A., Sánchez-Rada, J. F., Vulcu, G., & Buitelaar, P. (2017). Linked Data Models for Sentiment and Emotion Analysis in Social Networks. In Sentiment Analysis in Social Networks (pp. 49-69).

@ -1,3 +1,4 @@
module: mynoop
optional: true
requirements:
- noop

@ -31,7 +31,7 @@ pipeline = Pipeline([('cv', count_vec),
('clf', clf3)])
pipeline.fit(X_train, y_train)
print('Feature names: {}'.format(count_vec.get_feature_names()))
print('Feature names: {}'.format(count_vec.get_feature_names_out()))
print('Class count: {}'.format(clf3.class_count_))
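For context, this rename tracks scikit-learn's API: `get_feature_names` was deprecated in 1.0 and removed in 1.2 in favour of `get_feature_names_out`. A minimal sketch, assuming scikit-learn >= 1.0 (the vectorizer below is illustrative, not the document's pipeline):

```python
from sklearn.feature_extraction.text import CountVectorizer

count_vec = CountVectorizer()
count_vec.fit(['senpy analyses text', 'senpy annotates text'])

# Old API, removed in scikit-learn 1.2:
#   count_vec.get_feature_names()
print(count_vec.get_feature_names_out())  # e.g. ['analyses' 'annotates' 'senpy' 'text']
```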

@ -1 +1,6 @@
gsitk>0.1.9.1
flask_cors==3.0.10
Pattern==3.6
lxml==4.9.3
pandas==2.1.1
textblob==0.17.1

@ -1,20 +1,24 @@
---
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
name: senpy-latest
spec:
replicas: 1
selector:
matchLabels:
app: senpy-latest
template:
metadata:
labels:
app: senpy-latest
role: senpy-latest
app: test
spec:
containers:
- name: senpy-latest
image: $IMAGEWTAG
imagePullPolicy: Always
args: ["--enable-cors"]
resources:
limits:
memory: "512Mi"
@ -22,3 +26,11 @@ spec:
ports:
- name: web
containerPort: 5000
volumeMounts:
- name: senpy-data
mountPath: /senpy-data
subPath: data
volumes:
- name: senpy-data
persistentVolumeClaim:
claimName: pvc-senpy

@ -1,21 +1,29 @@
---
apiVersion: extensions/v1beta1
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: senpy-ingress
labels:
app: senpy-latest
spec:
rules:
- host: latest.senpy.cluster.gsi.dit.upm.es
- host: senpy-latest.gsi.upm.es
http:
paths:
- path: /
pathType: Prefix
backend:
serviceName: senpy-latest
servicePort: 5000
service:
name: senpy-latest
port:
number: 5000
- host: latest.senpy.gsi.upm.es
http:
paths:
- path: /
pathType: Prefix
backend:
serviceName: senpy-latest
servicePort: 5000
service:
name: senpy-latest
port:
number: 5000

@ -3,10 +3,12 @@ apiVersion: v1
kind: Service
metadata:
name: senpy-latest
labels:
app: senpy-latest
spec:
type: ClusterIP
ports:
- port: 5000
protocol: TCP
selector:
role: senpy-latest
app: senpy-latest

@ -7,8 +7,7 @@ future
jsonschema
jsonref
PyYAML
rdflib
rdflib-jsonld
rdflib==6.1.1
numpy
scipy
scikit-learn>=0.20

@ -22,6 +22,8 @@ the server.
from flask import Flask
from senpy.extensions import Senpy
from senpy.utils import easy_test
from senpy.plugins import list_dependencies
from senpy import config
import logging
import os
@ -41,6 +43,11 @@ def main():
type=str,
default="INFO",
help='Logging level')
parser.add_argument(
'--no-proxy-fix',
action='store_true',
default=False,
help='Do not assume senpy will be running behind a proxy (e.g., nginx)')
parser.add_argument(
'--log-format',
metavar='log_format',
@ -76,16 +83,21 @@ def main():
action='append',
help='Where to look for plugins.')
parser.add_argument(
'--only-install',
'--install',
'-i',
action='store_true',
default=False,
help='Do not run a server, only install plugin dependencies')
help='Install plugin dependencies before running.')
parser.add_argument(
'--only-test',
'--dependencies',
action='store_true',
default=False,
help='Do not run a server, just test all plugins')
help='List plugin dependencies')
parser.add_argument(
'--strict',
action='store_true',
default=config.strict,
help='Fail if optional plugins cannot be loaded.')
parser.add_argument(
'--test',
'-t',
@ -93,11 +105,10 @@ def main():
default=False,
help='Test all plugins before launching the server')
parser.add_argument(
'--only-list',
'--list',
'--no-run',
action='store_true',
default=False,
help='Do not run a server, only list plugins found')
help='Do not launch the server.')
parser.add_argument(
'--data-folder',
'--data',
@ -127,6 +138,12 @@ def main():
action='store_true',
default=False,
help='Do not exit if some plugins fail to activate')
parser.add_argument(
'--enable-cors',
'--cors',
action='store_true',
default=False,
help='Enable CORS for all domains (requires flask-cors to be installed)')
args = parser.parse_args()
print('Senpy version {}'.format(senpy.__version__))
print(sys.version)
@ -141,9 +158,12 @@ def main():
app = Flask(__name__)
app.debug = args.debug
sp = Senpy(app,
plugin_folder=None,
default_plugins=not args.no_default_plugins,
install=args.install,
strict=args.strict,
data_folder=args.data_folder)
folders = list(args.plugins_folder) if args.plugins_folder else []
if not folders:
@ -163,20 +183,54 @@ def main():
fpath,
maxname=maxname,
maxversion=maxversion))
if args.only_list:
return
if not args.no_deps:
if args.dependencies:
print('Listing dependencies')
missing = []
installed = []
for plug in sp.plugins(is_activated=False):
inst, miss, nltkres = list_dependencies(plug)
if not any([inst, miss, nltkres]):
continue
print(f'Plugin: {plug.id}')
for m in miss:
missing.append(f'{m} # {plug.id}')
for i in inst:
installed.append(f'{i} # {plug.id}')
if installed:
print('Installed packages:')
for i in installed:
print(f'\t{i}')
if missing:
print('Missing packages:')
for m in missing:
print(f'\t{m}')
if args.install:
sp.install_deps()
if args.only_install:
if args.test:
sp.activate_all(sync=True)
easy_test(sp.plugins(is_activated=True), debug=args.debug)
if args.no_run:
return
sp.activate_all(allow_fail=args.allow_fail)
if args.test or args.only_test:
easy_test(sp.plugins(), debug=args.debug)
if args.only_test:
return
sp.activate_all(sync=True)
if sp.strict:
inactive = sp.plugins(is_activated=False)
assert not inactive
print('Senpy version {}'.format(senpy.__version__))
print('Server running on %s:%d. Ctrl+C to quit' % (args.host,
args.port))
if args.enable_cors:
from flask_cors import CORS
CORS(app)
if not args.no_proxy_fix:
from werkzeug.middleware.proxy_fix import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
try:
app.run(args.host,
args.port,

@ -204,8 +204,8 @@ def basic_api(f):
return decorated_function
@api_blueprint.route('/', defaults={'plugins': None}, methods=['POST', 'GET'])
@api_blueprint.route('/<path:plugins>', methods=['POST', 'GET'])
@api_blueprint.route('/', defaults={'plugins': None}, methods=['POST', 'GET'], strict_slashes=False)
@api_blueprint.route('/<path:plugins>', methods=['POST', 'GET'], strict_slashes=False)
@basic_api
def api_root(plugins):
if plugins:
@ -240,7 +240,7 @@ def api_root(plugins):
return results
@api_blueprint.route('/evaluate/', methods=['POST', 'GET'])
@api_blueprint.route('/evaluate', methods=['POST', 'GET'], strict_slashes=False)
@basic_api
def evaluate():
if request.parameters['help']:
@ -253,7 +253,7 @@ def evaluate():
return response
@api_blueprint.route('/plugins/', methods=['POST', 'GET'])
@api_blueprint.route('/plugins', methods=['POST', 'GET'], strict_slashes=False)
@basic_api
def plugins():
sp = current_app.senpy
@ -264,14 +264,14 @@ def plugins():
return dic
@api_blueprint.route('/plugins/<plugin>/', methods=['POST', 'GET'])
@api_blueprint.route('/plugins/<plugin>', methods=['POST', 'GET'], strict_slashes=False)
@basic_api
def plugin(plugin):
sp = current_app.senpy
return sp.get_plugin(plugin)
@api_blueprint.route('/datasets/', methods=['POST', 'GET'])
@api_blueprint.route('/datasets', methods=['POST', 'GET'], strict_slashes=False)
@basic_api
def get_datasets():
dic = Datasets(datasets=list(datasets.values()))
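The effect of adding `strict_slashes=False` is that each endpoint answers both with and without a trailing slash, instead of a redirect or a 404 (which one depends on the Werkzeug version). A minimal sketch with a hypothetical route, not senpy's actual blueprint:

```python
from flask import Flask

app = Flask(__name__)

@app.route('/plugins', methods=['GET'], strict_slashes=False)
def list_plugins():
    # Served for both GET /plugins and GET /plugins/ (no redirect needed).
    return {'plugins': []}
```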

@ -0,0 +1,7 @@
import os
strict = os.environ.get('SENPY_STRICT', '').lower() not in ["", "false", "f"]
data_folder = os.environ.get('SENPY_DATA', None)
if data_folder:
data_folder = os.path.abspath(data_folder)
testing = os.environ.get('SENPY_TESTING', "") != ""
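A sketch of how these flags react to the environment, based on the parsing rules above; note the variables must be set before `senpy.config` is first imported:

```python
import os

# "", "false" and "f" disable strict mode; any other value enables it.
os.environ['SENPY_STRICT'] = 'f'
os.environ['SENPY_DATA'] = './senpy_data'

from senpy import config

print(config.strict)       # False
print(config.data_folder)  # absolute path to ./senpy_data
```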

@ -20,6 +20,7 @@ It orchestrates plugin (de)activation and analysis.
from future import standard_library
standard_library.install_aliases()
from . import config
from . import plugins, api
from .models import Error, AggregatedEvaluation
from .plugins import AnalysisPlugin
@ -44,8 +45,11 @@ class Senpy(object):
app=None,
plugin_folder=".",
data_folder=None,
install=False,
strict=None,
default_plugins=False):
default_data = os.path.join(os.getcwd(), 'senpy_data')
self.data_folder = data_folder or os.environ.get('SENPY_DATA', default_data)
try:
@ -57,6 +61,8 @@ class Senpy(object):
raise
self._default = None
self.strict = strict if strict is not None else config.strict
self.install = install
self._plugins = {}
if plugin_folder:
self.add_folder(plugin_folder)
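For code that embeds senpy, the constructor change means `install` and `strict` can now be passed directly. A hypothetical embedding sketch, using only the parameters shown above:

```python
from flask import Flask
from senpy.extensions import Senpy

app = Flask(__name__)
# strict=None falls back to config.strict (i.e., the SENPY_STRICT variable).
sp = Senpy(app, plugin_folder='plugins', install=True, strict=False)
```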
@ -148,7 +154,8 @@ class Senpy(object):
logger.debug("Adding folder: %s", folder)
if os.path.isdir(folder):
new_plugins = plugins.from_folder([folder],
data_folder=self.data_folder)
data_folder=self.data_folder,
strict=self.strict)
for plugin in new_plugins:
self.add_plugin(plugin)
else:
@ -173,7 +180,7 @@ class Senpy(object):
logger.info('Installing dependencies')
# If a plugin is activated, its dependencies should already be installed
# Otherwise, it would've failed to activate.
plugins.install_deps(*self.plugins(is_activated=False))
plugins.install_deps(*self._plugins.values())
def analyse(self, request, analyses=None):
"""
@ -340,13 +347,13 @@ class Senpy(object):
else:
self._default = self._plugins[value.lower()]
def activate_all(self, sync=True, allow_fail=False):
def activate_all(self, sync=True):
ps = []
for plug in self._plugins.keys():
try:
self.activate_plugin(plug, sync=sync)
except Exception as ex:
if not allow_fail:
if self.strict:
raise
logger.error('Could not activate {}: {}'.format(plug, ex))
return ps
@ -358,15 +365,20 @@ class Senpy(object):
return ps
def _activate(self, plugin):
success = False
with plugin._lock:
if plugin.is_activated:
return
plugin._activate()
msg = "Plugin activated: {}".format(plugin.name)
logger.info(msg)
success = plugin.is_activated
return success
try:
logger.info("Activating plugin: {}".format(plugin.name))
assert plugin._activate()
logger.info(f"Plugin activated: {plugin.name}")
except Exception as ex:
if getattr(plugin, "optional", False) and not self.strict:
logger.info(f"Plugin could NOT be activated: {plugin.name}")
return False
raise
return plugin.is_activated
def activate_plugin(self, plugin_name, sync=True):
plugin_name = plugin_name.lower()

@ -23,7 +23,8 @@ import inspect
import copy
from abc import ABCMeta
from collections import MutableMapping, namedtuple
from collections import namedtuple
from collections.abc import MutableMapping
class BaseMeta(ABCMeta):
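`collections.MutableMapping` and the other ABC aliases were removed in Python 3.10; `collections.abc` has been their canonical home since 3.3, so the direct import above is the right fix. If compatibility with very old interpreters were needed, a hedged fallback could look like this:

```python
try:
    from collections.abc import MutableMapping  # Python >= 3.3
except ImportError:
    from collections import MutableMapping      # legacy interpreters only
```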

@ -216,7 +216,7 @@ class BaseModel(with_metaclass(BaseMeta, CustomDict)):
logger.debug(
'Parsing with prefix: {}'.format(kwargs.get('prefix')))
content = g.serialize(format=format,
prefix=prefix).decode('utf-8')
prefix=prefix)
mimetype = 'text/{}'.format(format)
else:
raise Error('Unknown outformat: {}'.format(format))
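The dropped `.decode('utf-8')` follows from rdflib 6: `Graph.serialize()` now returns `str` by default, and only returns `bytes` when an explicit `encoding` is requested. A quick check:

```python
from rdflib import Graph

g = Graph()
print(type(g.serialize(format='turtle')))                    # <class 'str'>
print(type(g.serialize(format='turtle', encoding='utf-8')))  # <class 'bytes'>
```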

@ -46,6 +46,7 @@ from .. import models, utils
from .. import api
from .. import gsitk_compat
from .. import testing
from .. import config
logger = logging.getLogger(__name__)
@ -75,7 +76,7 @@ class PluginMeta(models.BaseMeta):
cls = super(PluginMeta, mcs).__new__(mcs, name, bases, attrs)
if alias in mcs._classes:
if os.environ.get('SENPY_TESTING', ""):
if config.testing:
raise Exception(
('The type of plugin {} already exists. '
'Please, choose a different name').format(name))
@ -122,7 +123,13 @@ class Plugin(with_metaclass(PluginMeta, models.Plugin)):
self._directory = os.path.abspath(
os.path.dirname(inspect.getfile(self.__class__)))
data_folder = data_folder or os.getcwd()
if not data_folder:
data_folder = config.data_folder
if not data_folder:
data_folder = os.getcwd()
data_folder = os.path.abspath(data_folder)
subdir = os.path.join(data_folder, self.name)
self._data_paths = [
@ -155,8 +162,11 @@ class Plugin(with_metaclass(PluginMeta, models.Plugin)):
return os.path.dirname(inspect.getfile(self.__class__))
def _activate(self):
if self.is_activated:
return
self.activate()
self.is_activated = True
return self.is_activated
def _deactivate(self):
self.is_activated = False
@ -200,6 +210,8 @@ class Plugin(with_metaclass(PluginMeta, models.Plugin)):
)
def test(self, test_cases=None):
if not self.is_activated:
self._activate()
if not test_cases:
if not hasattr(self, 'test_cases'):
raise AttributeError(
@ -260,11 +272,13 @@ class Plugin(with_metaclass(PluginMeta, models.Plugin)):
assert not should_fail
def find_file(self, fname):
tried = []
for p in self._data_paths:
alternative = os.path.join(p, fname)
alternative = os.path.abspath(os.path.join(p, fname))
if os.path.exists(alternative):
return alternative
raise IOError('File does not exist: {}'.format(fname))
tried.append(alternative)
raise IOError(f'File does not exist: {fname}. Tried: {tried}')
def path(self, fpath):
if not os.path.isabs(fpath):
@ -288,6 +302,28 @@ class Plugin(with_metaclass(PluginMeta, models.Plugin)):
SenpyPlugin = Plugin
class FailedPlugin(Plugin):
"""A plugin that has failed to initialize."""
version = 0
def __init__(self, info, function):
super().__init__(info)
a = info.get('name', info.get('module', self.name))
self['name'] = a
self._function = function
self.is_activated = False
def retry(self):
return self._function()
def test(self):
'''
A module that failed to load cannot be tested. But non-optional
plugins should not fail to load in strict mode.
'''
assert self.optional and not config.strict
class Analyser(Plugin):
'''
A subclass of Plugin that analyses text and provides an annotation.
@ -631,11 +667,17 @@ class ShelfMixin(object):
def shelf_file(self, value):
self._shelf_file = value
def save(self):
self.log.debug('Saving pickle')
if hasattr(self, '_sh') and self._sh is not None:
with self.open(self.shelf_file, 'wb') as f:
pickle.dump(self._sh, f)
def save(self, ignore_errors=False):
try:
self.log.debug('Saving pickle')
if hasattr(self, '_sh') and self._sh is not None:
with self.open(self.shelf_file, 'wb') as f:
pickle.dump(self._sh, f)
except Exception as ex:
self.log.warning("Could not save shelf state. Check folder permissions for: "
f" {self.shelf_file}. Error: { ex }")
if not ignore_errors:
raise
def pfilter(plugins, plugin_type=Analyser, **kwargs):
@ -697,23 +739,31 @@ def missing_requirements(reqs):
res = pool.apply_async(pkg_resources.get_distribution, (req,))
queue.append((req, res))
missing = []
installed = []
for req, job in queue:
try:
job.get(1)
installed.append(job.get(1))
except Exception:
missing.append(req)
return missing
return installed, missing
def install_deps(*plugins):
installed = False
def list_dependencies(*plugins):
'''List all dependencies (python and nltk) for the given list of plugins'''
nltk_resources = set()
requirements = []
missing = []
installed = []
for info in plugins:
requirements = info.get('requirements', [])
if requirements:
requirements += missing_requirements(requirements)
reqs = info.get('requirements', [])
if reqs:
inst, miss = missing_requirements(reqs)
installed += inst
missing += miss
nltk_resources |= set(info.get('nltk_resources', []))
return installed, missing, nltk_resources
def install_deps(*plugins):
_, requirements, nltk_resources = list_dependencies(*plugins)
installed = False
if requirements:
logger.info('Installing requirements: ' + str(requirements))
pip_args = [sys.executable, '-m', 'pip', 'install']
@ -727,8 +777,7 @@ def install_deps(*plugins):
if exitcode != 0:
raise models.Error(
"Dependencies not properly installed: {}".format(pip_args))
installed |= download(list(nltk_resources))
return installed
return installed or download(list(nltk_resources))
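A hypothetical call showing the new return shape of `list_dependencies` (the plugin info is a plain mapping and the package names are invented for illustration):

```python
info = {'requirements': ['pandas', 'no-such-package'],
        'nltk_resources': ['stopwords']}

installed, missing, nltk_resources = list_dependencies(info)
# installed      -> distributions found for the requirements
# missing        -> e.g. ['no-such-package']
# nltk_resources -> {'stopwords'}
```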
is_plugin_file = re.compile(r'.*\.senpy$|senpy_[a-zA-Z0-9_]+\.py$|'
@ -745,7 +794,7 @@ def find_plugins(folders):
yield fpath
def from_path(fpath, install_on_fail=False, **kwargs):
def from_path(fpath, **kwargs):
logger.debug("Loading plugin from {}".format(fpath))
if fpath.endswith('.py'):
# We assume root is the dir of the file, and module is the name of the file
@ -755,18 +804,18 @@ def from_path(fpath, install_on_fail=False, **kwargs):
yield instance
else:
info = parse_plugin_info(fpath)
yield from_info(info, install_on_fail=install_on_fail, **kwargs)
yield from_info(info, **kwargs)
def from_folder(folders, loader=from_path, **kwargs):
plugins = []
for fpath in find_plugins(folders):
for plugin in loader(fpath, **kwargs):
plugins.append(plugin)
if plugin:
plugins.append(plugin)
return plugins
def from_info(info, root=None, install_on_fail=True, **kwargs):
def from_info(info, root=None, strict=False, **kwargs):
if any(x not in info for x in ('module', )):
raise ValueError('Plugin info is not valid: {}'.format(info))
module = info["module"]
@ -778,8 +827,10 @@ def from_info(info, root=None, install_on_fail=True, **kwargs):
try:
return fun()
except (ImportError, LookupError):
install_deps(info)
return fun()
if strict or str(info.get("optional", "false")).lower() not in ["true", "t"]:
raise
print(f"Could not import plugin: {info}")
return FailedPlugin(info, fun)
def parse_plugin_info(fpath):

@ -0,0 +1,60 @@
# Plugin emotion-anew
This plugin consists of an **emotion classifier** that detects six possible emotions:
- Anger : general-dislike.
- Fear : negative-fear.
- Disgust : shame.
- Joy : gratitude, affective, enthusiasm, love, joy, liking.
- Sadness : ingrattitude, daze, humility, compassion, despair, anxiety, sadness.
- Neutral: no particular emotion detected.
The plugin uses the **ANEW lexicon** dictionary to calculate the VAD (valence-arousal-dominance) score of a sentence and determine which emotion is closest to that value. For this comparison, each emotion is assigned a centroid, calculated according to this article: http://www.aclweb.org/anthology/W10-0208.
The plugin looks up the words of the sentence that appear in the ANEW dictionary and computes the average VAD score for the sentence. It then selects the emotion whose centroid is closest to that score.
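To make the nearest-centroid step concrete, here is a minimal sketch; the centroid coordinates are placeholders for illustration, not the values from the cited article:

```python
import math

# Hypothetical VAD centroids (placeholder values, not the article's).
CENTROIDS = {
    'anger':   (2.5, 5.9, 5.0),
    'joy':     (8.2, 5.5, 7.0),
    'sadness': (2.4, 2.8, 3.8),
}

def closest_emotion(vad):
    """Return the emotion whose centroid is nearest (Euclidean distance)."""
    return min(CENTROIDS, key=lambda e: math.dist(CENTROIDS[e], vad))

print(closest_emotion((8.0, 5.0, 6.5)))  # -> joy
```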
The response of this plugin uses [Onyx ontology](https://www.gsi.dit.upm.es/ontologies/onyx/) developed at GSI UPM, to express the information.
## Installation
* Download
```
git clone https://lab.cluster.gsi.dit.upm.es/senpy/emotion-anew.git
```
* Get data
```
cd emotion-anew
git submodule update --init --recursive
```
* Run
```
docker run -p 5000:5000 -v $PWD:/plugins gsiupm/senpy:python2.7 -f /plugins
```
## Data format
`data/Corpus/affective-isear.tsv` contains data from ISEAR Databank: http://emotion-research.net/toolbox/toolboxdatabase.2006-10-13.2581092615
## Usage
Params accepted:
- Language: English (en) and Spanish (es).
- Input: input text to analyse.
Example request:
```
http://senpy.cluster.gsi.dit.upm.es/api/?algo=emotion-anew&language=en&input=I%20love%20Madrid
```
Example response: This plugin follows the standard senpy plugin response format. For more information, please visit the [senpy documentation](http://senpy.readthedocs.io), specifically the NIF API section.
## Known issues
- To obtain Anew dictionary you can download from here: <https://github.com/hcorona/SMC2015/blob/master/resources/ANEW2010All.txt>
- This plugin only supports **Python2**
![alt GSI Logo][logoGSI]
[logoES]: https://www.gsi.dit.upm.es/ontologies/onyx/img/eurosentiment_logo.png "EuroSentiment logo"
[logoGSI]: http://www.gsi.dit.upm.es/images/stories/logos/gsi.png "GSI Logo"

@ -0,0 +1,269 @@
# -*- coding: utf-8 -*-
import re
import nltk
import csv
import sys
import os
import unicodedata
import string
import xml.etree.ElementTree as ET
import math
from sklearn.svm import LinearSVC
from sklearn.feature_extraction import DictVectorizer
from nltk import bigrams
from nltk import trigrams
from nltk.corpus import stopwords
from pattern.en import parse as parse_en
from pattern.es import parse as parse_es
from senpy.plugins import EmotionPlugin, SenpyPlugin
from senpy.models import Results, EmotionSet, Entry, Emotion
### BEGIN WORKAROUND FOR PATTERN
# See: https://github.com/clips/pattern/issues/308
import os.path
import pattern.text
from pattern.helpers import decode_string
from codecs import BOM_UTF8
BOM_UTF8 = BOM_UTF8.decode("utf-8")
decode_utf8 = decode_string
MODEL = "emoml:pad-dimensions_"
VALENCE = f"{MODEL}_valence"
AROUSAL = f"{MODEL}_arousal"
DOMINANCE = f"{MODEL}_dominance"
def _read(path, encoding="utf-8", comment=";;;"):
"""Returns an iterator over the lines in the file at the given path,
stripping comments and decoding each line to Unicode.
"""
if path:
if isinstance(path, str) and os.path.exists(path):
# From file path.
f = open(path, "r", encoding="utf-8")
elif isinstance(path, str):
# From string.
f = path.splitlines()
else:
# From file or buffer.
f = path
for i, line in enumerate(f):
line = line.strip(BOM_UTF8) if i == 0 and isinstance(line, str) else line
line = line.strip()
line = decode_utf8(line, encoding)
if not line or (comment and line.startswith(comment)):
continue
yield line
pattern.text._read = _read
## END WORKAROUND
class ANEW(EmotionPlugin):
description = "This plugin consists on an emotion classifier using ANEW lexicon dictionary. It averages the VAD (valence-arousal-dominance) value of each word in the text that is also in the ANEW dictionary. To obtain a categorical value (e.g., happy) use the emotion conversion API (e.g., `emotion-model=emoml:big6`)."
author = "@icorcuera"
version = "0.5.2"
name = "emotion-anew"
extra_params = {
"language": {
"description": "language of the input",
"aliases": ["language", "l"],
"required": True,
"options": ["es","en"],
"default": "en"
}
}
anew_path_es = "Dictionary/Redondo(2007).csv"
anew_path_en = "Dictionary/ANEW2010All.txt"
onyx__usesEmotionModel = MODEL
nltk_resources = ['stopwords']
def activate(self, *args, **kwargs):
self._stopwords = stopwords.words('english')
dictionary={}
dictionary['es'] = {}
with self.open(self.anew_path_es,'r') as tabfile:
reader = csv.reader(tabfile, delimiter='\t')
for row in reader:
dictionary['es'][row[2]]={}
dictionary['es'][row[2]]['V']=row[3]
dictionary['es'][row[2]]['A']=row[5]
dictionary['es'][row[2]]['D']=row[7]
dictionary['en'] = {}
with self.open(self.anew_path_en,'r') as tabfile:
reader = csv.reader(tabfile, delimiter='\t')
for row in reader:
dictionary['en'][row[0]]={}
dictionary['en'][row[0]]['V']=row[2]
dictionary['en'][row[0]]['A']=row[4]
dictionary['en'][row[0]]['D']=row[6]
self._dictionary = dictionary
def _my_preprocessor(self, text):
regHttp = re.compile('(http://)[a-zA-Z0-9]*.[a-zA-Z0-9/]*(.[a-zA-Z0-9]*)?')
regHttps = re.compile('(https://)[a-zA-Z0-9]*.[a-zA-Z0-9/]*(.[a-zA-Z0-9]*)?')
regAt = re.compile('@([a-zA-Z0-9]*[*_/&%#@$]*)*[a-zA-Z0-9]*')
text = re.sub(regHttp, '', text)
text = re.sub(regAt, '', text)
text = re.sub('RT : ', '', text)
text = re.sub(regHttps, '', text)
text = re.sub('[0-9]', '', text)
text = self._delete_punctuation(text)
return text
def _delete_punctuation(self, text):
exclude = set(string.punctuation)
s = ''.join(ch for ch in text if ch not in exclude)
return s
def _extract_ngrams(self, text, lang):
unigrams_lemmas = []
unigrams_words = []
pos_tagged = []
if lang == 'es':
sentences = list(parse_es(text, lemmata=True).split())
else:
sentences = list(parse_en(text, lemmata=True).split())
for sentence in sentences:
for token in sentence:
if token[0].lower() not in self._stopwords:
unigrams_words.append(token[0].lower())
unigrams_lemmas.append(token[4])
pos_tagged.append(token[1])
return unigrams_lemmas,unigrams_words,pos_tagged
def _find_ngrams(self, input_list, n):
return zip(*[input_list[i:] for i in range(n)])
def _extract_features(self, tweet,dictionary,lang):
feature_set={}
ngrams_lemmas,ngrams_words,pos_tagged = self._extract_ngrams(tweet,lang)
pos_tags={'NN':'NN', 'NNS':'NN', 'JJ':'JJ', 'JJR':'JJ', 'JJS':'JJ', 'RB':'RB', 'RBR':'RB',
'RBS':'RB', 'VB':'VB', 'VBD':'VB', 'VGB':'VB', 'VBN':'VB', 'VBP':'VB', 'VBZ':'VB'}
totalVAD=[0,0,0]
matches=0
for word in range(len(ngrams_lemmas)):
VAD=[]
if ngrams_lemmas[word] in dictionary:
matches+=1
totalVAD = [totalVAD[0]+float(dictionary[ngrams_lemmas[word]]['V']),
totalVAD[1]+float(dictionary[ngrams_lemmas[word]]['A']),
totalVAD[2]+float(dictionary[ngrams_lemmas[word]]['D'])]
elif ngrams_words[word] in dictionary:
matches+=1
totalVAD = [totalVAD[0]+float(dictionary[ngrams_words[word]]['V']),
totalVAD[1]+float(dictionary[ngrams_words[word]]['A']),
totalVAD[2]+float(dictionary[ngrams_words[word]]['D'])]
if matches==0:
emotion='neutral'
else:
totalVAD=[totalVAD[0]/matches,totalVAD[1]/matches,totalVAD[2]/matches]
feature_set['V'] = totalVAD[0]
feature_set['A'] = totalVAD[1]
feature_set['D'] = totalVAD[2]
return feature_set
def analyse_entry(self, entry, activity):
params = activity.params
text_input = entry.text
text = self._my_preprocessor(text_input)
dictionary = self._dictionary[params['language']]
feature_set=self._extract_features(text, dictionary, params['language'])
emotions = EmotionSet()
emotions.id = "Emotions0"
emotion1 = Emotion(id="Emotion0")
emotion1[VALENCE] = feature_set['V']
emotion1[AROUSAL] = feature_set['A']
emotion1[DOMINANCE] = feature_set['D']
emotion1.prov(activity)
emotions.prov(activity)
emotions.onyx__hasEmotion.append(emotion1)
entry.emotions = [emotions, ]
yield entry
test_cases = [
{
'name': 'anger with VAD=(2.12, 6.95, 5.05)',
'input': 'I hate you',
'expected': {
'onyx:hasEmotionSet': [{
'onyx:hasEmotion': [{
AROUSAL: 6.95,
DOMINANCE: 5.05,
VALENCE: 2.12,
}]
}]
}
}, {
'input': 'i am sad',
'expected': {
'onyx:hasEmotionSet': [{
'onyx:hasEmotion': [{
f"{MODEL}_arousal": 4.13,
}]
}]
}
}, {
'name': 'joy',
'input': 'i am happy with my marks',
'expected': {
'onyx:hasEmotionSet': [{
'onyx:hasEmotion': [{
AROUSAL: 6.49,
DOMINANCE: 6.63,
VALENCE: 8.21,
}]
}]
}
}, {
'name': 'negative-fear',
'input': 'This movie is scary',
'expected': {
'onyx:hasEmotionSet': [{
'onyx:hasEmotion': [{
AROUSAL: 5.8100000000000005,
DOMINANCE: 4.33,
VALENCE: 5.050000000000001,
}]
}]
}
}, {
'name': 'negative-fear',
'input': 'this cake is disgusting' ,
'expected': {
'onyx:hasEmotionSet': [{
'onyx:hasEmotion': [{
AROUSAL: 5.09,
DOMINANCE: 4.4,
VALENCE: 5.109999999999999,
}]
}]
}
}
]

@ -0,0 +1,12 @@
---
module: emotion-anew
optional: true
requirements:
- numpy
- pandas
- nltk
- scipy
- scikit-learn
- textblob
- pattern
- lxml

@ -0,0 +1,179 @@
#!/usr/local/bin/python
# coding: utf-8
from future import standard_library
standard_library.install_aliases()
import os
import re
import sys
import string
import numpy as np
from six.moves import urllib
from nltk.corpus import stopwords
from senpy import EmotionBox, models
def ignore(dchars):
deletechars = "".join(dchars)
tbl = str.maketrans("", "", deletechars)
ignore = lambda s: s.translate(tbl)
return ignore
class DepecheMood(EmotionBox):
'''
Plugin that uses the DepecheMood emotion lexicon.
DepecheMood is an emotion lexicon automatically generated from news articles where users expressed their associated emotions. It contains two languages (English and Italian), as well as three types of word representations (token, lemma and lemma#PoS). For English, the lexicon contains 165k tokens, while the Italian version contains 116k. Unsupervised techniques can be applied to generate simple but effective baselines. To learn more, please visit https://github.com/marcoguerini/DepecheMood and http://www.depechemood.eu/
'''
author = 'Oscar Araque'
name = 'emotion-depechemood'
version = '0.1'
requirements = ['pandas']
optional = True
nltk_resources = ["stopwords"]
onyx__usesEmotionModel = 'wna:WNAModel'
EMOTIONS = ['wna:negative-fear',
'wna:amusement',
'wna:anger',
'wna:annoyance',
'wna:indifference',
'wna:joy',
'wna:awe',
'wna:sadness']
DM_EMOTIONS = ['AFRAID', 'AMUSED', 'ANGRY', 'ANNOYED', 'DONT_CARE', 'HAPPY', 'INSPIRED', 'SAD',]
def __init__(self, *args, **kwargs):
super(DepecheMood, self).__init__(*args, **kwargs)
self.LEXICON_URL = "https://github.com/marcoguerini/DepecheMood/raw/master/DepecheMood%2B%2B/DepecheMood_english_token_full.tsv"
self._denoise = ignore(set(string.punctuation)|set('«»'))
self._stop_words = []
self._lex_vocab = None
self._lex = None
def activate(self):
self._lex = self.download_lex()
self._lex_vocab = set(list(self._lex.keys()))
self._stop_words = stopwords.words('english') + ['']
def clean_str(self, string):
string = re.sub(r"[^A-Za-z0-9().,!?\'\`]", " ", string)
string = re.sub(r"[0-9]+", " num ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r"\.", " . ", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " ( ", string)
string = re.sub(r"\)", " ) ", string)
string = re.sub(r"\?", " ? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
def preprocess(self, text):
if text is None:
return None
tokens = self._denoise(self.clean_str(text)).split(' ')
tokens = [tok for tok in tokens if tok not in self._stop_words]
return tokens
def estimate_emotion(self, tokens, emotion):
s = []
for tok in tokens:
s.append(self._lex[tok][emotion])
# Average the lexicon scores; guard against empty input to avoid ZeroDivisionError.
divisor = len(s) if len(s) > 0 else 1
S = np.sum(s) / divisor
return S
def estimate_all_emotions(self, tokens):
S = []
intersection = set(tokens) & self._lex_vocab
for emotion in self.DM_EMOTIONS:
s = self.estimate_emotion(intersection, emotion)
S.append(s)
return S
def download_lex(self, file_path='DepecheMood_english_token_full.tsv', freq_threshold=10):
import pandas as pd
try:
file_path = self.find_file(file_path)
except IOError:
file_path = self.path(file_path)
filename, _ = urllib.request.urlretrieve(self.LEXICON_URL, file_path)
lexicon = pd.read_csv(file_path, sep='\t', index_col=0)
lexicon = lexicon[lexicon['freq'] >= freq_threshold]
lexicon.drop('freq', axis=1, inplace=True)
lexicon = lexicon.T.to_dict()
return lexicon
def predict_one(self, features, **kwargs):
tokens = self.preprocess(features[0])
estimation = self.estimate_all_emotions(tokens)
return estimation
test_cases = [
{
'entry': {
'nif:isString': 'My cat is very happy',
},
'expected': {
'onyx:hasEmotionSet': [
{
'onyx:hasEmotion': [
{
'onyx:hasEmotionCategory': 'wna:negative-fear',
'onyx:hasEmotionIntensity': 0.05278117640010922
},
{
'onyx:hasEmotionCategory': 'wna:amusement',
'onyx:hasEmotionIntensity': 0.2114806151413433,
},
{
'onyx:hasEmotionCategory': 'wna:anger',
'onyx:hasEmotionIntensity': 0.05726119426520887
},
{
'onyx:hasEmotionCategory': 'wna:annoyance',
'onyx:hasEmotionIntensity': 0.12295990731053638,
},
{
'onyx:hasEmotionCategory': 'wna:indifference',
'onyx:hasEmotionIntensity': 0.1860159893608025,
},
{
'onyx:hasEmotionCategory': 'wna:joy',
'onyx:hasEmotionIntensity': 0.12904050973724163,
},
{
'onyx:hasEmotionCategory': 'wna:awe',
'onyx:hasEmotionIntensity': 0.17973650399862967,
},
{
'onyx:hasEmotionCategory': 'wna:sadness',
'onyx:hasEmotionIntensity': 0.060724103786128455,
},
]
}
]
}
}
]
if __name__ == '__main__':
from senpy.utils import easy_test
easy_test(debug=False)

@ -0,0 +1,3 @@
FROM gsiupm/senpy:python{{PYVERSION}}
MAINTAINER manuel.garcia-amado.sancho@alumnos.upm.es

@ -0,0 +1,9 @@
NAME:=wnaffect
VERSIONFILE:=VERSION
IMAGENAME:=registry.cluster.gsi.dit.upm.es/senpy/emotion-wnaffect
PYVERSIONS:=2.7 3.5
DEVPORT:=5000
include .makefiles/base.mk
include .makefiles/k8s.mk
include .makefiles/python.mk

@ -0,0 +1,62 @@
# WordNet-Affect plugin
This plugin uses WordNet-Affect (http://wndomains.fbk.eu/wnaffect.html) to calculate the percentage of each emotion. The plugin classifies among five different emotions: anger, fear, disgust, joy and sadness. An emotion mapping has been used to enlarge the set of emotions:
- anger : general-dislike
- fear : negative-fear
- disgust : shame
- joy : gratitude, affective, enthusiasm, love, joy, liking
- sadness : ingrattitude, daze, humility, compassion, despair, anxiety, sadness
## Installation
* Download
```
git clone https://lab.cluster.gsi.dit.upm.es/senpy/emotion-wnaffect.git
```
* Get data
```
cd emotion-wnaffect
git submodule update --init --recursive
```
* Run
```
docker run -p 5000:5000 -v $PWD:/plugins gsiupm/senpy -f /plugins
```
## Data format
`data/a-hierarchy.xml` is an XML file
`data/a-synsets.xml` is an XML file
## Usage
The parameters accepted are:
- Language: English (en).
- Input: Text to analyse.
Example request:
```
http://senpy.cluster.gsi.dit.upm.es/api/?algo=emotion-wnaffect&language=en&input=I%20love%20Madrid
```
Example response: This plugin follows the standard senpy plugin response format. For more information, please visit the [senpy documentation](http://senpy.readthedocs.io), specifically the NIF API section.
The response of this plugin uses the [Onyx ontology](https://www.gsi.dit.upm.es/ontologies/onyx/), developed at GSI UPM for the semantic web.
This plugin uses WNAffect labels for emotion analysis.
The emotion-wnaffect.senpy file can be copied and modified to use different versions of wnaffect with the same python code.
## Known issues
- This plugin runs on **Python2.7** and **Python3.5**
- Wnaffect and corpora files are not included in the repository, but can be easily added either to the docker image (using a volume) or in a new docker image.
- You can download Wordnet 1.6 here: <http://wordnetcode.princeton.edu/1.6/wn16.unix.tar.gz> and extract the dict folder.
- The hierarchy and synsets files can be found here: <https://github.com/larsmans/wordnet-domains-sentiwords/tree/master/wn-domains/wn-affect-1.1>
![alt GSI Logo][logoGSI]
[logoGSI]: http://www.gsi.dit.upm.es/images/stories/logos/gsi.png "GSI Logo"

@ -0,0 +1,278 @@
# -*- coding: utf-8 -*-
from __future__ import division
import re
import nltk
import os
import string
import xml.etree.ElementTree as ET
from nltk.corpus import stopwords
from nltk.corpus import WordNetCorpusReader
from nltk.stem import wordnet
from emotion import Emotion as Emo
from senpy.plugins import EmotionPlugin, AnalysisPlugin, ShelfMixin
from senpy.models import Results, EmotionSet, Entry, Emotion
class WNAffect(EmotionPlugin, ShelfMixin):
'''
Emotion classifier using WordNet-Affect to calculate the percentage
of each emotion. This plugin classifies among 6 emotions: anger, fear, disgust,
joy, sadness or neutral. The only available language is English (en).
'''
name = 'emotion-wnaffect'
author = ["@icorcuera", "@balkian"]
version = '0.2'
extra_params = {
'language': {
"@id": 'lang_wnaffect',
'description': 'language of the input',
'aliases': ['language', 'l'],
'required': True,
'options': ['en',]
}
}
synsets_path = "a-synsets.xml"
hierarchy_path = "a-hierarchy.xml"
wn16_path = "wordnet1.6/dict"
onyx__usesEmotionModel = "emoml:big6"
nltk_resources = ['stopwords', 'averaged_perceptron_tagger', 'wordnet']
def _load_synsets(self, synsets_path):
"""Returns a dictionary POS tag -> synset offset -> emotion (str -> int -> str)."""
tree = ET.parse(synsets_path)
root = tree.getroot()
pos_map = {"noun": "NN", "adj": "JJ", "verb": "VB", "adv": "RB"}
synsets = {}
for pos in ["noun", "adj", "verb", "adv"]:
tag = pos_map[pos]
synsets[tag] = {}
for elem in root.findall(
".//{0}-syn-list//{0}-syn".format(pos, pos)):
offset = int(elem.get("id")[2:])
if not offset: continue
if elem.get("categ"):
synsets[tag][offset] = Emo.emotions[elem.get(
"categ")] if elem.get(
"categ") in Emo.emotions else None
elif elem.get("noun-id"):
synsets[tag][offset] = synsets[pos_map["noun"]][int(
elem.get("noun-id")[2:])]
return synsets
def _load_emotions(self, hierarchy_path):
"""Loads the hierarchy of emotions from the WordNet-Affect xml."""
tree = ET.parse(hierarchy_path)
root = tree.getroot()
for elem in root.findall("categ"):
name = elem.get("name")
if name == "root":
Emo.emotions["root"] = Emo("root")
else:
Emo.emotions[name] = Emo(name, elem.get("isa"))
def activate(self, *args, **kwargs):
self._stopwords = stopwords.words('english')
self._wnlemma = wordnet.WordNetLemmatizer()
self._syntactics = {'N': 'n', 'V': 'v', 'J': 'a', 'S': 's', 'R': 'r'}
local_path = os.environ.get("SENPY_DATA")
self._categories = {
'anger': [
'general-dislike',
],
'fear': [
'negative-fear',
],
'disgust': [
'shame',
],
'joy':
['gratitude', 'affective', 'enthusiasm', 'love', 'joy', 'liking'],
'sadness': [
'ingrattitude', 'daze', 'humility', 'compassion', 'despair',
'anxiety', 'sadness'
]
}
self._wnaffect_mappings = {
'anger': 'anger',
'fear': 'negative-fear',
'disgust': 'disgust',
'joy': 'joy',
'sadness': 'sadness'
}
self._load_emotions(self.find_file(self.hierarchy_path))
if 'total_synsets' not in self.sh:
total_synsets = self._load_synsets(self.find_file(self.synsets_path))
self.sh['total_synsets'] = total_synsets
self._total_synsets = self.sh['total_synsets']
self._wn16_path = self.wn16_path
self._wn16 = WordNetCorpusReader(self.find_file(self._wn16_path), nltk.data.find(self.find_file(self._wn16_path)))
def deactivate(self, *args, **kwargs):
self.save(ignore_errors=True)
def _my_preprocessor(self, text):
regHttp = re.compile(
'(http://)[a-zA-Z0-9]*.[a-zA-Z0-9/]*(.[a-zA-Z0-9]*)?')
regHttps = re.compile(
'(https://)[a-zA-Z0-9]*.[a-zA-Z0-9/]*(.[a-zA-Z0-9]*)?')
regAt = re.compile('@([a-zA-Z0-9]*[*_/&%#@$]*)*[a-zA-Z0-9]*')
text = re.sub(regHttp, '', text)
text = re.sub(regAt, '', text)
text = re.sub('RT : ', '', text)
text = re.sub(regHttps, '', text)
text = re.sub('[0-9]', '', text)
text = self._delete_punctuation(text)
return text
def _delete_punctuation(self, text):
exclude = set(string.punctuation)
s = ''.join(ch for ch in text if ch not in exclude)
return s
def _extract_ngrams(self, text):
unigrams_lemmas = []
pos_tagged = []
unigrams_words = []
tokens = text.split()
for token in nltk.pos_tag(tokens):
unigrams_words.append(token[0])
pos_tagged.append(token[1])
if token[1][0] in self._syntactics.keys():
unigrams_lemmas.append(
self._wnlemma.lemmatize(token[0], self._syntactics[token[1]
[0]]))
else:
unigrams_lemmas.append(token[0])
return unigrams_words, unigrams_lemmas, pos_tagged
def _find_ngrams(self, input_list, n):
return zip(*[input_list[i:] for i in range(n)])
def _clean_pos(self, pos_tagged):
pos_tags = {
'NN': 'NN',
'NNP': 'NN',
'NNP-LOC': 'NN',
'NNS': 'NN',
'JJ': 'JJ',
'JJR': 'JJ',
'JJS': 'JJ',
'RB': 'RB',
'RBR': 'RB',
'RBS': 'RB',
'VB': 'VB',
'VBD': 'VB',
'VGB': 'VB',
'VBN': 'VB',
'VBP': 'VB',
'VBZ': 'VB'
}
for i in range(len(pos_tagged)):
if pos_tagged[i] in pos_tags:
pos_tagged[i] = pos_tags[pos_tagged[i]]
return pos_tagged
def _extract_features(self, text):
feature_set = {k: 0 for k in self._categories}
ngrams_words, ngrams_lemmas, pos_tagged = self._extract_ngrams(text)
matches = 0
pos_tagged = self._clean_pos(pos_tagged)
tag_wn = {
'NN': self._wn16.NOUN,
'JJ': self._wn16.ADJ,
'VB': self._wn16.VERB,
'RB': self._wn16.ADV
}
for i in range(len(pos_tagged)):
if pos_tagged[i] in tag_wn:
synsets = self._wn16.synsets(ngrams_words[i],
tag_wn[pos_tagged[i]])
if synsets:
offset = synsets[0].offset()
if offset in self._total_synsets[pos_tagged[i]]:
if self._total_synsets[pos_tagged[i]][offset] is None:
continue
else:
emotion = self._total_synsets[pos_tagged[i]][
offset].get_level(5).name
matches += 1
for i in self._categories:
if emotion in self._categories[i]:
feature_set[i] += 1
if matches == 0:
matches = 1
for i in feature_set:
feature_set[i] = (feature_set[i] / matches)
return feature_set
def analyse_entry(self, entry, activity):
params = activity.params
text_input = entry['nif:isString']
text = self._my_preprocessor(text_input)
feature_text = self._extract_features(text)
emotionSet = EmotionSet(id="Emotions0")
emotions = emotionSet.onyx__hasEmotion
for i in feature_text:
emotions.append(
Emotion(
onyx__hasEmotionCategory=self._wnaffect_mappings[i],
onyx__hasEmotionIntensity=feature_text[i]))
entry.emotions = [emotionSet]
yield entry
def test(self, *args, **kwargs):
results = list()
params = {'algo': 'emotion-wnaffect',
'intype': 'direct',
'expanded-jsonld': 0,
'informat': 'text',
'prefix': '',
'plugin_type': 'analysisPlugin',
'urischeme': 'RFC5147String',
'outformat': 'json-ld',
'i': 'Hello World',
'input': 'Hello World',
'conversion': 'full',
'language': 'en',
'algorithm': 'emotion-wnaffect'}
self.activate()
texts = {'I hate you': 'anger',
'i am sad': 'sadness',
'i am happy with my marks': 'joy',
'This movie is scary': 'negative-fear'}
for text in texts:
response = next(self.analyse_entry(Entry(nif__isString=text),
self.activity(params)))
expected = texts[text]
emotionSet = response.emotions[0]
max_emotion = max(emotionSet['onyx:hasEmotion'], key=lambda x: x['onyx:hasEmotionIntensity'])
assert max_emotion['onyx:hasEmotionCategory'] == expected

@ -0,0 +1,7 @@
---
module: emotion-wnaffect
optional: true
requirements:
- nltk>=3.0.5
- lxml>=3.4.2
async: false

@ -0,0 +1,95 @@
# -*- coding: utf-8 -*-
"""
Clement Michard (c) 2015
"""
class Emotion:
"""Defines an emotion."""
emotions = {} # name to emotion (str -> Emotion)
def __init__(self, name, parent_name=None):
"""Initializes an Emotion object.
name -- name of the emotion (str)
parent_name -- name of the parent emotion (str)
"""
self.name = name
self.parent = None
self.level = 0
self.children = []
if parent_name:
self.parent = Emotion.emotions[parent_name] if parent_name else None
self.parent.children.append(self)
self.level = self.parent.level + 1
def get_level(self, level):
"""Returns the parent of self at the given level.
level -- level in the hierarchy (int)
"""
em = self
while em.level > level and em.level >= 0:
em = em.parent
return em
def __str__(self):
"""Returns the emotion string formatted."""
return self.name
def nb_children(self):
"""Returns the number of children of the emotion."""
return sum(child.nb_children() for child in self.children) + 1
@staticmethod
def printTree(emotion=None, indent="", last='updown'):
"""Prints the hierarchy of emotions.
emotion -- root emotion (Emotion)
"""
if not emotion:
emotion = Emotion.emotions["root"]
size_branch = {child: child.nb_children() for child in emotion.children}
leaves = sorted(emotion.children, key=lambda emotion: emotion.nb_children())
up, down = [], []
if leaves:
while sum(size_branch[e] for e in down) < sum(size_branch[e] for e in leaves):
down.append(leaves.pop())
up = leaves
for leaf in up:
next_last = 'up' if up.index(leaf) == 0 else ''
next_indent = '{0}{1}{2}'.format(indent, ' ' if 'up' in last else '│', " " * len(emotion.name))
Emotion.printTree(leaf, indent=next_indent, last=next_last)
if last == 'up':
start_shape = '┌'
elif last == 'down':
start_shape = '└'
elif last == 'updown':
start_shape = ' '
else:
start_shape = '├'
if up:
end_shape = '┤'
elif down:
end_shape = '┐'
else:
end_shape = ''
print('{0}{1}{2}{3}'.format(indent, start_shape, emotion.name, end_shape))
for leaf in down:
next_last = 'down' if down.index(leaf) == len(down) - 1 else ''
next_indent = '{0}{1}{2}'.format(indent, ' ' if 'down' in last else '│', " " * len(emotion.name))
Emotion.printTree(leaf, indent=next_indent, last=next_last)
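A minimal usage sketch of the hierarchy above (the emotion names are illustrative, not taken from the WordNet-Affect files):
```
from emotion import Emotion

Emotion.emotions["root"] = Emotion("root")
Emotion.emotions["joy"] = Emotion("joy", "root")
Emotion.emotions["euphoria"] = Emotion("euphoria", "joy")

print(Emotion.emotions["euphoria"].get_level(1))  # -> joy (ancestor at level 1)
Emotion.printTree()  # renders the three-node tree with box-drawing characters
```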

@ -0,0 +1,94 @@
# -*- coding: utf-8 -*-
"""
Clement Michard (c) 2015
"""
import os
import sys
import nltk
from emotion import Emotion
from nltk.corpus import WordNetCorpusReader
import xml.etree.ElementTree as ET
class WNAffect:
"""WordNet-Affect resource."""
nltk_resources = ['averaged_perceptron_tagger']
def __init__(self, wordnet16_dir, wn_domains_dir):
"""Initializes the WordNet-Affect object."""
cwd = os.getcwd()
nltk.data.path.append(cwd)
wn16_path = "{0}/dict".format(wordnet16_dir)
self.wn16 = WordNetCorpusReader(os.path.abspath("{0}/{1}".format(cwd, wn16_path)), nltk.data.find(wn16_path))
self.flat_pos = {'NN':'NN', 'NNS':'NN', 'JJ':'JJ', 'JJR':'JJ', 'JJS':'JJ', 'RB':'RB', 'RBR':'RB', 'RBS':'RB', 'VB':'VB', 'VBD':'VB', 'VBG':'VB', 'VBN':'VB', 'VBP':'VB', 'VBZ':'VB'}
self.wn_pos = {'NN':self.wn16.NOUN, 'JJ':self.wn16.ADJ, 'VB':self.wn16.VERB, 'RB':self.wn16.ADV}
self._load_emotions(wn_domains_dir)
self.synsets = self._load_synsets(wn_domains_dir)
def _load_synsets(self, wn_domains_dir):
"""Returns a dictionary POS tag -> synset offset -> emotion (str -> int -> str)."""
tree = ET.parse("{0}/a-synsets.xml".format(wn_domains_dir))
root = tree.getroot()
pos_map = { "noun": "NN", "adj": "JJ", "verb": "VB", "adv": "RB" }
synsets = {}
for pos in ["noun", "adj", "verb", "adv"]:
tag = pos_map[pos]
synsets[tag] = {}
for elem in root.findall(".//{0}-syn-list//{0}-syn".format(pos)):
offset = int(elem.get("id")[2:])
if not offset: continue
if elem.get("categ"):
synsets[tag][offset] = Emotion.emotions[elem.get("categ")] if elem.get("categ") in Emotion.emotions else None
elif elem.get("noun-id"):
synsets[tag][offset] = synsets[pos_map["noun"]][int(elem.get("noun-id")[2:])]
return synsets
def _load_emotions(self, wn_domains_dir):
"""Loads the hierarchy of emotions from the WordNet-Affect xml."""
tree = ET.parse("{0}/a-hierarchy.xml".format(wn_domains_dir))
root = tree.getroot()
for elem in root.findall("categ"):
name = elem.get("name")
if name == "root":
Emotion.emotions["root"] = Emotion("root")
else:
Emotion.emotions[name] = Emotion(name, elem.get("isa"))
def get_emotion(self, word, pos):
"""Returns the emotion of the word.
word -- the word (str)
pos -- part-of-speech (str)
"""
if pos in self.flat_pos:
pos = self.flat_pos[pos]
synsets = self.wn16.synsets(word, self.wn_pos[pos])
if synsets:
offset = synsets[0].offset()
if offset in self.synsets[pos]:
return self.synsets[pos][offset]
return None
if __name__ == "__main__":
wordnet16, wndomains32, word, pos = sys.argv[1:5]
wna = WNAffect(wordnet16, wndomains32)
print(wna.get_emotion(word, pos))
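For use as a library rather than a script, a minimal sketch; the two resource paths are assumptions and must point to local copies of WordNet 1.6 and WordNet-Domains:
```
from wnaffect import WNAffect

wna = WNAffect('wordnet-1.6', 'wn-domains-3.2')  # hypothetical local paths
emotion = wna.get_emotion('anger', 'NN')
print(emotion)  # an Emotion instance, or None if the word has no affect label
```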

@ -0,0 +1,28 @@
# Sentiment basic plugin
This plugin is based on the classifier developed for the TASS 2015 competition, and supports Spanish and English. It is a demo plugin that uses only some of the features of the TASS 2015 classifier. To use the fully functional classifier, you can use the service at: http://senpy.cluster.gsi.dit.upm.es
More information is available in:
- Aspect based Sentiment Analysis of Spanish Tweets, Oscar Araque and Ignacio Corcuera-Platas and Constantino Román-Gómez and Carlos A. Iglesias and J. Fernando Sánchez-Rada. http://gsi.dit.upm.es/es/investigacion/publicaciones?view=publication&task=show&id=376
## Usage
Params accepted:
- Language: Spanish (es).
- Input: text to analyse.
Example request:
```
http://senpy.cluster.gsi.dit.upm.es/api/?algo=sentiment-basic&language=es&input=I%20love%20Madrid
```
Example response: This plugin follows the standard for the senpy plugin response. For more information, please visit [senpy documentation](http://senpy.readthedocs.io). Specifically, the NIF API section.
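For instance, a minimal sketch of the same request from Python, assuming the demo service above is reachable:
```
import requests

resp = requests.get('http://senpy.cluster.gsi.dit.upm.es/api/',
                    params={'algo': 'sentiment-basic',
                            'language': 'es',
                            'input': 'I love Madrid'})
print(resp.json())  # JSON-LD response with marl polarity annotations
```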
This plugin only supports **python2**
![alt GSI Logo][logoGSI]
[logoGSI]: http://www.gsi.dit.upm.es/images/stories/logos/gsi.png "GSI Logo"

@ -0,0 +1,177 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import string
import nltk
import pickle
from sentiwn import SentiWordNet
from nltk.corpus import wordnet as wn
from textblob import TextBlob
from scipy.interpolate import interp1d
from os import path
from senpy.plugins import SentimentBox, SenpyPlugin
from senpy.models import Results, Entry, Sentiment, Error
if sys.version_info[0] >= 3:
unicode = str
class SentimentBasic(SentimentBox):
'''
Rule-based sentiment classifier for Spanish, based on translation into English and SentiWordNet sentiment knowledge. This is a demo plugin that uses only some features from the TASS 2015 classifier. To use the fully functional classifier, you can use the service at: http://senpy.cluster.gsi.dit.upm.es.
'''
name = "sentiment-basic"
author = "github.com/nachtkatze"
version = "0.1.1"
extra_params = {
"language": {
"description": "language of the text",
"aliases": ["language", "l"],
"required": True,
"options": ["en","es", "it", "fr"],
"default": "en"
}
}
sentiword_path = "SentiWordNet_3.0.txt"
pos_path = "unigram_spanish.pickle"
maxPolarityValue = 1
minPolarityValue = -1
nltk_resources = ['punkt','wordnet', 'omw', 'omw-1.4']
with_polarity = False
def _load_swn(self):
self.swn_path = self.find_file(self.sentiword_path)
swn = SentiWordNet(self.swn_path)
return swn
def _load_pos_tagger(self):
self.pos_path = self.find_file(self.pos_path)
with open(self.pos_path, 'rb') as f:
tagger = pickle.load(f)
return tagger
def activate(self, *args, **kwargs):
self._swn = self._load_swn()
self._pos_tagger = self._load_pos_tagger()
def _remove_punctuation(self, tokens):
return [t for t in tokens if t not in string.punctuation]
def _tokenize(self, text):
sentence_ = {}
words = nltk.word_tokenize(text)
sentence_['sentence'] = text
tokens_ = [w.lower() for w in words]
sentence_['tokens'] = self._remove_punctuation(tokens_)
return sentence_
def _pos(self, tokens):
tokens['tokens'] = self._pos_tagger.tag(tokens['tokens'])
return tokens
def _compare_synsets(self, synsets, tokens):
for synset in synsets:
for word, lemmas in tokens['lemmas'].items():
for lemma in lemmas:
synset_ = lemma.synset()
if synset == synset_:
return synset
return None
def predict_one(self, features, activity):
language = activity.param("language")
text = features[0]
tokens = self._tokenize(text)
tokens = self._pos(tokens)
suffixes = {'es': 'spa', 'en': 'eng', 'it': 'ita', 'fr': 'fra'}
tokens['lemmas'] = {}
for w in tokens['tokens']:
lemmas = wn.lemmas(w[0], lang=suffixes[language])
if len(lemmas) == 0:
continue
tokens['lemmas'][w[0]] = lemmas
if language == "en":
trans = TextBlob(unicode(text))
else:
try:
trans = TextBlob(unicode(text)).translate(from_lang=language,to='en')
except Exception as ex:
raise Error('Could not translate the text from "{}" to "{}": {}'.format(language,
'en',
str(ex)))
useful_synsets = {}
for t_w in trans.sentences[0].words:
synsets = wn.synsets(t_w)
if len(synsets) == 0:
continue
eq_synset = self._compare_synsets(synsets, tokens)
useful_synsets[t_w] = eq_synset
scores = {}
if useful_synsets is not None:
for word in useful_synsets:
if useful_synsets[word] is None:
continue
temp_scores = self._swn.get_score(useful_synsets[word].name().split('.')[0].replace(' ',' '))
for score in temp_scores:
if score['synset'] == useful_synsets[word]:
t_score = score['pos'] - score['neg']
f_score = 'neu'
if t_score > 0:
f_score = 'pos'
elif t_score < 0:
f_score = 'neg'
score['score'] = f_score
scores[word] = score
break
g_score = 0.5
if scores:
n_pos = 0.0
n_neg = 0.0
for w in scores:
if scores[w]['score'] == 'pos':
n_pos += 1.0
elif scores[w]['score'] == 'neg':
n_neg += 1.0
inter = interp1d([-1.0, 1.0], [0.0, 1.0])
try:
g_score = float(inter((n_pos - n_neg) / (n_pos + n_neg)))
except ZeroDivisionError:
g_score = 0.5
if g_score > 0.5: # Positive
return [1, 0, 0]
elif g_score < 0.5: # Negative
return [0, 0, 1]
else:
return [0, 1, 0]
test_cases = [
{
'input': 'Odio ir al cine',
'params': {'language': 'es'},
'polarity': 'marl:Negative'
},
{
'input': 'El cielo está nublado',
'params': {'language': 'es'},
'polarity': 'marl:Neutral'
},
{
'input': 'Esta tarta está muy buena',
'params': {'language': 'es'},
'polarity': 'marl:Negative' # SURPRISINGLY!
}
]
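The global score above maps the positive/negative lemma ratio from [-1, 1] onto [0, 1]; a standalone check of that mapping:
```
from scipy.interpolate import interp1d

inter = interp1d([-1.0, 1.0], [0.0, 1.0])
# e.g. 3 positive and 1 negative lemma: (3 - 1) / (3 + 1) = 0.5 -> 0.75 (> 0.5, positive)
print(float(inter((3.0 - 1.0) / (3.0 + 1.0))))  # 0.75
```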

@ -0,0 +1,8 @@
---
module: sentiment-basic
optional: true
requirements:
- nltk>=3.0.5
- scipy>=0.14.0
- textblob

@ -0,0 +1,70 @@
#!/usr/bin/env python
"""
Author : Jaganadh Gopinadhan <jaganadhg@gmail.com>
Copyright (C) : Jaganadh Gopinadhan
Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys,os
import re
from nltk.corpus import wordnet
class SentiWordNet(object):
"""
Interface to SentiWordNet
"""
def __init__(self,swn_file):
"""
"""
self.swn_file = swn_file
self.pos_synset = self.__parse_swn_file()
def __parse_swn_file(self):
"""
Parse the SentiWordNet file and populate the POS and SynsetID hash
"""
pos_synset_hash = {}
with open(self.swn_file, 'r') as swn_file:
swn_data = swn_file.readlines()
head_less_swn_data = filter((lambda line: not re.search(r"^\s*#",\
line)), swn_data)
for data in head_less_swn_data:
fields = data.strip().split("\t")
try:
pos,syn_set_id,pos_score,neg_score,syn_set_score,\
gloss = fields
except ValueError:
print("Found data without all details")
continue
if pos and syn_set_score:
pos_synset_hash[(pos,int(syn_set_id))] = (float(pos_score),\
float(neg_score))
return pos_synset_hash
def get_score(self,word,pos=None):
"""
Get score for a given word/word pos combination
"""
senti_scores = []
synsets = wordnet.synsets(word,pos)
for synset in synsets:
if (synset.pos(), synset.offset()) in self.pos_synset:
pos_val, neg_val = self.pos_synset[(synset.pos(), synset.offset())]
senti_scores.append({"pos":pos_val,"neg":neg_val,\
"obj": 1.0 - (pos_val - neg_val),'synset':synset})
return senti_scores
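A minimal sketch of querying this wrapper directly, assuming the data file shipped with the plugin above:
```
from sentiwn import SentiWordNet

swn = SentiWordNet('SentiWordNet_3.0.txt')
for entry in swn.get_score('good'):
    print(entry['synset'], entry['pos'], entry['neg'], entry['obj'])
```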

@ -0,0 +1,280 @@
# -*- coding: utf-8 -*-
'''
The MeaningCloud plugin uses the MeaningCloud API to perform sentiment analysis.
For more information about MeaningCloud and its services, please visit: https://www.meaningcloud.com/developer/apis
## Usage
To use this plugin, you need to obtain an API key from MeaningCloud by signing up here: https://www.meaningcloud.com/developer/login
Once you have obtained the MeaningCloud API key, you have to provide it to the plugin using the param **apiKey**.
Requests are sent via GET with the following parameters:
Params:
- Language: English (en) and Spanish (es). (default: en)
- API Key: the API key from MeaningCloud. Aliases: ["apiKey", "meaningCloud-key"]. (required)
- Input: text to analyse. (required)
- Model: model provided to Meaning Cloud API (for general domain). (default: general)
## Example of Usage
Example request:
```
http://senpy.gsi.upm.es/api/?algo=meaningCloud&language=en&apiKey=<put here your API key>&input=I%20love%20Madrid
```
'''
import time
import requests
import json
import string
import os
from os import path
from senpy.plugins import SentimentPlugin
from senpy.models import Results, Entry, Entity, Topic, Sentiment, Error
from senpy.utils import check_template
class MeaningCloudPlugin(SentimentPlugin):
'''
Sentiment analysis with the MeaningCloud service.
To use this plugin, you need to obtain an API key from MeaningCloud by signing up here:
https://www.meaningcloud.com/developer/login
Once you have obtained the MeaningCloud API key, provide it to the plugin using the apiKey param.
Example request:
http://senpy.cluster.gsi.dit.upm.es/api/?algo=meaningCloud&language=en&apiKey=YOUR_API_KEY&input=I%20love%20Madrid.
'''
name = 'sentiment-meaningcloud'
author = 'GSI UPM'
version = "1.1"
maxPolarityValue = 1
minPolarityValue = -1
extra_params = {
"language": {
"description": "language of the input",
"aliases": ["language", "l"],
"required": True,
"options": ["en","es","ca","it","pt","fr","auto"],
"default": "auto"
},
"apikey":{
"description": "API key for the meaningcloud service. See https://www.meaningcloud.com/developer/login",
"aliases": ["apiKey", "meaningcloud-key", "meaningcloud-apikey"],
"required": True
}
}
def _polarity(self, value):
if 'NONE' in value:
polarity = 'marl:Neutral'
polarityValue = 0
elif 'N' in value:
polarity = 'marl:Negative'
polarityValue = -1
elif 'P' in value:
polarity = 'marl:Positive'
polarityValue = 1
return polarity, polarityValue
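# e.g. _polarity('P+') -> ('marl:Positive', 1) and _polarity('N') -> ('marl:Negative', -1);
# 'NONE' must be checked first, since it also contains the letter 'N'.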
def analyse_entry(self, entry, activity):
params = activity.params
txt = entry['nif:isString']
api = 'http://api.meaningcloud.com/'
lang = params.get("language")
model = "general"
key = params["apikey"]
parameters = {
'key': key,
'model': model,
'lang': lang,
'of': 'json',
'txt': txt,
'tt': 'a'
}
try:
r = requests.post(
api + "sentiment-2.1", params=parameters, timeout=3)
parameters['lang'] = r.json()['model'].split('_')[1]
lang = parameters['lang']
r2 = requests.post(
api + "topics-2.0", params=parameters, timeout=3)
except requests.exceptions.Timeout:
raise Error("Meaning Cloud API does not response")
api_response = r.json()
api_response_topics = r2.json()
if not api_response.get('score_tag'):
raise Error(r.json())
entry['language_detected'] = lang
self.log.debug(api_response)
agg_polarity, agg_polarityValue = self._polarity(
api_response.get('score_tag', None))
agg_opinion = Sentiment(
id="Opinion0",
marl__hasPolarity=agg_polarity,
marl__polarityValue=agg_polarityValue,
marl__opinionCount=len(api_response['sentence_list']))
agg_opinion.prov(self)
entry.sentiments.append(agg_opinion)
self.log.debug(api_response['sentence_list'])
count = 1
for sentence in api_response['sentence_list']:
for nopinion in sentence['segment_list']:
self.log.debug(nopinion)
polarity, polarityValue = self._polarity(
nopinion.get('score_tag', None))
opinion = Sentiment(
id="Opinion{}".format(count),
marl__hasPolarity=polarity,
marl__polarityValue=polarityValue,
marl__aggregatesOpinion=agg_opinion.get('id'),
nif__anchorOf=nopinion.get('text', None),
nif__beginIndex=int(nopinion.get('inip', None)),
nif__endIndex=int(nopinion.get('endp', None)))
count += 1
opinion.prov(self)
entry.sentiments.append(opinion)
mapper = {'es': 'es.', 'en': '', 'ca': 'es.', 'it':'it.', 'fr':'fr.', 'pt':'pt.'}
for sent_entity in api_response_topics['entity_list']:
resource = "_".join(sent_entity.get('form', None).split())
entity = Entity(
id="Entity{}".format(sent_entity.get('id')),
itsrdf__taIdentRef="http://{}dbpedia.org/resource/{}".format(
mapper[lang], resource),
nif__anchorOf=sent_entity.get('form', None),
nif__beginIndex=int(sent_entity['variant_list'][0].get('inip', None)),
nif__endIndex=int(sent_entity['variant_list'][0].get('endp', None)))
sementity = sent_entity['sementity'].get('type', None).split(">")[-1]
entity['@type'] = "ODENTITY_{}".format(sementity)
entity.prov(self)
if 'senpy:hasEntity' not in entry:
entry['senpy:hasEntity'] = []
entry['senpy:hasEntity'].append(entity)
for topic in api_response_topics['concept_list']:
if 'semtheme_list' in topic:
for theme in topic['semtheme_list']:
concept = Topic()
concept.id = "Topic{}".format(topic.get('id'))
concept['@type'] = "ODTHEME_{}".format(theme['type'].split(">")[-1])
concept['fam:topic-reference'] = "http://dbpedia.org/resource/{}".format(theme['type'].split('>')[-1])
entry.prov(self)
if 'senpy:hasTopic' not in entry:
entry['senpy:hasTopic'] = []
entry['senpy:hasTopic'].append(concept)
yield entry
test_cases = [
{
'params': {
'algo': 'sentiment-meaningCloud',
'intype': 'direct',
'expanded-jsonld': 0,
'informat': 'text',
'prefix': '',
'plugin_type': 'analysisPlugin',
'urischeme': 'RFC5147String',
'outformat': 'json-ld',
'conversion': 'full',
'language': 'en',
'apikey': '00000',
'algorithm': 'sentiment-meaningCloud'
},
'input': 'Hello World Obama',
'expected': {
'marl:hasOpinion': [
{'marl:hasPolarity': 'marl:Neutral'}],
'senpy:hasEntity': [
{'itsrdf:taIdentRef': 'http://dbpedia.org/resource/Obama'}],
'senpy:hasTopic': [
{'fam:topic-reference': 'http://dbpedia.org/resource/Astronomy'}]
},
'responses': [
{
'url': 'http://api.meaningcloud.com/sentiment-2.1',
'method': 'POST',
'json': {
'model': 'general_en',
'sentence_list': [{
'text': 'Hello World',
'endp': '10',
'inip': '0',
'segment_list': [{
'text': 'Hello World',
'segment_type': 'secondary',
'confidence': '100',
'inip': '0',
'agreement': 'AGREEMENT',
'endp': '10',
'polarity_term_list': [],
'score_tag': 'NONE'
}],
'score_tag': 'NONE',
}],
'score_tag': 'NONE'
}
}, {
'url': 'http://api.meaningcloud.com/topics-2.0',
'method': 'POST',
'json': {
'entity_list': [{
'form': 'Obama',
'id': '__1265958475430276310',
'variant_list': [{
'endp': '16',
'form': 'Obama',
'inip': '12'
}],
'sementity': {
'fiction': 'nonfiction',
'confidence': 'uncertain',
'class': 'instance',
'type': 'Top>Person'
}
}],
'concept_list': [{
'form': 'world',
'id': '5c053cd39d',
'relevance': '100',
'semtheme_list': [{
'id': 'ODTHEME_ASTRONOMY',
'type': 'Top>NaturalSciences>Astronomy'
}]
}],
}
}]
}
]
if __name__ == '__main__':
from senpy import easy_test
easy_test()
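For reference, a minimal sketch of the same MeaningCloud call the plugin makes, outside of senpy; the API key is a placeholder:
```
import requests

params = {'key': 'YOUR_API_KEY', 'model': 'general', 'lang': 'en',
          'of': 'json', 'txt': 'Hello World Obama', 'tt': 'a'}
r = requests.post('http://api.meaningcloud.com/sentiment-2.1',
                  params=params, timeout=3)
print(r.json().get('score_tag'))  # e.g. 'P', 'N' or 'NONE'
```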

@ -0,0 +1,42 @@
# Sentiment-vader plugin
Vader is a plugin developed at GSI UPM for sentiment analysis.
The response of this plugin uses the [Marl ontology](https://www.gsi.dit.upm.es/ontologies/marl/), developed at GSI UPM for the semantic web.
## Acknowledgements
This plugin uses the vaderSentiment module underneath, which is described in the paper:
VADER: A Parsimonious Rule-based Model for Sentiment Analysis of Social Media Text
C.J. Hutto and Eric Gilbert
Eighth International Conference on Weblogs and Social Media (ICWSM-14). Ann Arbor, MI, June 2014.
If you use this plugin in your research, please cite the above paper.
For more information about the functionality, check the official repository
https://github.com/cjhutto/vaderSentiment
## Usage
Parameters:
- Language: es (Spanish), en (English).
- Input: Text to analyse.
Example request:
```
http://senpy.cluster.gsi.dit.upm.es/api/?algo=sentiment-vader&language=en&input=I%20love%20Madrid
```
Example response: This plugin follows the standard for the senpy plugin response. For more information, please visit [senpy documentation](http://senpy.readthedocs.io). Specifically, the NIF API section.
This plugin supports **python3**
![alt GSI Logo][logoGSI]
[logoGSI]: http://www.gsi.dit.upm.es/images/stories/logos/gsi.png "GSI Logo"

@ -0,0 +1,368 @@
#!/usr/bin/python
# coding: utf-8
'''
Created on July 04, 2013
@author: C.J. Hutto
Citation Information
If you use any of the VADER sentiment analysis tools
(VADER sentiment lexicon or Python code for rule-based sentiment
analysis engine) in your work or research, please cite the paper.
For example:
Hutto, C.J. & Gilbert, E.E. (2014). VADER: A Parsimonious Rule-based Model for
Sentiment Analysis of Social Media Text. Eighth International Conference on
Weblogs and Social Media (ICWSM-14). Ann Arbor, MI, June 2014.
'''
import os, math, re, sys, fnmatch, string
import codecs
def make_lex_dict(f):
maps = {}
with codecs.open(f, encoding='iso-8859-1') as f:
for wmsr in f:
w, m = wmsr.strip().split('\t')[:2]
maps[w] = m
return maps
f = 'vader_sentiment_lexicon.txt' # empirically derived valence ratings for words, emoticons, slang, swear words, acronyms/initialisms
try:
word_valence_dict = make_lex_dict(f)
except IOError:
f = os.path.join(os.path.dirname(__file__),'vader_sentiment_lexicon.txt')
word_valence_dict = make_lex_dict(f)
# for removing punctuation
regex_remove_punctuation = re.compile('[%s]' % re.escape(string.punctuation))
def sentiment(text):
"""
Returns a float for sentiment strength based on the input text.
Positive values indicate positive valence; negative values indicate negative valence.
"""
wordsAndEmoticons = str(text).split() #doesn't separate words from adjacent punctuation (keeps emoticons & contractions)
text_mod = regex_remove_punctuation.sub('', text) # removes punctuation (but loses emoticons & contractions)
wordsOnly = str(text_mod).split()
# get rid of empty items or single letter "words" like 'a' and 'I' from wordsOnly
wordsOnly = [word for word in wordsOnly if len(word) > 1]
# now remove adjacent & redundant punctuation from [wordsAndEmoticons] while keeping emoticons and contractions
puncList = [".", "!", "?", ",", ";", ":", "-", "'", "\"",
"!!", "!!!", "??", "???", "?!?", "!?!", "?!?!", "!?!?"]
for word in wordsOnly:
for p in puncList:
pword = p + word
x1 = wordsAndEmoticons.count(pword)
while x1 > 0:
i = wordsAndEmoticons.index(pword)
wordsAndEmoticons.remove(pword)
wordsAndEmoticons.insert(i, word)
x1 = wordsAndEmoticons.count(pword)
wordp = word + p
x2 = wordsAndEmoticons.count(wordp)
while x2 > 0:
i = wordsAndEmoticons.index(wordp)
wordsAndEmoticons.remove(wordp)
wordsAndEmoticons.insert(i, word)
x2 = wordsAndEmoticons.count(wordp)
# get rid of residual empty items or single letter "words" like 'a' and 'I' from wordsAndEmoticons
wordsAndEmoticons = [word for word in wordsAndEmoticons if len(word) > 1]
# remove stopwords from [wordsAndEmoticons]
#stopwords = [str(word).strip() for word in open('stopwords.txt')]
#for word in wordsAndEmoticons:
# if word in stopwords:
# wordsAndEmoticons.remove(word)
# check for negation
negate = ["aint", "arent", "cannot", "cant", "couldnt", "darent", "didnt", "doesnt",
"ain't", "aren't", "can't", "couldn't", "daren't", "didn't", "doesn't",
"dont", "hadnt", "hasnt", "havent", "isnt", "mightnt", "mustnt", "neither",
"don't", "hadn't", "hasn't", "haven't", "isn't", "mightn't", "mustn't",
"neednt", "needn't", "never", "none", "nope", "nor", "not", "nothing", "nowhere",
"oughtnt", "shant", "shouldnt", "uhuh", "wasnt", "werent",
"oughtn't", "shan't", "shouldn't", "uh-uh", "wasn't", "weren't",
"without", "wont", "wouldnt", "won't", "wouldn't", "rarely", "seldom", "despite"]
def negated(words, nWords=None, includeNT=True):
# copy to avoid mutating a shared default list across calls
nWords = [] if nWords is None else list(nWords)
nWords.extend(negate)
for word in nWords:
if word in words:
return True
if includeNT:
for word in words:
if "n't" in word:
return True
if "least" in words:
i = words.index("least")
if i > 0 and words[i-1] != "at":
return True
return False
def normalize(score, alpha=15):
# normalize the score to be between -1 and 1 using an alpha that approximates the max expected value
normScore = score/math.sqrt( ((score*score) + alpha) )
return normScore
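# e.g. normalize(4) == 4 / math.sqrt(16 + 15) ~= 0.719, keeping scores within (-1, 1)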
def wildCardMatch(patternWithWildcard, listOfStringsToMatchAgainst):
listOfMatches = fnmatch.filter(listOfStringsToMatchAgainst, patternWithWildcard)
return listOfMatches
def isALLCAP_differential(wordList):
countALLCAPS= 0
for w in wordList:
if str(w).isupper():
countALLCAPS += 1
cap_differential = len(wordList) - countALLCAPS
if cap_differential > 0 and cap_differential < len(wordList):
isDiff = True
else: isDiff = False
return isDiff
isCap_diff = isALLCAP_differential(wordsAndEmoticons)
b_incr = 0.293 #(empirically derived mean sentiment intensity rating increase for booster words)
b_decr = -0.293
# booster/dampener 'intensifiers' or 'degree adverbs' http://en.wiktionary.org/wiki/Category:English_degree_adverbs
booster_dict = {"absolutely": b_incr, "amazingly": b_incr, "awfully": b_incr, "completely": b_incr, "considerably": b_incr,
"decidedly": b_incr, "deeply": b_incr, "effing": b_incr, "enormously": b_incr,
"entirely": b_incr, "especially": b_incr, "exceptionally": b_incr, "extremely": b_incr,
"fabulously": b_incr, "flipping": b_incr, "flippin": b_incr,
"fricking": b_incr, "frickin": b_incr, "frigging": b_incr, "friggin": b_incr, "fully": b_incr, "fucking": b_incr,
"greatly": b_incr, "hella": b_incr, "highly": b_incr, "hugely": b_incr, "incredibly": b_incr,
"intensely": b_incr, "majorly": b_incr, "more": b_incr, "most": b_incr, "particularly": b_incr,
"purely": b_incr, "quite": b_incr, "really": b_incr, "remarkably": b_incr,
"so": b_incr, "substantially": b_incr,
"thoroughly": b_incr, "totally": b_incr, "tremendously": b_incr,
"uber": b_incr, "unbelievably": b_incr, "unusually": b_incr, "utterly": b_incr,
"very": b_incr,
"almost": b_decr, "barely": b_decr, "hardly": b_decr, "just enough": b_decr,
"kind of": b_decr, "kinda": b_decr, "kindof": b_decr, "kind-of": b_decr,
"less": b_decr, "little": b_decr, "marginally": b_decr, "occasionally": b_decr, "partly": b_decr,
"scarcely": b_decr, "slightly": b_decr, "somewhat": b_decr,
"sort of": b_decr, "sorta": b_decr, "sortof": b_decr, "sort-of": b_decr}
sentiments = []
for item in wordsAndEmoticons:
v = 0
i = wordsAndEmoticons.index(item)
if (i < len(wordsAndEmoticons)-1 and str(item).lower() == "kind" and \
str(wordsAndEmoticons[i+1]).lower() == "of") or str(item).lower() in booster_dict:
sentiments.append(v)
continue
item_lowercase = str(item).lower()
if item_lowercase in word_valence_dict:
#get the sentiment valence
v = float(word_valence_dict[item_lowercase])
#check if sentiment laden word is in ALLCAPS (while others aren't)
c_incr = 0.733 #(empirically derived mean sentiment intensity rating increase for using ALLCAPs to emphasize a word)
if str(item).isupper() and isCap_diff:
if v > 0: v += c_incr
else: v -= c_incr
#check if the preceding words increase, decrease, or negate/nullify the valence
def scalar_inc_dec(word, valence):
scalar = 0.0
word_lower = str(word).lower()
if word_lower in booster_dict:
scalar = booster_dict[word_lower]
if valence < 0: scalar *= -1
#check if booster/dampener word is in ALLCAPS (while others aren't)
if str(word).isupper() and isCap_diff:
if valence > 0: scalar += c_incr
else: scalar -= c_incr
return scalar
n_scalar = -0.74
if i > 0 and str(wordsAndEmoticons[i-1]).lower() not in word_valence_dict:
s1 = scalar_inc_dec(wordsAndEmoticons[i-1], v)
v = v+s1
if negated([wordsAndEmoticons[i-1]]): v = v*n_scalar
if i > 1 and str(wordsAndEmoticons[i-2]).lower() not in word_valence_dict:
s2 = scalar_inc_dec(wordsAndEmoticons[i-2], v)
if s2 != 0: s2 = s2*0.95
v = v+s2
# check for special use of 'never' as valence modifier instead of negation
if wordsAndEmoticons[i-2] == "never" and (wordsAndEmoticons[i-1] == "so" or wordsAndEmoticons[i-1] == "this"):
v = v*1.5
# otherwise, check for negation/nullification
elif negated([wordsAndEmoticons[i-2]]): v = v*n_scalar
if i > 2 and str(wordsAndEmoticons[i-3]).lower() not in word_valence_dict:
s3 = scalar_inc_dec(wordsAndEmoticons[i-3], v)
if s3 != 0: s3 = s3*0.9
v = v+s3
# check for special use of 'never' as valence modifier instead of negation
if wordsAndEmoticons[i-3] == "never" and \
(wordsAndEmoticons[i-2] == "so" or wordsAndEmoticons[i-2] == "this") or \
(wordsAndEmoticons[i-1] == "so" or wordsAndEmoticons[i-1] == "this"):
v = v*1.25
# otherwise, check for negation/nullification
elif negated([wordsAndEmoticons[i-3]]): v = v*n_scalar
# check for special case idioms using a sentiment-laden keyword known to SAGE
special_case_idioms = {"the shit": 3, "the bomb": 3, "bad ass": 1.5, "yeah right": -2,
"cut the mustard": 2, "kiss of death": -1.5, "hand to mouth": -2}
# future work: consider other sentiment-laden idioms
#other_idioms = {"back handed": -2, "blow smoke": -2, "blowing smoke": -2, "upper hand": 1, "break a leg": 2,
# "cooking with gas": 2, "in the black": 2, "in the red": -2, "on the ball": 2,"under the weather": -2}
onezero = "{} {}".format(str(wordsAndEmoticons[i-1]), str(wordsAndEmoticons[i]))
twoonezero = "{} {} {}".format(str(wordsAndEmoticons[i-2]), str(wordsAndEmoticons[i-1]), str(wordsAndEmoticons[i]))
twoone = "{} {}".format(str(wordsAndEmoticons[i-2]), str(wordsAndEmoticons[i-1]))
threetwoone = "{} {} {}".format(str(wordsAndEmoticons[i-3]), str(wordsAndEmoticons[i-2]), str(wordsAndEmoticons[i-1]))
threetwo = "{} {}".format(str(wordsAndEmoticons[i-3]), str(wordsAndEmoticons[i-2]))
if onezero in special_case_idioms: v = special_case_idioms[onezero]
elif twoonezero in special_case_idioms: v = special_case_idioms[twoonezero]
elif twoone in special_case_idioms: v = special_case_idioms[twoone]
elif threetwoone in special_case_idioms: v = special_case_idioms[threetwoone]
elif threetwo in special_case_idioms: v = special_case_idioms[threetwo]
if len(wordsAndEmoticons)-1 > i:
zeroone = "{} {}".format(str(wordsAndEmoticons[i]), str(wordsAndEmoticons[i+1]))
if zeroone in special_case_idioms: v = special_case_idioms[zeroone]
if len(wordsAndEmoticons)-1 > i+1:
zeroonetwo = "{} {} {}".format(str(wordsAndEmoticons[i]), str(wordsAndEmoticons[i+1]), str(wordsAndEmoticons[i+2]))
if zeroonetwo in special_case_idioms: v = special_case_idioms[zeroonetwo]
# check for booster/dampener bi-grams such as 'sort of' or 'kind of'
if threetwo in booster_dict or twoone in booster_dict:
v = v+b_decr
# check for negation case using "least"
if i > 1 and str(wordsAndEmoticons[i-1]).lower() not in word_valence_dict \
and str(wordsAndEmoticons[i-1]).lower() == "least":
if (str(wordsAndEmoticons[i-2]).lower() != "at" and str(wordsAndEmoticons[i-2]).lower() != "very"):
v = v*n_scalar
elif i > 0 and str(wordsAndEmoticons[i-1]).lower() not in word_valence_dict \
and str(wordsAndEmoticons[i-1]).lower() == "least":
v = v*n_scalar
sentiments.append(v)
# check for modification in sentiment due to contrastive conjunction 'but'
if 'but' in wordsAndEmoticons or 'BUT' in wordsAndEmoticons:
try:
bi = wordsAndEmoticons.index('but')
except ValueError:
bi = wordsAndEmoticons.index('BUT')
for si, s in enumerate(sentiments):
if si < bi:
sentiments[si] = s*0.5
elif si > bi:
sentiments[si] = s*1.5
if sentiments:
sum_s = float(sum(sentiments))
#print sentiments, sum_s
# check for added emphasis resulting from exclamation points (up to 4 of them)
ep_count = str(text).count("!")
if ep_count > 4: ep_count = 4
ep_amplifier = ep_count*0.292 #(empirically derived mean sentiment intensity rating increase for exclamation points)
if sum_s > 0: sum_s += ep_amplifier
elif sum_s < 0: sum_s -= ep_amplifier
# check for added emphasis resulting from question marks (2 or 3+)
qm_count = str(text).count("?")
qm_amplifier = 0
if qm_count > 1:
if qm_count <= 3: qm_amplifier = qm_count*0.18
else: qm_amplifier = 0.96
if sum_s > 0: sum_s += qm_amplifier
elif sum_s < 0: sum_s -= qm_amplifier
compound = normalize(sum_s)
# want separate positive versus negative sentiment scores
pos_sum = 0.0
neg_sum = 0.0
neu_count = 0
for sentiment_score in sentiments:
if sentiment_score > 0:
pos_sum += (float(sentiment_score) +1) # compensates for neutral words that are counted as 1
if sentiment_score < 0:
neg_sum += (float(sentiment_score) -1) # when used with math.fabs(), compensates for neutrals
if sentiment_score == 0:
neu_count += 1
if pos_sum > math.fabs(neg_sum): pos_sum += (ep_amplifier+qm_amplifier)
elif pos_sum < math.fabs(neg_sum): neg_sum -= (ep_amplifier+qm_amplifier)
total = pos_sum + math.fabs(neg_sum) + neu_count
pos = math.fabs(pos_sum / total)
neg = math.fabs(neg_sum / total)
neu = math.fabs(neu_count / total)
else:
compound = 0.0; pos = 0.0; neg = 0.0; neu = 0.0
s = {"neg" : round(neg, 3),
"neu" : round(neu, 3),
"pos" : round(pos, 3),
"compound" : round(compound, 4)}
return s
if __name__ == '__main__':
# --- examples -------
sentences = [
"VADER is smart, handsome, and funny.", # positive sentence example
"VADER is smart, handsome, and funny!", # punctuation emphasis handled correctly (sentiment intensity adjusted)
"VADER is very smart, handsome, and funny.", # booster words handled correctly (sentiment intensity adjusted)
"VADER is VERY SMART, handsome, and FUNNY.", # emphasis for ALLCAPS handled
"VADER is VERY SMART, handsome, and FUNNY!!!",# combination of signals - VADER appropriately adjusts intensity
"VADER is VERY SMART, really handsome, and INCREDIBLY FUNNY!!!",# booster words & punctuation make this close to ceiling for score
"The book was good.", # positive sentence
"The book was kind of good.", # qualified positive sentence is handled correctly (intensity adjusted)
"The plot was good, but the characters are uncompelling and the dialog is not great.", # mixed negation sentence
"A really bad, horrible book.", # negative sentence with booster words
"At least it isn't a horrible book.", # negated negative sentence with contraction
":) and :D", # emoticons handled
"", # an empty string is correctly handled
"Today sux", # negative slang handled
"Today sux!", # negative slang with punctuation emphasis handled
"Today SUX!", # negative slang with capitalization emphasis
"Today kinda sux! But I'll get by, lol" # mixed sentiment example with slang and constrastive conjunction "but"
]
paragraph = "It was one of the worst movies I've seen, despite good reviews. \
Unbelievably bad acting!! Poor direction. VERY poor production. \
The movie was bad. Very bad movie. VERY bad movie. VERY BAD movie. VERY BAD movie!"
from nltk import tokenize
lines_list = tokenize.sent_tokenize(paragraph)
sentences.extend(lines_list)
tricky_sentences = [
"Most automated sentiment analysis tools are shit.",
"VADER sentiment analysis is the shit.",
"Sentiment analysis has never been good.",
"Sentiment analysis with VADER has never been this good.",
"Warren Beatty has never been so entertaining.",
"I won't say that the movie is astounding and I wouldn't claim that the movie is too banal either.",
"I like to hate Michael Bay films, but I couldn't fault this one",
"It's one thing to watch an Uwe Boll film, but another thing entirely to pay for it",
"The movie was too good",
"This movie was actually neither that funny, nor super witty.",
"This movie doesn't care about cleverness, wit or any other kind of intelligent humor.",
"Those who find ugly meanings in beautiful things are corrupt without being charming.",
"There are slow and repetitive parts, BUT it has just enough spice to keep it interesting.",
"The script is not fantastic, but the acting is decent and the cinematography is EXCELLENT!",
"Roger Dodger is one of the most compelling variations on this theme.",
"Roger Dodger is one of the least compelling variations on this theme.",
"Roger Dodger is at least compelling as a variation on the theme.",
"they fall in love with the product",
"but then it breaks",
"usually around the time the 90 day warranty expires",
"the twin towers collapsed today",
"However, Mr. Carter solemnly argues, his client carried out the kidnapping under orders and in the ''least offensive way possible.''"
]
sentences.extend(tricky_sentences)
for sentence in sentences:
print(sentence)
ss = sentiment(sentence)
print("\t" + str(ss))
print("\n\n Done!")

@ -0,0 +1,74 @@
# -*- coding: utf-8 -*-
from vaderSentiment import sentiment
from senpy.plugins import SentimentBox, SenpyPlugin
from senpy.models import Results, Sentiment, Entry
import logging
class VaderSentimentPlugin(SentimentBox):
'''
Sentiment classifier using the vaderSentiment module. Params accepted: Language: {en, es}. The output uses the Marl ontology, developed at GSI UPM for the semantic web.
'''
name = "sentiment-vader"
module = "sentiment-vader"
author = "@icorcuera"
version = "0.1.1"
extra_params = {
"language": {
"description": "language of the input",
"@id": "lang_rand",
"aliases": ["language", "l"],
"default": "auto",
"options": ["es", "en", "auto"]
},
"aggregate": {
"description": "Show only the strongest sentiment (aggregate) or all sentiments",
"aliases": ["aggregate","agg"],
"options": [True, False],
"default": False
}
}
requirements = {}
_VADER_KEYS = ['pos', 'neu', 'neg']
binary = False
def predict_one(self, features, activity):
text_input = ' '.join(features)
scores = sentiment(text_input)
sentiments = []
for k in self._VADER_KEYS:
sentiments.append(scores[k])
if activity.param('aggregate'):
m = max(sentiments)
sentiments = [k if k==m else None for k in sentiments]
return sentiments
test_cases = [
{
'input': 'I am tired :(',
'polarity': 'marl:Negative'
},
{
'input': 'I love pizza :(',
'polarity': 'marl:Positive'
},
{
'input': 'I enjoy going to the cinema :)',
'polarity': 'marl:Negative'
},
{
'input': 'This cake is disgusting',
'polarity': 'marl:Negative'
},
]
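A quick way to inspect the raw scores behind these test cases is to call the underlying module directly:
```
from vaderSentiment import sentiment

print(sentiment('I am tired :('))
# a dict with 'pos', 'neu' and 'neg' fractions plus a 'compound' score in [-1, 1]
```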

File diff suppressed because it is too large

@ -5,10 +5,10 @@
"senpy": "http://www.gsi.upm.es/onto/senpy/ns#",
"prov": "http://www.w3.org/ns/prov#",
"nif": "http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core#",
"marl": "http://www.gsi.dit.upm.es/ontologies/marl/ns#",
"onyx": "http://www.gsi.dit.upm.es/ontologies/onyx/ns#",
"wna": "http://www.gsi.dit.upm.es/ontologies/wnaffect/ns#",
"emoml": "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/emotionml/ns#",
"marl": "http://www.gsi.upm.es/ontologies/marl/ns#",
"onyx": "http://www.gsi.upm.es/ontologies/onyx/ns#",
"wna": "http://www.gsi.upm.es/ontologies/wnaffect/ns#",
"emoml": "http://www.gsi.upm.es/ontologies/onyx/vocabularies/emotionml/ns#",
"xsd": "http://www.w3.org/2001/XMLSchema#",
"fam": "http://vocab.fusepool.info/fam#",
"topics": {

@ -1,4 +1,4 @@
var ONYX = "http://www.gsi.dit.upm.es/ontologies/onyx/ns#";
var ONYX = "http://www.gsi.upm.es/ontologies/onyx/ns#";
var RDF_TYPE = "http://www.w3.org/1999/02/22-rdf-syntax-ns#type";
var plugins_params = default_params = {};
var plugins = [];

@ -1,7 +1,7 @@
ns = {
'http://www.gsi.dit.upm.es/ontologies/marl/ns#': 'marl',
'http://www.gsi.dit.upm.es/ontologies/onyx/ns#': 'onyx',
'http://www.gsi.dit.upm.es/ontologies/senpy/ns#': 'onyx',
'http://www.gsi.upm.es/ontologies/marl/ns#': 'marl',
'http://www.gsi.upm.es/ontologies/onyx/ns#': 'onyx',
'http://www.gsi.upm.es/ontologies/senpy/ns#': 'onyx',
'http://www.gsi.upm.es/onto/senpy/ns#': 'senpy',
'http://www.w3.org/ns/prov#': 'prov',
'http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core#': 'nif'

@ -64,7 +64,7 @@
These are some of the things you can do with the API:
<ul>
<li>List all available plugins: <a href="/api/plugins">/api/plugins</a></li>
<li>Get information about the default plugin: <a href="/api/plugins/default">/api/plugins/default</a></li>
<li>Get information about the default plugin: <a href="/api/plugins/default/">/api/plugins/default</a></li>
<li>List all available datasets: <a href="/api/datasets">/api/datasets</a></li>
<li>Download the JSON-LD context used: <a href="/api/contexts/Results.jsonld">/api/contexts/Results.jsonld</a></li>
</ul>
@ -326,11 +326,11 @@ In Data Science and Advanced Analytics (DSAA),
</p>
</div>
<div id="site-logos">
<a href="http://www.gsi.dit.upm.es" target="_blank"><img id="mixedemotions-logo"src="static/img/me.png"/></a>
<a href="http://www.gsi.upm.es" target="_blank"><img id="mixedemotions-logo"src="static/img/me.png"/></a>
</div>
</div>
</div>
</body>
<link href='http://fonts.googleapis.com/css?family=Architects+Daughter' rel='stylesheet' type='text/css'>
<link href='//fonts.googleapis.com/css?family=Architects+Daughter' rel='stylesheet' type='text/css'>
</html>

@ -14,7 +14,7 @@
# limitations under the License.
#
from . import models, __version__
from collections import MutableMapping
from collections.abc import MutableMapping
import pprint
import pdb
@ -44,6 +44,10 @@ def check_template(indict, template):
raise models.Error(('Element not found.'
'\nExpected: {}\nIn: {}').format(pprint.pformat(e),
pprint.pformat(indict)))
elif isinstance(template, float) and isinstance(indict, float):
diff = abs(indict - template)
if (diff > 0) and diff/(abs(indict+template)) > 0.05:
raise models.Error('Differences greater than 5% found.\n')
else:
if indict != template:
raise models.Error(('Differences found.\n'
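A worked example of the new float comparison: values pass while their relative difference stays within 5%:
```
indict, template = 0.50, 0.52
diff = abs(indict - template)
print(diff / abs(indict + template))  # ~0.0196 -> within tolerance, no error raised
```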

@ -46,7 +46,7 @@ extra_reqs = parse_requirements("extra-requirements.txt")
setup(
name='senpy',
python_requires='>3.3',
python_requires='>3.6',
packages=['senpy'], # this must be the same as the name above
version=__version__,
description=('A sentiment analysis server implementation. '
@ -67,7 +67,8 @@ setup(
tests_require=test_reqs,
setup_requires=['pytest-runner', ],
extras_require={
'evaluation': extra_reqs
'evaluation': extra_reqs,
'extras': extra_reqs
},
include_package_data=True,
entry_points={

@ -2,3 +2,4 @@ mock
pytest-cov
pytest
pandas
noop

@ -38,7 +38,7 @@ class BlueprintsTest(TestCase):
"""Set up only once, and re-use in every individual test"""
cls.app = Flask("test_extensions")
cls.client = cls.app.test_client()
cls.senpy = Senpy(default_plugins=True)
cls.senpy = Senpy(default_plugins=True, strict=False) # Ignore any optional plugins
cls.senpy.init_app(cls.app)
cls.dir = os.path.join(os.path.dirname(__file__), "..")
cls.senpy.add_folder(cls.dir)
@ -209,8 +209,8 @@ class BlueprintsTest(TestCase):
"""
# First, we split by sentence twice. Each call should generate 3 additional entries
# (one per sentence in the original).
resp = self.client.get('/api/split/split?i=The first sentence. The second sentence.'
'\nA new paragraph&delimiter=sentence&verbose')
resp = self.client.get('/api/split/split?i=The first sentence. The second sentence.%0A'
'A new paragraph&delimiter=sentence&verbose')
js = parse_resp(resp)
assert len(js['activities']) == 2
assert len(js['entries']) == 7
@ -218,9 +218,8 @@ class BlueprintsTest(TestCase):
# Now, we split by sentence. This produces 3 additional entries.
# Then, we split by paragraph. This should create 2 additional entries (One per paragraph
# in the original text)
resp = self.client.get('/api/split/split?i=The first sentence. The second sentence.'
'\nA new paragraph&0.delimiter=sentence'
'&1.delimiter=paragraph&verbose')
resp = self.client.get('/api/split/split?i=The first sentence. The second sentence.%0AA new paragraph'
'&0.delimiter=sentence&1.delimiter=paragraph&verbose')
# Calling dummy twice, should return the same string
self.assertCode(resp, 200)
js = parse_resp(resp)
@ -292,7 +291,7 @@ class BlueprintsTest(TestCase):
assert "@context" in js
assert check_dict(
js["@context"],
{"marl": "http://www.gsi.dit.upm.es/ontologies/marl/ns#"})
{"marl": "http://www.gsi.upm.es/ontologies/marl/ns#"})
def test_schema(self):
resp = self.client.get("/api/schemas/definitions.json")

@ -22,9 +22,8 @@ import logging
from functools import partial
from senpy.extensions import Senpy
from senpy import plugins
from senpy import plugins, config, api
from senpy.models import Error, Results, Entry, EmotionSet, Emotion, Plugin
from senpy import api
from flask import Flask
from unittest import TestCase
@ -69,8 +68,9 @@ class ExtensionsTest(TestCase):
def test_adding_folder(self):
""" It should be possible for senpy to look for plugins in more folders. """
app = Flask('test_adding_folder')
senpy = Senpy(plugin_folder=None,
app=self.app,
app=app,
default_plugins=False)
assert not senpy.analysis_plugins()
senpy.add_folder(self.examples_dir)
@ -141,8 +141,9 @@ class ExtensionsTest(TestCase):
def test_analyse_empty(self):
""" Trying to analyse when no plugins are installed should raise an error."""
app = Flask('test_adding_folder')
senpy = Senpy(plugin_folder=None,
app=self.app,
app=app,
default_plugins=False)
self.assertRaises(Error, senpy.analyse, Results(), [])
@ -253,3 +254,9 @@ class ExtensionsTest(TestCase):
self.senpy.analyse(r3)
assert len(r3.entries[0].emotions) == 1
r3.jsonld()
def testDefaultPlugins(self):
'''The default set of plugins should all load'''
self.app = Flask('test_extensions')
self.examples_dir = os.path.join(os.path.dirname(__file__), '..', 'example-plugins')
self.senpy = Senpy(app=self.app, default_plugins=False)

@ -27,6 +27,7 @@ from senpy.models import Results, Entry, EmotionSet, Emotion, Plugins
from senpy import plugins
from senpy.plugins.postprocessing.emotion.centroids import CentroidConversion
from senpy.gsitk_compat import GSITK_AVAILABLE
from senpy import config
import pandas as pd
@ -386,7 +387,7 @@ class PluginsTest(TestCase):
def make_mini_test(fpath):
def mini_test(self):
for plugin in plugins.from_path(fpath, install=True):
for plugin in plugins.from_path(fpath, install=True, strict=config.strict):
plugin.test()
return mini_test

@ -75,9 +75,9 @@ class SemanticsTest(TestCase):
assert g
qres = g.query("""
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX marl: <http://www.gsi.dit.upm.es/ontologies/marl/ns#>
PREFIX marl: <http://www.gsi.upm.es/ontologies/marl/ns#>
PREFIX nif: <http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core#>
PREFIX onyx: <http://www.gsi.dit.upm.es/ontologies/onyx/ns#>
PREFIX onyx: <http://www.gsi.upm.es/ontologies/onyx/ns#>
PREFIX senpy: <http://www.gsi.upm.es/onto/senpy/ns#>
SELECT DISTINCT ?entry ?text ?sentiment
@ -109,9 +109,9 @@ class SemanticsTest(TestCase):
g = parse_resp(resp, 'ttl')
qres = g.query("""
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX marl: <http://www.gsi.dit.upm.es/ontologies/marl/ns#>
PREFIX marl: <http://www.gsi.upm.es/ontologies/marl/ns#>
PREFIX nif: <http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core#>
PREFIX onyx: <http://www.gsi.dit.upm.es/ontologies/onyx/ns#>
PREFIX onyx: <http://www.gsi.upm.es/ontologies/onyx/ns#>
PREFIX senpy: <http://www.gsi.upm.es/onto/senpy/ns#>
SELECT DISTINCT ?entry ?text ?sentiment
