1
0
mirror of https://github.com/gsi-upm/senpy synced 2025-10-21 18:58:25 +00:00

Compare commits

..

2 Commits

Author SHA1 Message Date
Daniel Suarez Souto
530982be62 Fixed some code problems 2018-09-28 11:36:53 +02:00
Daniel Suarez Souto
38b478890b Added maxSentiment plugin 2018-09-28 11:11:47 +02:00
177 changed files with 48086 additions and 21808 deletions

1
.gitignore vendored
View File

@@ -8,4 +8,3 @@ __pycache__
VERSION VERSION
Dockerfile-* Dockerfile-*
Dockerfile Dockerfile
senpy_data

View File

@@ -4,130 +4,110 @@
# - docker:dind # - docker:dind
# When using dind, it's wise to use the overlayfs driver for # When using dind, it's wise to use the overlayfs driver for
# improved performance. # improved performance.
stages: stages:
- test - test
- publish - push
- test_image
- deploy - deploy
- clean
variables: before_script:
KUBENS: senpy - make -e login
LATEST_IMAGE: "${HUB_REPO}:${CI_COMMIT_SHORT_SHA}"
SENPY_DATA: "/senpy-data/" # This is configured in the CI job
NLTK_DATA: "/senpy-data/nltk_data" # Store NLTK downloaded data
docker: .test: &test_definition
stage: publish
image:
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
variables:
PYTHON_VERSION: "3.10"
tags:
- docker
script:
- echo $CI_COMMIT_TAG > senpy/VERSION
- sed "s/{{PYVERSION}}/$PYTHON_VERSION/" Dockerfile.template > Dockerfile
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"},\"https://index.docker.io/v1/\":{\"auth\":\"$HUB_AUTH\"}}}" > /kaniko/.docker/config.json
# The skip-tls-verify flag is there because our registry certificate is self signed
- /kaniko/executor --context $CI_PROJECT_DIR --skip-tls-verify --dockerfile $CI_PROJECT_DIR/Dockerfile --destination $CI_REGISTRY_IMAGE:$CI_COMMIT_TAG --destination $HUB_REPO:$CI_COMMIT_TAG
only:
- tags
docker-latest:
stage: publish
image:
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
variables:
PYTHON_VERSION: "3.10"
tags:
- docker
script:
- echo git.${CI_COMMIT_SHORT_SHA} > senpy/VERSION
- sed "s/{{PYVERSION}}/$PYTHON_VERSION/" Dockerfile.template > Dockerfile
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"},\"https://index.docker.io/v1/\":{\"auth\":\"$HUB_AUTH\"}}}" > /kaniko/.docker/config.json
# The skip-tls-verify flag is there because our registry certificate is self signed
- /kaniko/executor --context $CI_PROJECT_DIR --skip-tls-verify --dockerfile $CI_PROJECT_DIR/Dockerfile --destination $LATEST_IMAGE --destination "${HUB_REPO}:latest"
only:
refs:
- master
testimage:
only:
- tags
tags:
- docker
stage: test_image
image: "$CI_REGISTRY_IMAGE:$CI_COMMIT_TAG"
script:
- python -m senpy --no-run --test
testpy37:
tags:
- docker
variables:
SENPY_STRICT: "false"
image: python:3.7
stage: test stage: test
script: script:
- pip install -r requirements.txt -r test-requirements.txt - make -e test-$PYTHON_VERSION
- python setup.py test except:
- tags # Avoid unnecessary double testing
testpy310: test-3.5:
tags: <<: *test_definition
- docker
variables: variables:
SENPY_STRICT: "true" PYTHON_VERSION: "3.5"
image: python:3.10
stage: test
script:
- pip install -r requirements.txt -r test-requirements.txt -r extra-requirements.txt
- python setup.py test
push_pypi: test-2.7:
<<: *test_definition
variables:
PYTHON_VERSION: "2.7"
.image: &image_definition
stage: push
script:
- make -e push-$PYTHON_VERSION
only: only:
- tags - tags
tags: - triggers
- docker - fix-makefiles
image: python:3.10
stage: publish
script:
- echo $CI_COMMIT_TAG > senpy/VERSION
- pip install twine
- python setup.py sdist bdist_wheel
- TWINE_PASSWORD=$PYPI_PASSWORD TWINE_USERNAME=$PYPI_USERNAME python -m twine upload dist/*
check_pypi: push-3.5:
only: <<: *image_definition
- tags variables:
tags: PYTHON_VERSION: "3.5"
- docker
image: python:3.10
stage: deploy
script:
- pip install senpy==$CI_COMMIT_TAG
# Allow PYPI to update its index before we try to install
when: delayed
start_in: 10 minutes
latest-demo: push-2.7:
<<: *image_definition
variables:
PYTHON_VERSION: "2.7"
push-latest:
<<: *image_definition
variables:
PYTHON_VERSION: latest
only: only:
refs:
- master - master
tags: - triggers
- docker - fix-makefiles
image: alpine/k8s:1.22.6
push-github:
stage: deploy stage: deploy
environment: production
variables:
KUBECONFIG: "/kubeconfig"
# Same image as docker-latest
IMAGEWTAG: "${LATEST_IMAGE}"
KUBEAPP: "senpy"
script: script:
- echo "${KUBECONFIG_RAW}" > $KUBECONFIG - make -e push-github
- kubectl --kubeconfig $KUBECONFIG version only:
- cd k8s/ - master
- cat *.yaml *.tmpl 2>/dev/null | envsubst | kubectl --kubeconfig $KUBECONFIG apply --namespace ${KUBENS:-default} -f - - triggers
- kubectl --kubeconfig $KUBECONFIG get all,ing -l app=${KUBEAPP} --namespace=${KUBENS:-default} - fix-makefiles
deploy_pypi:
stage: deploy
script: # Configure the PyPI credentials, then push the package, and cleanup the creds.
- echo "[server-login]" >> ~/.pypirc
- echo "repository=https://upload.pypi.org/legacy/" >> ~/.pypirc
- echo "username=" ${PYPI_USER} >> ~/.pypirc
- echo "password=" ${PYPI_PASSWORD} >> ~/.pypirc
- make pip_upload
- echo "" > ~/.pypirc && rm ~/.pypirc # If the above fails, this won't run.
only:
- /^v?\d+\.\d+\.\d+([abc]\d*)?$/ # PEP-440 compliant version (tags)
except:
- branches
deploy:
stage: deploy
environment: test
script:
- make -e deploy
only:
- master
- fix-makefiles
push-github:
stage: deploy
script:
- make -e push-github
only:
- master
- triggers
clean :
stage: clean
script:
- make -e clean
when: manual
cleanup_py:
stage: clean
when: always # this is important; run even if preceding stages failed.
script:
- rm -vf ~/.pypirc # we don't want to leave these around, but GitLab may clean up anyway.
- docker logout

View File

@@ -2,7 +2,7 @@ These makefiles are recipes for several common tasks in different types of proje
To add them to your project, simply do: To add them to your project, simply do:
``` ```
git remote add makefiles ssh://git@lab.gsi.upm.es:2200/docs/templates/makefiles.git git remote add makefiles ssh://git@lab.cluster.gsi.dit.upm.es:2200/docs/templates/makefiles.git
git subtree add --prefix=.makefiles/ makefiles master git subtree add --prefix=.makefiles/ makefiles master
touch Makefile touch Makefile
echo "include .makefiles/base.mk" >> Makefile echo "include .makefiles/base.mk" >> Makefile
@@ -16,7 +16,7 @@ include .makefiles/python.mk
``` ```
You may need to set special variables like the name of your project or the python versions you're targetting. You may need to set special variables like the name of your project or the python versions you're targetting.
Take a look at each specific `.mk` file for more information, and the `Makefile` in the [senpy](https://lab.gsi.upm.es/senpy/senpy) project for a real use case. Take a look at each specific `.mk` file for more information, and the `Makefile` in the [senpy](https://lab.cluster.gsi.dit.upm.es/senpy/senpy) project for a real use case.
If you update the makefiles from your repository, make sure to push the changes for review in upstream (this repository): If you update the makefiles from your repository, make sure to push the changes for review in upstream (this repository):

View File

@@ -22,4 +22,7 @@ else
rm $(KEY_FILE) rm $(KEY_FILE)
endif endif
.PHONY:: commit tag git-push git-pull push-github push:: git-push
pull:: git-pull
.PHONY:: commit tag push git-push git-pull push-github

View File

@@ -1,15 +1,17 @@
makefiles-remote: makefiles-remote:
git ls-remote --exit-code makefiles 2> /dev/null || git remote add makefiles ssh://git@lab.gsi.upm.es:2200/docs/templates/makefiles.git @git remote add makefiles ssh://git@lab.cluster.gsi.dit.upm.es:2200/docs/templates/makefiles.git 2>/dev/null || true
makefiles-commit: makefiles-remote makefiles-commit: makefiles-remote
git add -f .makefiles git add -f .makefiles
git commit -em "Updated makefiles from ${NAME}" git commit -em "Updated makefiles from ${NAME}"
makefiles-push: makefiles-push:
git fetch makefiles $(NAME)
git subtree push --prefix=.makefiles/ makefiles $(NAME) git subtree push --prefix=.makefiles/ makefiles $(NAME)
makefiles-pull: makefiles-remote makefiles-pull: makefiles-remote
git subtree pull --prefix=.makefiles/ makefiles master --squash git subtree pull --prefix=.makefiles/ makefiles master --squash
.PHONY:: makefiles-remote makefiles-commit makefiles-push makefiles-pull pull:: makefiles-pull
push:: makefiles-push
.PHONY:: makefiles-remote makefiles-commit makefiles-push makefiles-pull pull push

View File

@@ -29,7 +29,7 @@ build: $(addprefix build-, $(PYVERSIONS)) ## Build all images / python versions
docker tag $(IMAGEWTAG)-python$(PYMAIN) $(IMAGEWTAG) docker tag $(IMAGEWTAG)-python$(PYMAIN) $(IMAGEWTAG)
build-%: version Dockerfile-% ## Build a specific version (e.g. build-2.7) build-%: version Dockerfile-% ## Build a specific version (e.g. build-2.7)
docker build --pull -t '$(IMAGEWTAG)-python$*' -f Dockerfile-$* .; docker build -t '$(IMAGEWTAG)-python$*' -f Dockerfile-$* .;
dev-%: ## Launch a specific development environment using docker (e.g. dev-2.7) dev-%: ## Launch a specific development environment using docker (e.g. dev-2.7)
@docker start $(NAME)-dev$* || (\ @docker start $(NAME)-dev$* || (\

View File

@@ -1,22 +0,0 @@
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
version: 2
# Set the OS, Python version and other tools you might need
build:
os: ubuntu-22.04
tools:
python: "3.10"
# Build documentation in the "docs/" directory with Sphinx
sphinx:
configuration: docs/conf.py
# formats:
# - pdf
# - epub
python:
install:
- requirements: docs/requirements.txt

View File

@@ -1,43 +1,12 @@
sudo: required sudo: required
matrix: services:
allow_failures: - docker
# Windows is experimental in Travis.
# As of this writing, senpy installs but hangs on tests that use the flask test client (e.g. blueprints)
- os: windows
include:
- os: linux
language: python language: python
python: 3.4
before_install: env:
- pip install --upgrade --force-reinstall pandas - PYV=2.7
- os: linux - PYV=3.5
language: python # run nosetests - Tests
python: 3.5 script: make test-$PYV
- os: linux
language: python
python: 3.6
- os: linux
language: python
python: 3.7
- os: osx
language: generic
addons:
homebrew:
# update: true
packages: python3
before_install:
- python3 -m pip install --upgrade virtualenv
- virtualenv -p python3 --system-site-packages "$HOME/venv"
- source "$HOME/venv/bin/activate"
- os: windows
language: bash
before_install:
- choco install -y python3
- python -m pip install --upgrade pip
env: PATH=/c/Python37:/c/Python37/Scripts:$PATH
# command to run tests
# 'python' points to Python 2.7 on macOS but points to Python 3.7 on Linux and Windows
# 'python3' is a 'command not found' error on Windows but 'py' works on Windows only
script:
- python3 setup.py test || python setup.py test

View File

@@ -1,86 +0,0 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
### Changed
* Changed the underlying model to use `pydantic` models
* Plugin interface slightly changed. Activation should be performed in the `activate` method
* data directory selection logic is slightly modified, and will choose one of the following (in this order): `data_folder` (argument), `$SENPY_DATA` or `$CWD`
* Plugins cannot be deactivated
### Fixed
* Tests now detect errors on optional plugins
### Added
* The code of many senpy community plugins have been included by default. However, additional files (e.g., licensed data) and/or installing additional dependencies may be necessary for some plugins. Read each plugin's documentation for more information.
* `optional` attribute in plugins. Optional plugins may fail to load or activate but the server will be started regardless, unless running in strict mode
* Option in shelf plugins to ignore pickling errors
### Removed
* `--only-install`, `--only-test` and `--only-list` flags were removed in favor of `--no-run` + `--install`/`--test`/`--dependencies`
## [1.0.6]
### Fixed
* Plugins now get activated for testing
## [1.0.1]
### Added
* License headers
* Description for PyPI (setup.py)
### Changed
* The evaluation tab shows datasets inline, and a tooltip shows the number of instances
* The docs should be clearer now
## [1.0.0]
### Fixed
* Restored hash changing function in `main.js`
## 0.20
### Added
* Objects can control the keys that will be used in `serialize`/`jsonld`/`as_dict` by specifying a list of keys in `terse_keys`.
e.g.
```python
>>> class MyModel(senpy.models.BaseModel):
... _terse_keys = ['visible']
... invisible = 5
... visible = 1
...
>>> m = MyModel(id='testing')
>>> m.jsonld()
{'invisible': 5, 'visible': 1, '@id': 'testing'}
>>> m.jsonld(verbose=False)
{'visible': 1}
```
* Configurable logging format.
* Added default terse keys for the most common classes (entry, sentiment, emotion...).
* Flag parameters (boolean) are set to true even when no value is added (e.g. `&verbose` is the same as `&verbose=true`).
* Plugin and parameter descriptions are now formatted with (showdown)[https://github.com/showdownjs/showdown].
* The web UI requests extra_parameters from the server. This is useful for pipelines. See #52
* First batch of semantic tests (using SPARQL)
* `Plugin.path()` method to get a file path from a relative path (using the senpy data folder)
### Changed
* `install_deps` now checks what requirements are already met before installing with pip.
* Help is now provided verbosely by default
* Other outputs are terse by default. This means some properties are now hidden unless verbose is set.
* `sentiments` and `emotions` are now `marl:hasOpinion` and `onyx:hasEmotionSet`, respectively.
* Nicer logging format
* Context aliases (e.g. `sentiments` and `emotions` properties) have been replaced with the original properties (e.g. `marl:hasOpinion` and `onyx:hasEmotionSet**), to use aliases, pass the `aliases** parameter.
* Several UI improvements
* Dedicated tab to show the list of plugins
* URLs in plugin descriptions are shown as links
* The format of the response is selected by clicking on a tab instead of selecting from a drop-down
* list of examples
* Bootstrap v4
* RandEmotion and RandSentiment are no longer included in the base set of plugins
* The `--plugin-folder` option can be used more than once, and every folder will be added to the app.
### Deprecated
### Removed
* Python 2.7 is no longer test or officially supported
### Fixed
* Plugin descriptions are now dedented when they are extracted from the docstring.
### Security

View File

@@ -6,20 +6,21 @@ RUN apt-get update && apt-get install -y \
libblas-dev liblapack-dev liblapacke-dev gfortran \ libblas-dev liblapack-dev liblapacke-dev gfortran \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
RUN mkdir -p /cache/ /senpy-plugins /data/ RUN mkdir /cache/ /senpy-plugins /data/
VOLUME /data/ VOLUME /data/
ENV PIP_CACHE_DIR=/cache/ SENPY_DATA=/data ENV PIP_CACHE_DIR=/cache/ SENPY_DATA=/data
ONBUILD COPY . /senpy-plugins/
ONBUILD RUN python -m senpy --only-install -f /senpy-plugins
ONBUILD WORKDIR /senpy-plugins/
WORKDIR /usr/src/app WORKDIR /usr/src/app
COPY test-requirements.txt requirements.txt extra-requirements.txt /usr/src/app/ COPY test-requirements.txt requirements.txt extra-requirements.txt /usr/src/app/
RUN pip install --no-cache-dir -r test-requirements.txt -r requirements.txt -r extra-requirements.txt RUN pip install --no-cache-dir -r test-requirements.txt -r requirements.txt -r extra-requirements.txt
COPY . /usr/src/app/ COPY . /usr/src/app/
RUN pip install --no-cache-dir --no-index --no-deps --editable . RUN pip install --no-cache-dir --no-index --no-deps --editable .
ONBUILD COPY . /senpy-plugins/
ONBUILD RUN python -m senpy -i --no-run -f /senpy-plugins
ONBUILD WORKDIR /senpy-plugins/
ENTRYPOINT ["python", "-m", "senpy", "-f", "/senpy-plugins/", "--host", "0.0.0.0"] ENTRYPOINT ["python", "-m", "senpy", "-f", "/senpy-plugins/", "--host", "0.0.0.0"]

View File

@@ -2,7 +2,6 @@ include requirements.txt
include test-requirements.txt include test-requirements.txt
include extra-requirements.txt include extra-requirements.txt
include README.rst include README.rst
include LICENSE.txt
include senpy/VERSION include senpy/VERSION
graft senpy/plugins graft senpy/plugins
graft senpy/schemas graft senpy/schemas

View File

@@ -5,7 +5,7 @@ IMAGENAME=gsiupm/senpy
# The first version is the main one (used for quick builds) # The first version is the main one (used for quick builds)
# See .makefiles/python.mk for more info # See .makefiles/python.mk for more info
PYVERSIONS ?= 3.10 3.7 PYVERSIONS=3.5 2.7
DEVPORT=5000 DEVPORT=5000

View File

@@ -1 +1 @@
web: python -m senpy --host 0.0.0.0 --port $PORT web: python -m senpy --host 0.0.0.0 --port $PORT --default-plugins

View File

@@ -1,25 +1,18 @@
.. image:: img/header.png .. image:: img/header.png
:width: 100% :width: 100%
:target: http://senpy.gsi.upm.es :target: http://demos.gsi.dit.upm.es/senpy
.. image:: https://readthedocs.org/projects/senpy/badge/?version=latest
:target: http://senpy.readthedocs.io/en/latest/
.. image:: https://badge.fury.io/py/senpy.svg
:target: https://badge.fury.io/py/senpy
.. image:: https://travis-ci.org/gsi-upm/senpy.svg
:target: https://github.com/gsi-upm/senpy/senpy/tree/master
.. image:: https://img.shields.io/pypi/l/requests.svg
:target: https://lab.gsi.upm.es/senpy/senpy/
.. image:: https://travis-ci.org/gsi-upm/senpy.svg?branch=master
:target: https://travis-ci.org/gsi-upm/senpy
Senpy lets you create sentiment analysis web services easily, fast and using a well known API. Senpy lets you create sentiment analysis web services easily, fast and using a well known API.
As a bonus, Senpy services use semantic vocabularies (e.g. `NIF <http://persistence.uni-leipzig.org/nlp2rdf/>`_, `Marl <http://www.gsi.upm.es/ontologies/marl>`_, `Onyx <http://www.gsi.upm.es/ontologies/onyx>`_) and formats (turtle, JSON-LD, xml-rdf). As a bonus, senpy services use semantic vocabularies (e.g. `NIF <http://persistence.uni-leipzig.org/nlp2rdf/>`_, `Marl <http://www.gsi.dit.upm.es/ontologies/marl>`_, `Onyx <http://www.gsi.dit.upm.es/ontologies/onyx>`_) and formats (turtle, JSON-LD, xml-rdf).
Have you ever wanted to turn your sentiment analysis algorithms into a service? Have you ever wanted to turn your sentiment analysis algorithms into a service?
With Senpy, now you can. With senpy, now you can.
It provides all the tools so you just have to worry about improving your algorithms: It provides all the tools so you just have to worry about improving your algorithms:
`See it in action. <http://senpy.gsi.upm.es/>`_ `See it in action. <http://senpy.cluster.gsi.dit.upm.es/>`_
Installation Installation
------------ ------------
@@ -41,36 +34,20 @@ Alternatively, you can use the development version:
cd senpy cd senpy
pip install --user . pip install --user .
If you want to install Senpy globally, use sudo instead of the ``--user`` flag. If you want to install senpy globally, use sudo instead of the ``--user`` flag.
Docker Image Docker Image
************ ************
Build the image or use the pre-built one: ``docker run -ti -p 5000:5000 gsiupm/senpy``. Build the image or use the pre-built one: ``docker run -ti -p 5000:5000 gsiupm/senpy --default-plugins``.
To add custom plugins, add a volume and tell Senpy where to find the plugins: ``docker run -ti -p 5000:5000 -v <PATH OF PLUGINS>:/plugins gsiupm/senpy -f /plugins`` To add custom plugins, add a volume and tell senpy where to find the plugins: ``docker run -ti -p 5000:5000 -v <PATH OF PLUGINS>:/plugins gsiupm/senpy --default-plugins -f /plugins``
Compatibility
-------------
Senpy should run on any major operating system.
Its code is pure Python, and the only limitations are imposed by its dependencies (e.g., nltk, pandas).
Currently, the CI/CD pipeline tests the code on:
* GNU/Linux with Python versions 3.7+ (3.10+ recommended for all plugins)
* MacOS and homebrew's python3
* Windows 10 and chocolatey's python3
The latest PyPI package is verified to install on Ubuntu, Debian and Arch Linux.
If you have trouble installing Senpy on your platform, see `Having problems?`_.
Developing Developing
---------- ----------
Running/debugging Developing/debugging
***************** ********************
This command will run the senpy container using the latest image available, mounting your current folder so you get your latest code: This command will run the senpy container using the latest image available, mounting your current folder so you get your latest code:
.. code:: bash .. code:: bash
@@ -133,7 +110,7 @@ or, alternatively:
This will create a server with any modules found in the current path. This will create a server with any modules found in the current path.
For more options, see the `--help` page. For more options, see the `--help` page.
Alternatively, you can use the modules included in Senpy to build your own application. Alternatively, you can use the modules included in senpy to build your own application.
Deploying on Heroku Deploying on Heroku
------------------- -------------------
@@ -141,31 +118,13 @@ Use a free heroku instance to share your service with the world.
Just use the example Procfile in this repository, or build your own. Just use the example Procfile in this repository, or build your own.
`DEMO on heroku <http://senpy.herokuapp.com>`_
For more information, check out the `documentation <http://senpy.readthedocs.org>`_. For more information, check out the `documentation <http://senpy.readthedocs.org>`_.
------------------------------------------------------------------------------------ ------------------------------------------------------------------------------------
Python 2.x compatibility
------------------------
Keeping compatibility between python 2.7 and 3.x is not always easy, especially for a framework that deals both with text and web requests.
Hence, starting February 2019, this project will no longer make efforts to support python 2.7, which will reach its end of life in 2020.
Most of the functionality should still work, and the compatibility shims will remain for now, but we cannot make any guarantees at this point.
Instead, the maintainers will focus their efforts on keeping the codebase compatible across different Python 3.3+ versions, including upcoming ones.
We apologize for the inconvenience.
Having problems?
----------------
Please, file a new issue `on GitHub <https://github.com/gsi-upm/senpy/issues>`_ including enough details to reproduce the bug, including:
* Operating system
* Version of Senpy (or docker tag)
* Installed libraries
* Relevant logs
* A simple code example
Acknowledgement Acknowledgement
--------------- ---------------
This development has been partially funded by the European Union through the MixedEmotions Project (project number H2020 655632), as part of the `RIA ICT 15 Big data and Open Data Innovation and take-up` programme. This development has been partially funded by the European Union through the MixedEmotions Project (project number H2020 655632), as part of the `RIA ICT 15 Big data and Open Data Innovation and take-up` programme.

View File

@@ -1,9 +0,0 @@
* Upload context to www.gsi.upm.es/ontologies/senpy
* Use context from gsi.upm.es
* Add example of morality analysis
* Simplify plugin loading
* Simplify models
* Remove json schemas and generate pydantic objects
* Remove some of the meta-programming magic
* Migrate to openapi/quart

4
config.py Normal file
View File

@@ -0,0 +1,4 @@
import os
SERVER_PORT = os.environ.get("SERVER_PORT", 5000)
DEBUG = os.environ.get("DEBUG", True)

File diff suppressed because it is too large Load Diff

View File

@@ -1,592 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Evaluating Services"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Sentiment analysis plugins can also be evaluated on a series of pre-defined datasets.\n",
"This can be done in three ways: through the Web UI (playground), through the web API and programmatically.\n",
"\n",
"Regardless of the way you perform the evaluation, you will need to specify a plugin (service) that you want to evaluate, and a series of datasets on which it should be evaluated.\n",
"\n",
"to evaluate a plugin on a dataset, senpy use the plugin to predict the sentiment in each entry in the dataset.\n",
"These predictions are compared with the expected values to produce several metrics, such as: accuracy, precision and f1-score.\n",
"\n",
"**note**: the evaluation process might take long for plugins that use external services, such as `sentiment140`.\n",
"\n",
"**note**: plugins are assumed to be pre-trained and invariant. i.e., the prediction for an entry should "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Web UI (Playground)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The playground should contain a tab for Evaluation, where you can select any plugin that can be evaluated, and the set of datasets that you want to test the plugin on.\n",
"\n",
"For example, the image below shows the results of the `sentiment-vader` plugin on the `vader` and `sts` datasets:\n",
"\n",
"\n",
"![](eval_table.png)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Web API"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The api exposes an endpoint (`/evaluate`), which accents the plugin and the set of datasets on which it should be evaluated."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The following code is not necessary, but it will display the results better:"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Here is a simple call using the requests library:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<style>.output_html .hll { background-color: #ffffcc }\n",
".output_html { background: #f8f8f8; }\n",
".output_html .c { color: #408080; font-style: italic } /* Comment */\n",
".output_html .err { border: 1px solid #FF0000 } /* Error */\n",
".output_html .k { color: #008000; font-weight: bold } /* Keyword */\n",
".output_html .o { color: #666666 } /* Operator */\n",
".output_html .ch { color: #408080; font-style: italic } /* Comment.Hashbang */\n",
".output_html .cm { color: #408080; font-style: italic } /* Comment.Multiline */\n",
".output_html .cp { color: #BC7A00 } /* Comment.Preproc */\n",
".output_html .cpf { color: #408080; font-style: italic } /* Comment.PreprocFile */\n",
".output_html .c1 { color: #408080; font-style: italic } /* Comment.Single */\n",
".output_html .cs { color: #408080; font-style: italic } /* Comment.Special */\n",
".output_html .gd { color: #A00000 } /* Generic.Deleted */\n",
".output_html .ge { font-style: italic } /* Generic.Emph */\n",
".output_html .gr { color: #FF0000 } /* Generic.Error */\n",
".output_html .gh { color: #000080; font-weight: bold } /* Generic.Heading */\n",
".output_html .gi { color: #00A000 } /* Generic.Inserted */\n",
".output_html .go { color: #888888 } /* Generic.Output */\n",
".output_html .gp { color: #000080; font-weight: bold } /* Generic.Prompt */\n",
".output_html .gs { font-weight: bold } /* Generic.Strong */\n",
".output_html .gu { color: #800080; font-weight: bold } /* Generic.Subheading */\n",
".output_html .gt { color: #0044DD } /* Generic.Traceback */\n",
".output_html .kc { color: #008000; font-weight: bold } /* Keyword.Constant */\n",
".output_html .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */\n",
".output_html .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */\n",
".output_html .kp { color: #008000 } /* Keyword.Pseudo */\n",
".output_html .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */\n",
".output_html .kt { color: #B00040 } /* Keyword.Type */\n",
".output_html .m { color: #666666 } /* Literal.Number */\n",
".output_html .s { color: #BA2121 } /* Literal.String */\n",
".output_html .na { color: #7D9029 } /* Name.Attribute */\n",
".output_html .nb { color: #008000 } /* Name.Builtin */\n",
".output_html .nc { color: #0000FF; font-weight: bold } /* Name.Class */\n",
".output_html .no { color: #880000 } /* Name.Constant */\n",
".output_html .nd { color: #AA22FF } /* Name.Decorator */\n",
".output_html .ni { color: #999999; font-weight: bold } /* Name.Entity */\n",
".output_html .ne { color: #D2413A; font-weight: bold } /* Name.Exception */\n",
".output_html .nf { color: #0000FF } /* Name.Function */\n",
".output_html .nl { color: #A0A000 } /* Name.Label */\n",
".output_html .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */\n",
".output_html .nt { color: #008000; font-weight: bold } /* Name.Tag */\n",
".output_html .nv { color: #19177C } /* Name.Variable */\n",
".output_html .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */\n",
".output_html .w { color: #bbbbbb } /* Text.Whitespace */\n",
".output_html .mb { color: #666666 } /* Literal.Number.Bin */\n",
".output_html .mf { color: #666666 } /* Literal.Number.Float */\n",
".output_html .mh { color: #666666 } /* Literal.Number.Hex */\n",
".output_html .mi { color: #666666 } /* Literal.Number.Integer */\n",
".output_html .mo { color: #666666 } /* Literal.Number.Oct */\n",
".output_html .sa { color: #BA2121 } /* Literal.String.Affix */\n",
".output_html .sb { color: #BA2121 } /* Literal.String.Backtick */\n",
".output_html .sc { color: #BA2121 } /* Literal.String.Char */\n",
".output_html .dl { color: #BA2121 } /* Literal.String.Delimiter */\n",
".output_html .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */\n",
".output_html .s2 { color: #BA2121 } /* Literal.String.Double */\n",
".output_html .se { color: #BB6622; font-weight: bold } /* Literal.String.Escape */\n",
".output_html .sh { color: #BA2121 } /* Literal.String.Heredoc */\n",
".output_html .si { color: #BB6688; font-weight: bold } /* Literal.String.Interpol */\n",
".output_html .sx { color: #008000 } /* Literal.String.Other */\n",
".output_html .sr { color: #BB6688 } /* Literal.String.Regex */\n",
".output_html .s1 { color: #BA2121 } /* Literal.String.Single */\n",
".output_html .ss { color: #19177C } /* Literal.String.Symbol */\n",
".output_html .bp { color: #008000 } /* Name.Builtin.Pseudo */\n",
".output_html .fm { color: #0000FF } /* Name.Function.Magic */\n",
".output_html .vc { color: #19177C } /* Name.Variable.Class */\n",
".output_html .vg { color: #19177C } /* Name.Variable.Global */\n",
".output_html .vi { color: #19177C } /* Name.Variable.Instance */\n",
".output_html .vm { color: #19177C } /* Name.Variable.Magic */\n",
".output_html .il { color: #666666 } /* Literal.Number.Integer.Long */</style><div class=\"highlight\"><pre><span></span><span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;@context&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;http://senpy.gsi.upm.es/api/contexts/YXBpL2V2YWx1YXRlLz9hbGdvPXNlbnRpbWVudC12YWRlciZkYXRhc2V0PXZhZGVyJTJDc3RzJm91dGZvcm1hdD1qc29uLWxkIw%3D%3D&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;AggregatedEvaluation&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;senpy:evaluations&quot;</span><span class=\"p\">:</span> <span class=\"p\">[</span>\n",
" <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;Evaluation&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;evaluates&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;endpoint:plugins/sentiment-vader_0.1.1__vader&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;evaluatesOn&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;vader&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;metrics&quot;</span><span class=\"p\">:</span> <span class=\"p\">[</span>\n",
" <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;Accuracy&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;value&quot;</span><span class=\"p\">:</span> <span class=\"mf\">0.6907142857142857</span>\n",
" <span class=\"p\">},</span>\n",
" <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;Precision_macro&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;value&quot;</span><span class=\"p\">:</span> <span class=\"mf\">0.34535714285714286</span>\n",
" <span class=\"p\">},</span>\n",
" <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;Recall_macro&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;value&quot;</span><span class=\"p\">:</span> <span class=\"mf\">0.5</span>\n",
" <span class=\"p\">},</span>\n",
" <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;F1_macro&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;value&quot;</span><span class=\"p\">:</span> <span class=\"mf\">0.40853400929446554</span>\n",
" <span class=\"p\">},</span>\n",
" <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;F1_weighted&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;value&quot;</span><span class=\"p\">:</span> <span class=\"mf\">0.5643605528396403</span>\n",
" <span class=\"p\">},</span>\n",
" <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;F1_micro&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;value&quot;</span><span class=\"p\">:</span> <span class=\"mf\">0.6907142857142857</span>\n",
" <span class=\"p\">},</span>\n",
" <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;F1_macro&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;value&quot;</span><span class=\"p\">:</span> <span class=\"mf\">0.40853400929446554</span>\n",
" <span class=\"p\">}</span>\n",
" <span class=\"p\">]</span>\n",
" <span class=\"p\">},</span>\n",
" <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;Evaluation&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;evaluates&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;endpoint:plugins/sentiment-vader_0.1.1__sts&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;evaluatesOn&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;sts&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;metrics&quot;</span><span class=\"p\">:</span> <span class=\"p\">[</span>\n",
" <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;Accuracy&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;value&quot;</span><span class=\"p\">:</span> <span class=\"mf\">0.3107177974434612</span>\n",
" <span class=\"p\">},</span>\n",
" <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;Precision_macro&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;value&quot;</span><span class=\"p\">:</span> <span class=\"mf\">0.1553588987217306</span>\n",
" <span class=\"p\">},</span>\n",
" <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;Recall_macro&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;value&quot;</span><span class=\"p\">:</span> <span class=\"mf\">0.5</span>\n",
" <span class=\"p\">},</span>\n",
" <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;F1_macro&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;value&quot;</span><span class=\"p\">:</span> <span class=\"mf\">0.23705926481620407</span>\n",
" <span class=\"p\">},</span>\n",
" <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;F1_weighted&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;value&quot;</span><span class=\"p\">:</span> <span class=\"mf\">0.14731706525451424</span>\n",
" <span class=\"p\">},</span>\n",
" <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;F1_micro&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;value&quot;</span><span class=\"p\">:</span> <span class=\"mf\">0.3107177974434612</span>\n",
" <span class=\"p\">},</span>\n",
" <span class=\"p\">{</span>\n",
" <span class=\"nt\">&quot;@type&quot;</span><span class=\"p\">:</span> <span class=\"s2\">&quot;F1_macro&quot;</span><span class=\"p\">,</span>\n",
" <span class=\"nt\">&quot;value&quot;</span><span class=\"p\">:</span> <span class=\"mf\">0.23705926481620407</span>\n",
" <span class=\"p\">}</span>\n",
" <span class=\"p\">]</span>\n",
" <span class=\"p\">}</span>\n",
" <span class=\"p\">]</span>\n",
"<span class=\"p\">}</span>\n",
"</pre></div>\n"
],
"text/latex": [
"\\begin{Verbatim}[commandchars=\\\\\\{\\}]\n",
"\\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}@context\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}http://senpy.gsi.upm.es/api/contexts/YXBpL2V2YWx1YXRlLz9hbGdvPXNlbnRpbWVudC12YWRlciZkYXRhc2V0PXZhZGVyJTJDc3RzJm91dGZvcm1hdD1qc29uLWxkIw\\PYZpc{}3D\\PYZpc{}3D\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}AggregatedEvaluation\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}senpy:evaluations\\PYZdq{}}\\PY{p}{:} \\PY{p}{[}\n",
" \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Evaluation\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}evaluates\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}endpoint:plugins/sentiment\\PYZhy{}vader\\PYZus{}0.1.1\\PYZus{}\\PYZus{}vader\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}evaluatesOn\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}vader\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}metrics\\PYZdq{}}\\PY{p}{:} \\PY{p}{[}\n",
" \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Accuracy\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.6907142857142857}\n",
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
" \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Precision\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.34535714285714286}\n",
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
" \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Recall\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.5}\n",
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
" \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.40853400929446554}\n",
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
" \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}weighted\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.5643605528396403}\n",
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
" \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}micro\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.6907142857142857}\n",
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
" \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.40853400929446554}\n",
" \\PY{p}{\\PYZcb{}}\n",
" \\PY{p}{]}\n",
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
" \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Evaluation\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}evaluates\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}endpoint:plugins/sentiment\\PYZhy{}vader\\PYZus{}0.1.1\\PYZus{}\\PYZus{}sts\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}evaluatesOn\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}sts\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}metrics\\PYZdq{}}\\PY{p}{:} \\PY{p}{[}\n",
" \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Accuracy\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.3107177974434612}\n",
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
" \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Precision\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.1553588987217306}\n",
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
" \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Recall\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.5}\n",
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
" \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.23705926481620407}\n",
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
" \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}weighted\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.14731706525451424}\n",
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
" \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}micro\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.3107177974434612}\n",
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
" \\PY{p}{\\PYZob{}}\n",
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.23705926481620407}\n",
" \\PY{p}{\\PYZcb{}}\n",
" \\PY{p}{]}\n",
" \\PY{p}{\\PYZcb{}}\n",
" \\PY{p}{]}\n",
"\\PY{p}{\\PYZcb{}}\n",
"\\end{Verbatim}\n"
],
"text/plain": [
"{\n",
" \"@context\": \"http://senpy.gsi.upm.es/api/contexts/YXBpL2V2YWx1YXRlLz9hbGdvPXNlbnRpbWVudC12YWRlciZkYXRhc2V0PXZhZGVyJTJDc3RzJm91dGZvcm1hdD1qc29uLWxkIw%3D%3D\",\n",
" \"@type\": \"AggregatedEvaluation\",\n",
" \"senpy:evaluations\": [\n",
" {\n",
" \"@type\": \"Evaluation\",\n",
" \"evaluates\": \"endpoint:plugins/sentiment-vader_0.1.1__vader\",\n",
" \"evaluatesOn\": \"vader\",\n",
" \"metrics\": [\n",
" {\n",
" \"@type\": \"Accuracy\",\n",
" \"value\": 0.6907142857142857\n",
" },\n",
" {\n",
" \"@type\": \"Precision_macro\",\n",
" \"value\": 0.34535714285714286\n",
" },\n",
" {\n",
" \"@type\": \"Recall_macro\",\n",
" \"value\": 0.5\n",
" },\n",
" {\n",
" \"@type\": \"F1_macro\",\n",
" \"value\": 0.40853400929446554\n",
" },\n",
" {\n",
" \"@type\": \"F1_weighted\",\n",
" \"value\": 0.5643605528396403\n",
" },\n",
" {\n",
" \"@type\": \"F1_micro\",\n",
" \"value\": 0.6907142857142857\n",
" },\n",
" {\n",
" \"@type\": \"F1_macro\",\n",
" \"value\": 0.40853400929446554\n",
" }\n",
" ]\n",
" },\n",
" {\n",
" \"@type\": \"Evaluation\",\n",
" \"evaluates\": \"endpoint:plugins/sentiment-vader_0.1.1__sts\",\n",
" \"evaluatesOn\": \"sts\",\n",
" \"metrics\": [\n",
" {\n",
" \"@type\": \"Accuracy\",\n",
" \"value\": 0.3107177974434612\n",
" },\n",
" {\n",
" \"@type\": \"Precision_macro\",\n",
" \"value\": 0.1553588987217306\n",
" },\n",
" {\n",
" \"@type\": \"Recall_macro\",\n",
" \"value\": 0.5\n",
" },\n",
" {\n",
" \"@type\": \"F1_macro\",\n",
" \"value\": 0.23705926481620407\n",
" },\n",
" {\n",
" \"@type\": \"F1_weighted\",\n",
" \"value\": 0.14731706525451424\n",
" },\n",
" {\n",
" \"@type\": \"F1_micro\",\n",
" \"value\": 0.3107177974434612\n",
" },\n",
" {\n",
" \"@type\": \"F1_macro\",\n",
" \"value\": 0.23705926481620407\n",
" }\n",
" ]\n",
" }\n",
" ]\n",
"}"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import requests\n",
"from IPython.display import Code\n",
"\n",
"endpoint = 'http://senpy.gsi.upm.es/api'\n",
"res = requests.get(f'{endpoint}/evaluate',\n",
" params={\"algo\": \"sentiment-vader\",\n",
" \"dataset\": \"vader,sts\",\n",
" 'outformat': 'json-ld'\n",
" })\n",
"Code(res.text, language='json')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Programmatically (expert)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"A third option is to evaluate plugins manually without launching the server.\n",
"\n",
"This option is particularly interesting for advanced users that want faster iterations and evaluation results, and for automation.\n",
"\n",
"We would first need an instance of a plugin.\n",
"In this example we will use the Sentiment140 plugin that is included in every senpy installation:"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {},
"outputs": [],
"source": [
"from senpy.plugins.sentiment import sentiment140_plugin"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [],
"source": [
"s140 = sentiment140_plugin.Sentiment140()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Then, we need to know what datasets are available.\n",
"We can list all datasets and basic stats (e.g., number of instances and labels used) like this:"
]
},
{
"cell_type": "code",
"execution_count": 32,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"vader {'instances': 4200, 'labels': [1, -1]}\n",
"sts {'instances': 4200, 'labels': [1, -1]}\n",
"imdb_unsup {'instances': 50000, 'labels': [1, -1]}\n",
"imdb {'instances': 50000, 'labels': [1, -1]}\n",
"sst {'instances': 11855, 'labels': [1, -1]}\n",
"multidomain {'instances': 38548, 'labels': [1, -1]}\n",
"sentiment140 {'instances': 1600000, 'labels': [1, -1]}\n",
"semeval07 {'instances': 'None', 'labels': [1, -1]}\n",
"semeval14 {'instances': 7838, 'labels': [1, -1]}\n",
"pl04 {'instances': 4000, 'labels': [1, -1]}\n",
"pl05 {'instances': 10662, 'labels': [1, -1]}\n",
"semeval13 {'instances': 6259, 'labels': [1, -1]}\n"
]
}
],
"source": [
"from senpy.gsitk_compat import datasets\n",
"for k, d in datasets.items():\n",
" print(k, d['stats'])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now, we will evaluate our plugin in one of the smallest datasets, `sts`:"
]
},
{
"cell_type": "code",
"execution_count": 37,
"metadata": {
"scrolled": false
},
"outputs": [
{
"data": {
"text/plain": [
"[{\n",
" \"@type\": \"Evaluation\",\n",
" \"evaluates\": \"endpoint:plugins/sentiment140_0.2\",\n",
" \"evaluatesOn\": \"sts\",\n",
" \"metrics\": [\n",
" {\n",
" \"@type\": \"Accuracy\",\n",
" \"value\": 0.872173058013766\n",
" },\n",
" {\n",
" \"@type\": \"Precision_macro\",\n",
" \"value\": 0.9035254323131467\n",
" },\n",
" {\n",
" \"@type\": \"Recall_macro\",\n",
" \"value\": 0.8021249029415483\n",
" },\n",
" {\n",
" \"@type\": \"F1_macro\",\n",
" \"value\": 0.8320673712021136\n",
" },\n",
" {\n",
" \"@type\": \"F1_weighted\",\n",
" \"value\": 0.8631351567604358\n",
" },\n",
" {\n",
" \"@type\": \"F1_micro\",\n",
" \"value\": 0.872173058013766\n",
" },\n",
" {\n",
" \"@type\": \"F1_macro\",\n",
" \"value\": 0.8320673712021136\n",
" }\n",
" ]\n",
" }]"
]
},
"execution_count": 37,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"s140.evaluate(['sts', ])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"anaconda-cloud": {},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
},
"toc": {
"colors": {
"hover_highlight": "#DAA520",
"running_highlight": "#FF0000",
"selected_highlight": "#FFD700"
},
"moveMenuLeft": true,
"nav_menu": {
"height": "68px",
"width": "252px"
},
"navigate_menu": true,
"number_sections": true,
"sideBar": true,
"threshold": 4,
"toc_cell": false,
"toc_section_display": "block",
"toc_window_display": false
}
},
"nbformat": 4,
"nbformat_minor": 1
}

View File

@@ -24,7 +24,6 @@ I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
help: help:
@echo "Please use \`make <target>' where <target> is one of" @echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files" @echo " html to make standalone HTML files"
@echo " entr to watch for changes and continuously make HTML files"
@echo " dirhtml to make HTML files named index.html in directories" @echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file" @echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files" @echo " pickle to make pickle files"
@@ -50,9 +49,6 @@ help:
clean: clean:
rm -rf $(BUILDDIR)/* rm -rf $(BUILDDIR)/*
entr:
while true; do ag -g rst | entr -d make html; done
html: html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo @echo

File diff suppressed because it is too large Load Diff

317
docs/SenpyClientUse.ipynb Normal file
View File

@@ -0,0 +1,317 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:05:31.465571Z",
"start_time": "2017-04-10T19:05:31.458282+02:00"
},
"deletable": true,
"editable": true
},
"source": [
"# Client"
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": true,
"deletable": true,
"editable": true
},
"source": [
"The built-in senpy client allows you to query any Senpy endpoint. We will illustrate how to use it with the public demo endpoint, and then show you how to spin up your own endpoint using docker."
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"Demo Endpoint\n",
"-------------"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"To start using senpy, simply create a new Client and point it to your endpoint. In this case, the latest version of Senpy at GSI."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:12.827640Z",
"start_time": "2017-04-10T19:29:12.818617+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"from senpy.client import Client\n",
"\n",
"c = Client('http://latest.senpy.cluster.gsi.dit.upm.es/api')\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
    "Now, let's use that client to analyse some queries:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:14.011657Z",
"start_time": "2017-04-10T19:29:13.701808+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"r = c.analyse('I like sugar!!', algorithm='sentiment140')\n",
"r"
]
},
{
"cell_type": "markdown",
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:08:19.616754Z",
"start_time": "2017-04-10T19:08:19.610767+02:00"
},
"deletable": true,
"editable": true
},
"source": [
"As you can see, that gave us the full JSON result. A more concise way to print it would be:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:14.854213Z",
"start_time": "2017-04-10T19:29:14.842068+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"for entry in r.entries:\n",
" print('{} -> {}'.format(entry['text'], entry['sentiments'][0]['marl:hasPolarity']))"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"We can also obtain a list of available plugins with the client:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:16.245198Z",
"start_time": "2017-04-10T19:29:16.056545+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"c.plugins()"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"Or, more concisely:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:17.663275Z",
"start_time": "2017-04-10T19:29:17.484623+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"c.plugins().keys()"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"Local Endpoint\n",
"--------------"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"To run your own instance of senpy, just create a docker container with the latest Senpy image. Using `--default-plugins` you will get some extra plugins to start playing with the API."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:20.637539Z",
"start_time": "2017-04-10T19:29:19.938322+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"!docker run -ti --name 'SenpyEndpoint' -d -p 6000:5000 gsiupm/senpy:0.8.6 --host 0.0.0.0 --default-plugins"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"To use this endpoint:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:21.263976Z",
"start_time": "2017-04-10T19:29:21.260595+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"c_local = Client('http://127.0.0.1:6000/api')"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"That's all! After you are done with your analysis, stop the docker container:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:33.226686Z",
"start_time": "2017-04-10T19:29:22.392121+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"!docker stop SenpyEndpoint\n",
"!docker rm SenpyEndpoint"
]
}
],
"metadata": {
"anaconda-cloud": {},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.0"
},
"toc": {
"colors": {
"hover_highlight": "#DAA520",
"running_highlight": "#FF0000",
"selected_highlight": "#FFD700"
},
"moveMenuLeft": true,
"nav_menu": {
"height": "68px",
"width": "252px"
},
"navigate_menu": true,
"number_sections": true,
"sideBar": true,
"threshold": 4,
"toc_cell": false,
"toc_section_display": "block",
"toc_window_display": false
}
},
"nbformat": 4,
"nbformat_minor": 1
}

106
docs/SenpyClientUse.rst Normal file
View File

@@ -0,0 +1,106 @@
Client
======
Demo Endpoint
-------------
Import Client and send a request
.. code:: python
from senpy.client import Client
c = Client('http://latest.senpy.cluster.gsi.dit.upm.es/api')
r = c.analyse('I like Pizza', algorithm='sentiment140')
Print response
.. code:: python
for entry in r.entries:
print('{} -> {}'.format(entry['text'], entry['sentiments'][0]['marl:hasPolarity']))
.. parsed-literal::
I like Pizza -> marl:Positive
Obtain a list of available plugins
.. code:: python
for plugin in c.request('/plugins')['plugins']:
print(plugin['name'])
.. parsed-literal::
emoRand
rand
sentiment140
Local Endpoint
--------------
Run a docker container with Senpy image and default plugins
.. code::
docker run -ti --name 'SenpyEndpoint' -d -p 5000:5000 gsiupm/senpy:0.8.6 --host 0.0.0.0 --default-plugins
.. parsed-literal::
a0157cd98057072388bfebeed78a830da7cf0a796f4f1a3fd9188f9f2e5fe562
Import client and send a request to localhost
.. code:: python
c_local = Client('http://127.0.0.1:5000/api')
r = c_local.analyse('Hello world', algorithm='sentiment140')
Print response
.. code:: python
for entry in r.entries:
print('{} -> {}'.format(entry['text'], entry['sentiments'][0]['marl:hasPolarity']))
.. parsed-literal::
Hello world -> marl:Neutral
Obtain a list of available plugins deployed locally
.. code:: python
c_local.plugins().keys()
.. parsed-literal::
rand
sentiment140
emoRand
Stop the docker container
.. code:: python
!docker stop SenpyEndpoint
!docker rm SenpyEndpoint
.. parsed-literal::
SenpyEndpoint
SenpyEndpoint

11
docs/about.rst Normal file
View File

@@ -0,0 +1,11 @@
About
--------
If you use Senpy in your research, please cite `Senpy: A Pragmatic Linked Sentiment Analysis Framework <http://gsi.dit.upm.es/index.php/es/investigacion/publicaciones?view=publication&task=show&id=417>`__ (`BibTex <http://gsi.dit.upm.es/index.php/es/investigacion/publicaciones?controller=publications&task=export&format=bibtex&id=417>`__):
.. code-block:: text
Sánchez-Rada, J. F., Iglesias, C. A., Corcuera, I., & Araque, Ó. (2016, October).
Senpy: A Pragmatic Linked Sentiment Analysis Framework.
In Data Science and Advanced Analytics (DSAA),
2016 IEEE International Conference on (pp. 735-742). IEEE.

View File

@@ -25,7 +25,7 @@ NIF API
"@context":"http://127.0.0.1/api/contexts/Results.jsonld", "@context":"http://127.0.0.1/api/contexts/Results.jsonld",
"@id":"_:Results_11241245.22", "@id":"_:Results_11241245.22",
"@type":"results" "@type":"results"
"activities": [ "analysis": [
"plugins/sentiment-140_0.1" "plugins/sentiment-140_0.1"
], ],
"entries": [ "entries": [
@@ -73,7 +73,7 @@ NIF API
.. http:get:: /api/plugins .. http:get:: /api/plugins
Returns a list of installed plugins. Returns a list of installed plugins.
**Example request and response**: **Example request**:
.. sourcecode:: http .. sourcecode:: http
@@ -82,6 +82,10 @@ NIF API
Accept: application/json, text/javascript Accept: application/json, text/javascript
**Example response**:
.. sourcecode:: http
{ {
"@id": "plugins/sentiment-140_0.1", "@id": "plugins/sentiment-140_0.1",
"@type": "sentimentPlugin", "@type": "sentimentPlugin",
@@ -139,14 +143,19 @@ NIF API
.. http:get:: /api/plugins/<pluginname> .. http:get:: /api/plugins/<pluginname>
Returns the information of a specific plugin. Returns the information of a specific plugin.
**Example request and response**: **Example request**:
.. sourcecode:: http .. sourcecode:: http
GET /api/plugins/sentiment-random/ HTTP/1.1 GET /api/plugins/rand/ HTTP/1.1
Host: localhost Host: localhost
Accept: application/json, text/javascript Accept: application/json, text/javascript
**Example response**:
.. sourcecode:: http
{ {
"@context": "http://127.0.0.1/api/contexts/ExamplePlugin.jsonld", "@context": "http://127.0.0.1/api/contexts/ExamplePlugin.jsonld",
"@id": "plugins/ExamplePlugin_0.1", "@id": "plugins/ExamplePlugin_0.1",

View File

@@ -1,6 +1,5 @@
API and vocabularies API and Examples
#################### ################
.. toctree:: .. toctree::
vocabularies.rst vocabularies.rst

View File

@@ -2,7 +2,7 @@
"@context": "http://mixedemotions-project.eu/ns/context.jsonld", "@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "me:Result1", "@id": "me:Result1",
"@type": "results", "@type": "results",
"activities": [ "analysis": [
"me:SAnalysis1", "me:SAnalysis1",
"me:SgAnalysis1", "me:SgAnalysis1",
"me:EmotionAnalysis1", "me:EmotionAnalysis1",

View File

@@ -2,13 +2,17 @@
"@context": "http://mixedemotions-project.eu/ns/context.jsonld", "@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "http://example.com#NIFExample", "@id": "http://example.com#NIFExample",
"@type": "results", "@type": "results",
"activities": [ "analysis": [
], ],
"entries": [ "entries": [
{ {
"@type": [
"nif:RFC5147String",
"nif:Context"
],
"nif:beginIndex": 0, "nif:beginIndex": 0,
"nif:endIndex": 40, "nif:endIndex": 40,
"text": "An entry should have a nif:isString key" "nif:isString": "My favourite actress is Natalie Portman"
} }
] ]
} }

9
docs/commandline.rst Normal file
View File

@@ -0,0 +1,9 @@
Command line
============
This video shows how to analyse text directly on the command line using the senpy tool.
.. image:: https://asciinema.org/a/9uwef1ghkjk062cw2t4mhzpyk.png
:width: 100%
:target: https://asciinema.org/a/9uwef1ghkjk062cw2t4mhzpyk
:alt: CLI demo

View File

@@ -38,8 +38,6 @@ extensions = [
'sphinxcontrib.httpdomain', 'sphinxcontrib.httpdomain',
'sphinx.ext.coverage', 'sphinx.ext.coverage',
'sphinx.ext.autosectionlabel', 'sphinx.ext.autosectionlabel',
'nbsphinx',
'sphinx.ext.mathjax',
] ]
# Add any paths that contain templates here, relative to this directory. # Add any paths that contain templates here, relative to this directory.
@@ -56,7 +54,7 @@ master_doc = 'index'
# General information about the project. # General information about the project.
project = u'Senpy' project = u'Senpy'
copyright = u'2019, J. Fernando Sánchez' copyright = u'2016, J. Fernando Sánchez'
description = u'A framework for sentiment and emotion analysis services' description = u'A framework for sentiment and emotion analysis services'
# The version info for the project you're documenting, acts as replacement for # The version info for the project you're documenting, acts as replacement for
@@ -81,9 +79,7 @@ language = None
# List of patterns, relative to source directory, that match files and # List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files. # directories to ignore when looking for source files.
exclude_patterns = ['_build', '**.ipynb_checkpoints'] exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all # The reST default role (used for this markup: `text`) to use for all
# documents. # documents.
@@ -130,7 +126,6 @@ html_theme_options = {
'github_user': 'gsi-upm', 'github_user': 'gsi-upm',
'github_repo': 'senpy', 'github_repo': 'senpy',
'github_banner': True, 'github_banner': True,
'sidebar_collapse': True,
} }
@@ -291,12 +286,3 @@ texinfo_documents = [
# If true, do not generate a @detailmenu in the "Top" node's menu. # If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False #texinfo_no_detailmenu = False
nbsphinx_prolog = """
.. note:: This is an `auto-generated <https://nbsphinx.readthedocs.io>`_ static view of a Jupyter notebook.
To run the code examples in your computer, you may download the original notebook from the repository: https://github.com/gsi-upm/senpy/tree/master/docs/{{ env.doc2path(env.docname, base=None) }}
----
"""

View File

@@ -1,152 +1,93 @@
Automatic Model Conversion Conversion
-------------------------- ----------
Senpy includes support for emotion and sentiment conversion. Senpy includes experimental support for emotion/sentiment conversion plugins.
When a user requests a specific model, senpy will choose a strategy to convert between the model that the service usually outputs and the model requested by the user.
Out of the box, senpy can convert from the `emotionml:pad` (pleasure-arousal-dominance) dimensional model to `emoml:big6` (Ekman's big-6) categories, and vice versa.
This specific conversion uses a series of dimensional centroids (`emotionml:pad`) for each emotion category (`emotionml:big6`).
A dimensional value is converted to a category by looking for the nearest centroid.
The centroids are calculated according to this article:
.. code-block:: text
Kim, S. M., Valitutti, A., & Calvo, R. A. (2010, June).
Evaluation of unsupervised emotion models to textual affect recognition.
In Proceedings of the NAACL HLT 2010 Workshop on Computational Approaches to Analysis and Generation of Emotion in Text (pp. 62-70).
Association for Computational Linguistics.
It is possible to add new conversion strategies by `Developing a conversion plugin`_.
Use Use
=== ===
Consider the following query to an emotion service: http://senpy.gsi.upm.es/api/emotion-anew?i=good Consider the original query: http://127.0.0.1:5000/api/?i=hello&algo=emoRand
The requested plugin (emotion-random) returns emotions using the VAD space (FSRE dimensions in EmotionML): The requested plugin (emoRand) returns emotions using Ekman's model (or big6 in EmotionML):
.. code:: json .. code:: json
[ ... rest of the document ...
{ {
"@type": "EmotionSet", "@type": "emotionSet",
"onyx:hasEmotion": [ "onyx:hasEmotion": {
{ "@type": "emotion",
"@type": "Emotion", "onyx:hasEmotionCategory": "emoml:big6anger"
"emoml:pad-dimensions_arousal": 5.43, },
"emoml:pad-dimensions_dominance": 6.41, "prov:wasGeneratedBy": "plugins/emoRand_0.1"
"emoml:pad-dimensions_pleasure": 7.47,
"prov:wasGeneratedBy": "prefix:Analysis_1562744784.8789825"
} }
],
"prov:wasGeneratedBy": "prefix:Analysis_1562744784.8789825"
}
]
To get these emotions in VAD space (FSRE dimensions in EmotionML), we'd do this:
To get the equivalent of these emotions in Ekman's categories (i.e., Ekman's Big 6 in EmotionML), we'd do this: http://127.0.0.1:5000/api/?i=hello&algo=emoRand&emotionModel=emoml:fsre-dimensions
http://senpy.gsi.upm.es/api/emotion-anew?i=good&emotion-model=emoml:big6
This call, provided there is a valid conversion plugin from Ekman's to VAD, would return something like this: This call, provided there is a valid conversion plugin from Ekman's to VAD, would return something like this:
.. code:: json .. code:: json
[
... rest of the document ...
{ {
"@type": "EmotionSet", "@type": "emotionSet",
"onyx:hasEmotion": [ "onyx:hasEmotion": {
{ "@type": "emotion",
"@type": "Emotion", "onyx:hasEmotionCategory": "emoml:big6anger"
"onyx:algorithmConfidence": 4.4979,
"onyx:hasEmotionCategory": "emoml:big6happiness"
}
],
"prov:wasDerivedFrom": {
"@id": "Emotions0",
"@type": "EmotionSet",
"onyx:hasEmotion": [
{
"@id": "Emotion0",
"@type": "Emotion",
"emoml:pad-dimensions_arousal": 5.43,
"emoml:pad-dimensions_dominance": 6.41,
"emoml:pad-dimensions_pleasure": 7.47,
"prov:wasGeneratedBy": "prefix:Analysis_1562745220.1553965"
}
],
"prov:wasGeneratedBy": "prefix:Analysis_1562745220.1553965"
}, },
"prov:wasGeneratedBy": "prefix:Analysis_1562745220.1570725" "prov:wasGeneratedBy": "plugins/emoRand_0.1"
}, {
"@type": "emotionSet",
"onyx:hasEmotion": {
"@type": "emotion",
"A": 7.22,
"D": 6.28,
"V": 8.6
},
"prov:wasGeneratedBy": "plugins/Ekman2VAD_0.1"
} }
]
That is called a *full* response, as it simply adds the converted emotion alongside. That is called a *full* response, as it simply adds the converted emotion alongside.
It is also possible to get the original emotion nested within the new converted emotion, using the `conversion=nested` parameter: It is also possible to get the original emotion nested within the new converted emotion, using the `conversion=nested` parameter:
http://senpy.gsi.upm.es/api/emotion-anew?i=good&emotion-model=emoml:big6&conversion=nested
.. code:: json .. code:: json
[
{
"@type": "EmotionSet",
"onyx:hasEmotion": [
{
"@type": "Emotion",
"onyx:algorithmConfidence": 4.4979,
"onyx:hasEmotionCategory": "emoml:big6happiness"
}
],
"prov:wasDerivedFrom": {
"@id": "Emotions0",
"@type": "EmotionSet",
"onyx:hasEmotion": [
{
"@id": "Emotion0",
"@type": "Emotion",
"emoml:pad-dimensions_arousal": 5.43,
"emoml:pad-dimensions_dominance": 6.41,
"emoml:pad-dimensions_pleasure": 7.47,
"prov:wasGeneratedBy": "prefix:Analysis_1562744962.896306"
}
],
"prov:wasGeneratedBy": "prefix:Analysis_1562744962.896306"
},
"prov:wasGeneratedBy": "prefix:Analysis_1562744962.8978968"
}
]
... rest of the document ...
{
"@type": "emotionSet",
"onyx:hasEmotion": {
"@type": "emotion",
"onyx:hasEmotionCategory": "emoml:big6anger"
},
"prov:wasGeneratedBy": "plugins/emoRand_0.1"
"onyx:wasDerivedFrom": {
"@type": "emotionSet",
"onyx:hasEmotion": {
"@type": "emotion",
"A": 7.22,
"D": 6.28,
"V": 8.6
},
"prov:wasGeneratedBy": "plugins/Ekman2VAD_0.1"
}
}
Lastly, `conversion=filtered` would only return the converted emotions. Lastly, `conversion=filtered` would only return the converted emotions.
.. code:: json
[
{
"@type": "EmotionSet",
"onyx:hasEmotion": [
{
"@type": "Emotion",
"onyx:algorithmConfidence": 4.4979,
"onyx:hasEmotionCategory": "emoml:big6happiness"
}
],
"prov:wasGeneratedBy": "prefix:Analysis_1562744925.7322266"
}
]
Developing a conversion plugin Developing a conversion plugin
============================== ================================
Conversion plugins are discovered by the server just like any other plugin. Conversion plugins are discovered by the server just like any other plugin.
The difference is the slightly different API, and the need to specify the `source` and `target` of the conversion. The difference is the slightly different API, and the need to specify the `source` and `target` of the conversion.
@@ -165,6 +106,7 @@ For instance, an emotion conversion plugin needs the following:
.. code:: python .. code:: python
@@ -172,6 +114,3 @@ For instance, an emotion conversion plugin needs the following:
def convert(self, emotionSet, fromModel, toModel, params): def convert(self, emotionSet, fromModel, toModel, params):
pass pass
More implementation details are shown in the `centroids plugin <https://github.com/gsi-upm/senpy/blob/master/senpy/plugins/postprocessing/emotion/centroids.py>`_.

View File

@@ -1,13 +1,16 @@
Demo Demo
---- ----
There is a demo available on http://senpy.gsi.upm.es/, where you can test a live instance of Senpy, with several open source plugins. There is a demo available on http://senpy.cluster.gsi.dit.upm.es/, where you can test a series of different plugins.
You can use the playground (a web interface) or the HTTP API. You can use the playground (a web interface) or make HTTP requests to the service API.
.. image:: playground-0.20.png .. image:: senpy-playground.png
:target: http://senpy.gsi.upm.es :height: 400px
:width: 800px :width: 800px
:scale: 100 %
:align: center :align: center
Plugins Demo
============
The source code and description of the plugins used in the demo are available here: https://github.com/gsi-upm/senpy-plugins-community/. The source code and description of the plugins used in the demo is available here: https://lab.cluster.gsi.dit.upm.es/senpy/senpy-plugins-community/.

View File

@@ -1,25 +0,0 @@
Developing new services
-----------------------
Developing web services can be hard.
A text analysis service must implement all the typical features, such as: extraction of parameters, validation, format conversion, visualization...
Senpy implements all the common blocks, so developers can focus on what really matters: great analysis algorithms that solve real problems.
Among other things, Senpy takes care of these tasks:
* Interfacing with the user: parameter validation, error handling.
* Formatting: JSON-LD, Turtle/n-triples input and output, or simple text input
* Linked Data: senpy results are semantically annotated, using a series of well established vocabularies, and sane default URIs.
* User interface: a web UI where users can explore your service and test different settings
* A client to interact with the service. Currently only available in Python.
You only need to provide the algorithm to turn a piece of text into an annotation
Sharing your sentiment analysis with the world has never been easier!
.. toctree::
:maxdepth: 1
server-cli
plugins-quickstart
plugins-faq
plugins-definition

Binary file not shown.

Before

Width:  |  Height:  |  Size: 77 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 76 KiB

View File

@@ -1,6 +1,5 @@
Examples Examples
-------- ------
All the examples in this page use the :download:`the main schema <_static/schemas/definitions.json>`. All the examples in this page use the :download:`the main schema <_static/schemas/definitions.json>`.
Simple NIF annotation Simple NIF annotation
@@ -18,7 +17,6 @@ Sentiment Analysis
..................... .....................
Description Description
,,,,,,,,,,, ,,,,,,,,,,,
This annotation corresponds to the sentiment analysis of an input. The example shows the sentiment represented according to Marl format. This annotation corresponds to the sentiment analysis of an input. The example shows the sentiment represented according to Marl format.
The sentiments detected are contained in the Sentiments array with their related part of the text. The sentiments detected are contained in the Sentiments array with their related part of the text.
@@ -26,7 +24,20 @@ Representation
,,,,,,,,,,,,,, ,,,,,,,,,,,,,,
.. literalinclude:: examples/results/example-sentiment.json .. literalinclude:: examples/results/example-sentiment.json
:emphasize-lines: 5-11,20-30 :emphasize-lines: 5-10,25-33
:language: json-ld
Suggestion Mining
.................
Description
,,,,,,,,,,,
The suggestions schema represented below shows the suggestions detected in the text. Within it, we can find the NIF fields highlighted that corresponds to the text of the detected suggestion.
Representation
,,,,,,,,,,,,,,
.. literalinclude:: examples/results/example-suggestion.json
:emphasize-lines: 5-8,22-27
:language: json-ld :language: json-ld
Emotion Analysis Emotion Analysis
@@ -40,6 +51,28 @@ Representation
.. literalinclude:: examples/results/example-emotion.json .. literalinclude:: examples/results/example-emotion.json
:language: json-ld :language: json-ld
:emphasize-lines: 5-11,22-36 :emphasize-lines: 5-8,25-37
Named Entity Recognition
........................
Description
,,,,,,,,,,,
The Named Entity Recognition is represented as follows. In this particular case, it can be seen within the entities array the entities recognised. For the example input, Microsoft and Windows Phone are the ones detected.
Representation
,,,,,,,,,,,,,,
.. literalinclude:: examples/results/example-ner.json
:emphasize-lines: 5-8,19-34
:language: json-ld
Complete example
................
Description
,,,,,,,,,,,
This example covers all of the above cases, integrating all the annotations in the same document.
Representation
,,,,,,,,,,,,,,
.. literalinclude:: examples/results/example-complete.json
:language: json-ld

View File

@@ -2,22 +2,11 @@
"@context": "http://mixedemotions-project.eu/ns/context.jsonld", "@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "me:Result1", "@id": "me:Result1",
"@type": "results", "@type": "results",
"activities": [ "analysis": [
{ "me:SAnalysis1",
"@id": "_:SAnalysis1_Activity", "me:SgAnalysis1",
"@type": "marl:SentimentAnalysis", "me:EmotionAnalysis1",
"prov:wasAssociatedWith": "me:SAnalysis1" "me:NER1"
},
{
"@id": "_:EmotionAnalysis1_Activity",
"@type": "onyx:EmotionAnalysis",
"prov:wasAssociatedWith": "me:EmotionAnalysis1"
},
{
"@id": "_:NER1_Activity",
"@type": "me:NER",
"prov:wasAssociatedWith": "me:NER1"
}
], ],
"entries": [ "entries": [
{ {
@@ -34,7 +23,7 @@
"nif:endIndex": 13, "nif:endIndex": 13,
"nif:anchorOf": "Microsoft", "nif:anchorOf": "Microsoft",
"me:references": "http://dbpedia.org/page/Microsoft", "me:references": "http://dbpedia.org/page/Microsoft",
"prov:wasGeneratedBy": "_:NER1_Activity" "prov:wasGeneratedBy": "me:NER1"
}, },
{ {
"@id": "http://micro.blog/status1#char=25,37", "@id": "http://micro.blog/status1#char=25,37",
@@ -42,7 +31,7 @@
"nif:endIndex": 37, "nif:endIndex": 37,
"nif:anchorOf": "Windows Phone", "nif:anchorOf": "Windows Phone",
"me:references": "http://dbpedia.org/page/Windows_Phone", "me:references": "http://dbpedia.org/page/Windows_Phone",
"prov:wasGeneratedBy": "_:NER1_Activity" "prov:wasGeneratedBy": "me:NER1"
} }
], ],
"suggestions": [ "suggestions": [
@@ -51,7 +40,7 @@
"nif:beginIndex": 16, "nif:beginIndex": 16,
"nif:endIndex": 77, "nif:endIndex": 77,
"nif:anchorOf": "put your Windows Phone on your newest #open technology program", "nif:anchorOf": "put your Windows Phone on your newest #open technology program",
"prov:wasGeneratedBy": "_:SgAnalysis1_Activity" "prov:wasGeneratedBy": "me:SgAnalysis1"
} }
], ],
"sentiments": [ "sentiments": [
@@ -62,14 +51,14 @@
"nif:anchorOf": "You'll be awesome.", "nif:anchorOf": "You'll be awesome.",
"marl:hasPolarity": "marl:Positive", "marl:hasPolarity": "marl:Positive",
"marl:polarityValue": 0.9, "marl:polarityValue": 0.9,
"prov:wasGeneratedBy": "_:SgAnalysis1_Activity" "prov:wasGeneratedBy": "me:SAnalysis1"
} }
], ],
"emotions": [ "emotions": [
{ {
"@id": "http://micro.blog/status1#char=0,109", "@id": "http://micro.blog/status1#char=0,109",
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource", "nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"prov:wasGeneratedBy": "_:EmotionAnalysis1_Activity", "prov:wasGeneratedBy": "me:EAnalysis1",
"onyx:hasEmotion": [ "onyx:hasEmotion": [
{ {
"onyx:hasEmotionCategory": "wna:liking" "onyx:hasEmotionCategory": "wna:liking"

View File

@@ -0,0 +1,78 @@
{
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "me:Result1",
"@type": "results",
"analysis": [
"me:SAnalysis1",
"me:SgAnalysis1",
"me:EmotionAnalysis1",
"me:NER1",
{
"@type": "analysis",
"@id": "anonymous"
}
],
"entries": [
{
"@id": "http://micro.blog/status1",
"@type": [
"nif:RFC5147String",
"nif:Context"
],
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"entities": [
{
"@id": "http://micro.blog/status1#char=5,13",
"nif:beginIndex": 5,
"nif:endIndex": 13,
"nif:anchorOf": "Microsoft",
"me:references": "http://dbpedia.org/page/Microsoft",
"prov:wasGeneratedBy": "me:NER1"
},
{
"@id": "http://micro.blog/status1#char=25,37",
"nif:beginIndex": 25,
"nif:endIndex": 37,
"nif:anchorOf": "Windows Phone",
"me:references": "http://dbpedia.org/page/Windows_Phone",
"prov:wasGeneratedBy": "me:NER1"
}
],
"suggestions": [
{
"@id": "http://micro.blog/status1#char=16,77",
"nif:beginIndex": 16,
"nif:endIndex": 77,
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
"prov:wasGeneratedBy": "me:SgAnalysis1"
}
],
"sentiments": [
{
"@id": "http://micro.blog/status1#char=80,97",
"nif:beginIndex": 80,
"nif:endIndex": 97,
"nif:anchorOf": "You'll be awesome.",
"marl:hasPolarity": "marl:Positive",
"marl:polarityValue": 0.9,
"prov:wasGeneratedBy": "me:SAnalysis1"
}
],
"emotions": [
{
"@id": "http://micro.blog/status1#char=0,109",
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"prov:wasGeneratedBy": "me:EAnalysis1",
"onyx:hasEmotion": [
{
"onyx:hasEmotionCategory": "wna:liking"
},
{
"onyx:hasEmotionCategory": "wna:excitement"
}
]
}
]
}
]
}

View File

@@ -1,8 +1,9 @@
{ {
"@context": "http://mixedemotions-project.eu/ns/context.jsonld", "@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "me:Result1", "@id": "http://example.com#NIFExample",
"@type": "results", "@type": "results",
"activities": [ ], "analysis": [
],
"entries": [ "entries": [
{ {
"@id": "http://example.org#char=0,40", "@id": "http://example.org#char=0,40",

View File

@@ -2,36 +2,24 @@
"@context": "http://mixedemotions-project.eu/ns/context.jsonld", "@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "me:Result1", "@id": "me:Result1",
"@type": "results", "@type": "results",
"activities": [ "analysis": [
{ {
"@id": "_:SAnalysis1_Activity", "@id": "me:SAnalysis1",
"@type": "marl:SentimentAnalysis", "@type": "marl:SentimentAnalysis",
"prov:wasAssociatedWith": "me:SentimentAnalysis", "marl:maxPolarityValue": 1,
"prov:used": [ "marl:minPolarityValue": 0
{
"name": "marl:maxPolarityValue",
"prov:value": "1"
}, },
{ {
"name": "marl:minPolarityValue", "@id": "me:SgAnalysis1",
"prov:value": "0"
}
]
},
{
"@id": "_:SgAnalysis1_Activity",
"prov:wasAssociatedWith": "me:SgAnalysis1",
"@type": "me:SuggestionAnalysis" "@type": "me:SuggestionAnalysis"
}, },
{ {
"@id": "_:EmotionAnalysis1_Activity", "@id": "me:EmotionAnalysis1",
"@type": "me:EmotionAnalysis", "@type": "me:EmotionAnalysis"
"prov:wasAssociatedWith": "me:EmotionAnalysis1"
}, },
{ {
"@id": "_:NER1_Activity", "@id": "me:NER1",
"@type": "me:NER", "@type": "me:NER"
"prov:wasAssociatedWith": "me:EmotionNER1"
} }
], ],
"entries": [ "entries": [

View File

@@ -2,11 +2,10 @@
"@context": "http://mixedemotions-project.eu/ns/context.jsonld", "@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "me:Result1", "@id": "me:Result1",
"@type": "results", "@type": "results",
"activities": [ "analysis": [
{ {
"@id": "me:EmotionAnalysis1_Activity", "@id": "me:EmotionAnalysis1",
"@type": "me:EmotionAnalysis1", "@type": "onyx:EmotionAnalysis"
"prov:wasAssociatedWith": "me:EmotionAnalysis1"
} }
], ],
"entries": [ "entries": [
@@ -17,13 +16,17 @@
"nif:Context" "nif:Context"
], ],
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource", "nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"entities": [
],
"suggestions": [
],
"sentiments": [ "sentiments": [
], ],
"emotions": [ "emotions": [
{ {
"@id": "http://micro.blog/status1#char=0,109", "@id": "http://micro.blog/status1#char=0,109",
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource", "nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"prov:wasGeneratedBy": "_:EmotionAnalysis1_Activity", "prov:wasGeneratedBy": "me:EmotionAnalysis1",
"onyx:hasEmotion": [ "onyx:hasEmotion": [
{ {
"onyx:hasEmotionCategory": "wna:liking" "onyx:hasEmotionCategory": "wna:liking"

View File

@@ -1,42 +0,0 @@
{
"@context": "http://senpy.gsi.upm.es/api/contexts/YXBpP2FsZ289ZW1",
"@id": ":Result1",
"@type": "results",
"activities": [
{
"@id": ":MoralAnalysis1_Activity",
"@type": "amor:MoralValueAnalysis",
"prov:wasAssociatedWith": ":MoralAnalysis1",
"amor:usedMoralValueModel": "amor-mft:MoralFoundationsTheory",
"amor:analysed": "news1",
"amor:usedMLModel": ":model1",
"prov:generated": ":annotation3"
}
],
"entries": [
{
"@id": "http://micro.blog/status1",
"@type": [
"nif:RFC5147String",
"nif:Context"
],
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"morality": [
{
"@id": ":annotation3",
"@type": "amor:MoralValueAnnotation",
"nif:beginIndex": 80,
"nif:endIndex": 97,
"amor:hasMoralValueCategory": "mft:Authority",
"amor:confidence": 0.75,
"amor-mft:hasPolarityIntensity": 0.2,
"amor:annotated": "http://micro.blog/status1",
"nif:anchorOf": "You'll be awesome.",
"prov:wasGeneratedBy": ":MoralAnalysis1_Activity"
}
],
"emotions": [
]
}
]
}

View File

@@ -2,11 +2,10 @@
"@context": "http://mixedemotions-project.eu/ns/context.jsonld", "@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "me:Result1", "@id": "me:Result1",
"@type": "results", "@type": "results",
"activities": [ "analysis": [
{ {
"@id": "_:NER1_Activity", "@id": "me:NER1",
"@type": "me:NERAnalysis", "@type": "me:NERAnalysis"
"prov:wasAssociatedWith": "me:NER1"
} }
], ],
"entries": [ "entries": [

View File

@@ -2,22 +2,16 @@
"@context": [ "@context": [
"http://mixedemotions-project.eu/ns/context.jsonld", "http://mixedemotions-project.eu/ns/context.jsonld",
{ {
"emovoc": "http://www.gsi.upm.es/ontologies/onyx/vocabularies/emotionml/ns#" "emovoc": "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/emotionml/ns#"
} }
], ],
"@id": "me:Result1", "@id": "me:Result1",
"@type": "results", "@type": "results",
"activities": [ "analysis": [
{ {
"@id": "me:HesamsAnalysis_Activity", "@id": "me:HesamsAnalysis",
"@type": "onyx:EmotionAnalysis", "@type": "onyx:EmotionAnalysis",
"prov:wasAssociatedWith": "me:HesamsAnalysis", "onyx:usesEmotionModel": "emovoc:pad-dimensions"
"prov:used": [
{
"name": "emotion-model",
"prov:value": "emovoc:pad-dimensions"
}
]
} }
], ],
"entries": [ "entries": [
@@ -38,7 +32,7 @@
{ {
"@id": "Entry1#char=0,21", "@id": "Entry1#char=0,21",
"nif:anchorOf": "This is a test string", "nif:anchorOf": "This is a test string",
"prov:wasGeneratedBy": "_:HesamAnalysis_Activity", "prov:wasGeneratedBy": "me:HesamAnalysis",
"onyx:hasEmotion": [ "onyx:hasEmotion": [
{ {
"emovoc:pleasure": 0.5, "emovoc:pleasure": 0.5,

View File

@@ -2,11 +2,12 @@
"@context": "http://mixedemotions-project.eu/ns/context.jsonld", "@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "me:Result1", "@id": "me:Result1",
"@type": "results", "@type": "results",
"activities": [ "analysis": [
{ {
"@id": "_:SAnalysis1_Activity", "@id": "me:SAnalysis1",
"@type": "marl:SentimentAnalysis", "@type": "marl:SentimentAnalysis",
"prov:wasAssociatedWith": "me:SAnalysis1" "marl:maxPolarityValue": 1,
"marl:minPolarityValue": 0
} }
], ],
"entries": [ "entries": [
@@ -17,6 +18,10 @@
"nif:Context" "nif:Context"
], ],
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource", "nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"entities": [
],
"suggestions": [
],
"sentiments": [ "sentiments": [
{ {
"@id": "http://micro.blog/status1#char=80,97", "@id": "http://micro.blog/status1#char=80,97",
@@ -25,10 +30,10 @@
"nif:anchorOf": "You'll be awesome.", "nif:anchorOf": "You'll be awesome.",
"marl:hasPolarity": "marl:Positive", "marl:hasPolarity": "marl:Positive",
"marl:polarityValue": 0.9, "marl:polarityValue": 0.9,
"prov:wasGeneratedBy": "_:SAnalysis1_Activity" "prov:wasGeneratedBy": "me:SAnalysis1"
} }
], ],
"emotions": [ "emotionSets": [
] ]
} }
] ]

View File

@@ -2,12 +2,8 @@
"@context": "http://mixedemotions-project.eu/ns/context.jsonld", "@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "me:Result1", "@id": "me:Result1",
"@type": "results", "@type": "results",
"activities": [ "analysis": [
{ "me:SgAnalysis1"
"@id": "_:SgAnalysis1_Activity",
"@type": "me:SuggestionAnalysis",
"prov:wasAssociatedWith": "me:SgAnalysis1"
}
], ],
"entries": [ "entries": [
{ {
@@ -16,6 +12,7 @@
"nif:RFC5147String", "nif:RFC5147String",
"nif:Context" "nif:Context"
], ],
"prov:wasGeneratedBy": "me:SAnalysis1",
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource", "nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"entities": [ "entities": [
], ],
@@ -25,7 +22,7 @@
"nif:beginIndex": 16, "nif:beginIndex": 16,
"nif:endIndex": 77, "nif:endIndex": 77,
"nif:anchorOf": "put your Windows Phone on your newest #open technology program", "nif:anchorOf": "put your Windows Phone on your newest #open technology program",
"prov:wasGeneratedBy": "_:SgAnalysis1_Activity" "prov:wasGeneratedBy": "me:SgAnalysis1"
} }
], ],
"sentiments": [ "sentiments": [

View File

@@ -1,106 +1,35 @@
Welcome to Senpy's documentation! Welcome to Senpy's documentation!
================================= =================================
.. image:: https://readthedocs.org/projects/senpy/badge/?version=latest .. image:: https://readthedocs.org/projects/senpy/badge/?version=latest
:target: http://senpy.readthedocs.io/en/latest/ :target: http://senpy.readthedocs.io/en/latest/
.. image:: https://badge.fury.io/py/senpy.svg .. image:: https://badge.fury.io/py/senpy.svg
:target: https://badge.fury.io/py/senpy :target: https://badge.fury.io/py/senpy
.. image:: https://travis-ci.org/gsi-upm/senpy.svg .. image:: https://lab.cluster.gsi.dit.upm.es/senpy/senpy/badges/master/build.svg
:target: https://github.com/gsi-upm/senpy/senpy/tree/master :target: https://lab.cluster.gsi.dit.upm.es/senpy/senpy/commits/master
.. image:: https://lab.cluster.gsi.dit.upm.es/senpy/senpy/badges/master/coverage.svg
:target: https://lab.cluster.gsi.dit.upm.es/senpy/senpy/commits/master
.. image:: https://img.shields.io/pypi/l/requests.svg .. image:: https://img.shields.io/pypi/l/requests.svg
:target: https://lab.gsi.upm.es/senpy/senpy/ :target: https://lab.cluster.gsi.dit.upm.es/senpy/senpy/
Senpy is a framework to build sentiment and emotion analysis services.
It provides functionalities for:
- developing sentiment and emotion classifier and exposing them as an HTTP service
- requesting sentiment and emotion analysis from different providers (i.e. Vader, Sentimet140, ...) using the same interface (:doc:`apischema`). In this way, applications do not depend on the API offered for these services.
- combining services that use different sentiment model (e.g. polarity between [-1, 1] or [0,1] or emotion models (e.g. Ekkman or VAD)
- evaluating sentiment algorithms with well known datasets
Using senpy services is as simple as sending an HTTP request with your favourite tool or library.
Let's analyze the sentiment of the text "Senpy is awesome".
We can call the `Sentiment140 <http://www.sentiment140.com/>`_ service with an HTTP request using curl:
.. code:: shell
:emphasize-lines: 14,18
$ curl "http://senpy.gsi.upm.es/api/sentiment140" \
--data-urlencode "input=Senpy is awesome"
{
"@context": "http://senpy.gsi.upm.es/api/contexts/YXBpL3NlbnRpbWVudDE0MD8j",
"@type": "Results",
"entries": [
{
"@id": "prefix:",
"@type": "Entry",
"marl:hasOpinion": [
{
"@type": "Sentiment",
"marl:hasPolarity": "marl:Positive",
"prov:wasGeneratedBy": "prefix:Analysis_1554389334.6431913"
}
],
"nif:isString": "Senpy is awesome",
"onyx:hasEmotionSet": []
}
]
}
Congratulations, you've used your first senpy service!
You can observe the result: the polarity is positive (marl:Positive). The reason for this prefix is that Senpy follows a linked data approach.
You can analyze the same sentence using a different sentiment service (e.g. Vader) and requesting a different format (e.g. turtle):
.. code:: shell Senpy is a framework for sentiment and emotion analysis services.
Services built with senpy are interchangeable and easy to use because they share a common :doc:`apischema`.
It also simplifies service development.
$ curl "http://senpy.gsi.upm.es/api/sentiment-vader" \ .. image:: senpy-architecture.png
--data-urlencode "input=Senpy is awesome" \ :width: 100%
--data-urlencode "outformat=turtle" :align: center
@prefix : <http://www.gsi.upm.es/onto/senpy/ns#> .
@prefix endpoint: <http://senpy.gsi.upm.es/api/> .
@prefix marl: <http://www.gsi.upm.es/ontologies/marl/ns#> .
@prefix nif: <http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core#> .
@prefix prefix: <http://senpy.invalid/> .
@prefix prov: <http://www.w3.org/ns/prov#> .
@prefix senpy: <http://www.gsi.upm.es/onto/senpy/ns#> .
prefix: a senpy:Entry ;
nif:isString "Senpy is awesome" ;
marl:hasOpinion [ a senpy:Sentiment ;
marl:hasPolarity "marl:Positive" ;
marl:polarityValue 6.72e-01 ;
prov:wasGeneratedBy prefix:Analysis_1562668175.9808676 ] .
[] a senpy:Results ;
prov:used prefix: .
As you see, Vader returns also the polarity value (0.67) in addition to the category (positive).
If you are interested in consuming Senpy services, read :doc:`Quickstart`.
To get familiar with the concepts behind Senpy, and what it can offer for service developers, check out :doc:`development`.
:doc:`apischema` contains information about the semantic models and vocabularies used by Senpy.
.. toctree:: .. toctree::
:caption: Learn more about senpy: :caption: Learn more about senpy:
:maxdepth: 2 :maxdepth: 2
:hidden:
senpy senpy
demo
Quickstart.ipynb
installation installation
conversion demo
Evaluation.ipynb usage
apischema apischema
development plugins
publications conversion
projects about

View File

@@ -1,10 +1,10 @@
Installation Installation
------------ ------------
The stable version can be used in two ways: as a system/user library through pip, or from a docker image. The stable version can be used in two ways: as a system/user library through pip, or as a docker image.
Using docker is recommended because the image is self-contained, reproducible and isolated from the system, which means: The docker image is the recommended way because it is self-contained and isolated from the system, which means:
* It can be downloaded and run with just one simple command * Downloading and using it is just one command
* All dependencies are included * All dependencies are included
* It is OS-independent (MacOS, Windows, GNU/Linux) * It is OS-independent (MacOS, Windows, GNU/Linux)
* Several versions may coexist in the same machine without additional virtual environments * Several versions may coexist in the same machine without additional virtual environments
@@ -17,13 +17,9 @@ Through PIP
.. code:: bash .. code:: bash
pip install senpy
# Or with --user if you get permission errors:
pip install --user senpy pip install --user senpy
..
Alternatively, you can use the development version: Alternatively, you can use the development version:
.. code:: bash .. code:: bash
@@ -32,24 +28,31 @@ Through PIP
cd senpy cd senpy
pip install --user . pip install --user .
Each version is automatically tested on GNU/Linux, macOS and Windows 10. If you want to install senpy globally, use sudo instead of the ``--user`` flag.
If you have trouble with the installation, please file an `issue on GitHub <https://github.com/gsi-upm/senpy/issues>`_.
Docker Image Docker Image
************ ************
Build the image or use the pre-built one:
The base image of senpy comes with some built-in plugins that you can use:
.. code:: bash .. code:: bash
docker run -ti -p 5000:5000 gsiupm/senpy --host 0.0.0.0 docker run -ti -p 5000:5000 gsiupm/senpy --host 0.0.0.0 --default-plugins
To use your custom plugins, you can add volume to the container: To add custom plugins, use a docker volume:
.. code:: bash .. code:: bash
docker run -ti -p 5000:5000 -v <PATH OF PLUGINS>:/plugins gsiupm/senpy --host 0.0.0.0 --plugins-folder /plugins docker run -ti -p 5000:5000 -v <PATH OF PLUGINS>:/plugins gsiupm/senpy --host 0.0.0.0 --default-plugins -f /plugins
Python 2
........
There is a Senpy version for python2 too:
.. code:: bash
docker run -ti -p 5000:5000 gsiupm/senpy:python2.7 --host 0.0.0.0 --default-plugins
Alias Alias
@@ -59,7 +62,7 @@ If you are using the docker approach regularly, it is advisable to use a script
.. code:: bash .. code:: bash
alias senpy='docker run --rm -ti -p 5000:5000 -v $PWD:/senpy-plugins gsiupm/senpy' alias senpy='docker run --rm -ti -p 5000:5000 -v $PWD:/senpy-plugins gsiupm/senpy --default-plugins'
Now, you may run senpy from any folder in your computer like so: Now, you may run senpy from any folder in your computer like so:

Binary file not shown.

Before

Width:  |  Height:  |  Size: 68 KiB

View File

@@ -9,7 +9,6 @@ Lastly, it is also possible to add new plugins programmatically.
.. contents:: :local: .. contents:: :local:
..
What is a plugin? What is a plugin?
================= =================
@@ -110,3 +109,5 @@ Now, in a file named ``helloworld.py``:
sentiment['marl:hasPolarity'] = 'marl:Negative' sentiment['marl:hasPolarity'] = 'marl:Negative'
entry.sentiments.append(sentiment) entry.sentiments.append(sentiment)
yield entry yield entry
The complete code of the example plugin is available `here <https://lab.cluster.gsi.dit.upm.es/senpy/plugin-prueba>`__.

View File

@@ -1,87 +0,0 @@
Quickstart for service developers
=================================
This document contains the minimum to get you started with developing new services using Senpy.
For an example of conversion plugins, see :doc:`conversion`.
For a description of definition files, see :doc:`plugins-definition`.
A more step-by-step tutorial with slides is available `here <https://lab.gsi.upm.es/senpy/senpy-tutorial>`__
.. contents:: :local:
Installation
############
First of all, you need to install the package.
See :doc:`installation` for instructions.
Once installed, the `senpy` command should be available.
Architecture
############
The main component of a sentiment analysis service is the algorithm itself. However, for the algorithm to work, it needs to get the appropriate parameters from the user, format the results according to the defined API, interact with the user whn errors occur or more information is needed, etc.
Senpy proposes a modular and dynamic architecture that allows:
* Implementing different algorithms in a extensible way, yet offering a common interface.
* Offering common services that facilitate development, so developers can focus on implementing new and better algorithms.
The framework consists of two main modules: Senpy core, which is the building block of the service, and Senpy plugins, which consist of the analysis algorithm. The next figure depicts a simplified version of the processes involved in an analysis with the Senpy framework.
.. image:: senpy-architecture.png
:width: 100%
:align: center
What is a plugin?
#################
A plugin is a python object that can process entries.
Given an entry, it will modify it, add annotations to it, or generate new entries.
What is an entry?
#################
Entries are objects that can be annotated.
In general, they will be a piece of text.
By default, entries are `NIF contexts <http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core/nif-core.html>`_ represented in JSON-LD format.
It is a dictionary/JSON object that looks like this:
.. code:: python
{
"@id": "<unique identifier or blank node name>",
"nif:isString": "input text",
"sentiments": [ {
...
}
],
...
}
Annotations are added to the object like this:
.. code:: python
entry = Entry()
entry.vocabulary__annotationName = 'myvalue'
entry['vocabulary:annotationName'] = 'myvalue'
entry['annotationNameURI'] = 'myvalue'
Where vocabulary is one of the prefixes defined in the default senpy context, and annotationURI is a full URI.
The value may be any valid JSON-LD dictionary.
For simplicity, senpy includes a series of models by default in the ``senpy.models`` module.
Plugins Code
############
The basic methods in a plugin are:
* analyse_entry: called in every user requests. It takes two parameters: ``Entry``, the entry object, and ``params``, the parameters supplied by the user. It should yield one or more ``Entry`` objects.
* activate: used to load memory-hungry resources. For instance, to train a classifier.
* deactivate: used to free up resources when the plugin is no longer needed.
Plugins are loaded asynchronously, so don't worry if the activate method takes too long. The plugin will be marked as activated once it is finished executing the method.

View File

@@ -1,18 +1,61 @@
F.A.Q. Developing new plugins
====== ----------------------
This document contains the minimum to get you started with developing new analysis plugin.
For an example of conversion plugins, see :doc:`conversion`.
For a description of definition files, see :doc:`plugins-definition`.
A more step-by-step tutorial with slides is available `here <https://lab.cluster.gsi.dit.upm.es/senpy/senpy-tutorial>`__
.. contents:: :local: .. contents:: :local:
What is a plugin?
=================
A plugin is a python object that can process entries. Given an entry, it will modify it, add annotations to it, or generate new entries.
What is an entry?
=================
Entries are objects that can be annotated.
In general, they will be a piece of text.
By default, entries are `NIF contexts <http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core/nif-core.html>`_ represented in JSON-LD format.
It is a dictionary/JSON object that looks like this:
.. code:: python
{
"@id": "<unique identifier or blank node name>",
"nif:isString": "input text",
"sentiments": [ {
...
}
],
...
}
Annotations are added to the object like this:
.. code:: python
entry = Entry()
entry.vocabulary__annotationName = 'myvalue'
entry['vocabulary:annotationName'] = 'myvalue'
entry['annotationNameURI'] = 'myvalue'
Where vocabulary is one of the prefixes defined in the default senpy context, and annotationURI is a full URI.
The value may be any valid JSON-LD dictionary.
For simplicity, senpy includes a series of models by default in the ``senpy.models`` module.
What are annotations? What are annotations?
##################### =====================
They are objects just like entries. They are objects just like entries.
Senpy ships with several default annotations, including ``Sentiment`` and ``Emotion``. Senpy ships with several default annotations, including: ``Sentiment``, ``Emotion``, ``EmotionSet``...jk bb
What's a plugin made of? What's a plugin made of?
######################## ========================
When receiving a query, senpy selects what plugin or plugins should process each entry, and in what order. When receiving a query, senpy selects what plugin or plugins should process each entry, and in what order.
It also makes sure the every entry and the parameters provided by the user meet the plugin requirements. It also makes sure the every entry and the parameters provided by the user meet the plugin requirements.
@@ -22,33 +65,45 @@ Hence, two parts are necessary: 1) the code that will process the entry, and 2)
In practice, this is what a plugin looks like, tests included: In practice, this is what a plugin looks like, tests included:
.. literalinclude:: ../example-plugins/rand_plugin.py .. literalinclude:: ../senpy/plugins/example/rand_plugin.py
:emphasize-lines: 21-28 :emphasize-lines: 5-11
:language: python :language: python
The lines highlighted contain some information about the plugin. The lines highlighted contain some information about the plugin.
In particular, the following information is mandatory: In particular, the following information is mandatory:
* A unique name for the class. In our example, sentiment-random. * A unique name for the class. In our example, Rand.
* The subclass/type of plugin. This is typically either `SentimentPlugin` or `EmotionPlugin`. However, new types of plugin can be created for different annotations. The only requirement is that these new types inherit from `senpy.Analysis` * The subclass/type of plugin. This is typically either `SentimentPlugin` or `EmotionPlugin`. However, new types of plugin can be created for different annotations. The only requirement is that these new types inherit from `senpy.Analysis`
* A description of the plugin. This can be done simply by adding a doc to the class. * A description of the plugin. This can be done simply by adding a doc to the class.
* A version, which should get updated. * A version, which should get updated.
* An author name. * An author name.
Plugins Code
============
The basic methods in a plugin are:
* analyse_entry: called in every user requests. It takes two parameters: ``Entry``, the entry object, and ``params``, the parameters supplied by the user. It should yield one or more ``Entry`` objects.
* activate: used to load memory-hungry resources. For instance, to train a classifier.
* deactivate: used to free up resources when the plugin is no longer needed.
Plugins are loaded asynchronously, so don't worry if the activate method takes too long. The plugin will be marked as activated once it is finished executing the method.
How does senpy find modules? How does senpy find modules?
############################ ============================
Senpy looks for files of two types: Senpy looks for files of two types:
* Python files of the form `senpy_<NAME>.py` or `<NAME>_plugin.py`. In these files, it will look for: 1) Instances that inherit from `senpy.Plugin`, or subclasses of `senpy.Plugin` that can be initialized without a configuration file. i.e. classes that contain all the required attributes for a plugin. * Python files of the form `senpy_<NAME>.py` or `<NAME>_plugin.py`. In these files, it will look for: 1) Instances that inherit from `senpy.Plugin`, or subclasses of `senpy.Plugin` that can be initialized without a configuration file. i.e. classes that contain all the required attributes for a plugin.
* Plugin definition files (see :doc:`plugins-definition`) * Plugin definition files (see :doc:`advanced-plugins`)
How can I define additional parameters for my plugin? Defining additional parameters
##################################################### ==============================
Your plugin may ask for additional parameters from users by using the attribute ``extra_params`` in your plugin definition. Your plugin may ask for additional parameters from the users of the service by using the attribute ``extra_params`` in your plugin definition.
It takes a dictionary, where the keys are the name of the argument/parameter, and the value has the following fields: It takes a dictionary, where the keys are the name of the argument/parameter, and the value has the following fields:
* aliases: the different names which can be used in the request to use the parameter. * aliases: the different names which can be used in the request to use the parameter.
@@ -69,16 +124,15 @@ It takes a dictionary, where the keys are the name of the argument/parameter, an
How should I load external data and files Loading data and files
######################################### ======================
Most plugins will need access to files (dictionaries, lexicons, etc.). Most plugins will need access to files (dictionaries, lexicons, etc.).
These files are usually heavy or under a license that does not allow redistribution. These files are usually heavy or under a license that does not allow redistribution.
For this reason, senpy has a `data_folder` that is separated from the source files. For this reason, senpy has a `data_folder` that is separated from the source files.
The location of this folder is controlled programmatically or by setting the `SENPY_DATA` environment variable. The location of this folder is controlled programmatically or by setting the `SENPY_DATA` environment variable.
You can use the `self.path(filepath)` function to get the path of a given `filepath` within the data folder.
Plugins have a convenience function `self.open` which will automatically look for the file if it exists, or open a new one if it doesn't: Plugins have a convenience function `self.open` which will automatically prepend the data folder to relative paths:
.. code:: python .. code:: python
@@ -90,7 +144,7 @@ Plugins have a convenience function `self.open` which will automatically look fo
file_in_data = <FILE PATH> file_in_data = <FILE PATH>
file_in_sources = <FILE PATH> file_in_sources = <FILE PATH>
def on activate(self): def activate(self):
with self.open(self.file_in_data) as f: with self.open(self.file_in_data) as f:
self._classifier = train_from_file(f) self._classifier = train_from_file(f)
file_in_source = os.path.join(self.get_folder(), self.file_in_sources) file_in_source = os.path.join(self.get_folder(), self.file_in_sources)
@@ -101,8 +155,8 @@ Plugins have a convenience function `self.open` which will automatically look fo
It is good practice to specify the paths of these files in the plugin configuration, so the same code can be reused with different resources. It is good practice to specify the paths of these files in the plugin configuration, so the same code can be reused with different resources.
Can I build a docker image for my plugin? Docker image
######################################### ============
Add the following dockerfile to your project to generate a docker image with your plugin: Add the following dockerfile to your project to generate a docker image with your plugin:
@@ -133,7 +187,7 @@ And you can run it with:
docker run -p 5000:5000 gsiupm/exampleplugin docker run -p 5000:5000 gsiupm/exampleplugin
If the plugin uses non-source files (:ref:`How should I load external data and files`), the recommended way is to use `SENPY_DATA` folder. If the plugin uses non-source files (:ref:`loading data and files`), the recommended way is to use `SENPY_DATA` folder.
Data can then be mounted in the container or added to the image. Data can then be mounted in the container or added to the image.
The former is recommended for open source plugins with licensed resources, whereas the latter is the most convenient and can be used for private images. The former is recommended for open source plugins with licensed resources, whereas the latter is the most convenient and can be used for private images.
@@ -150,15 +204,17 @@ Adding data to the image:
FROM gsiupm/senpy:1.0.1 FROM gsiupm/senpy:1.0.1
COPY data / COPY data /
F.A.Q.
======
What annotations can I use? What annotations can I use?
########################### ???????????????????????????
You can add almost any annotation to an entry. You can add almost any annotation to an entry.
The most common use cases are covered in the :doc:`apischema`. The most common use cases are covered in the :doc:`apischema`.
Why does the analyse function yield instead of return? Why does the analyse function yield instead of return?
###################################################### ??????????????????????????????????????????????????????
This is so that plugins may add new entries to the response or filter some of them. This is so that plugins may add new entries to the response or filter some of them.
For instance, a chunker may split one entry into several. For instance, a chunker may split one entry into several.
@@ -166,7 +222,7 @@ On the other hand, a conversion plugin may leave out those entries that do not c
If I'm using a classifier, where should I train it? If I'm using a classifier, where should I train it?
################################################### ???????????????????????????????????????????????????
Training a classifier can be time time consuming. To avoid running the training unnecessarily, you can use ShelfMixin to store the classifier. For instance: Training a classifier can be time time consuming. To avoid running the training unnecessarily, you can use ShelfMixin to store the classifier. For instance:
@@ -200,7 +256,7 @@ A corrupt shelf prevents the plugin from loading.
If you do not care about the data in the shelf, you can force your plugin to remove the corrupted file and load anyway, set the 'force_shelf' to True in your plugin and start it again. If you do not care about the data in the shelf, you can force your plugin to remove the corrupted file and load anyway, set the 'force_shelf' to True in your plugin and start it again.
How can I turn an external service into a plugin? How can I turn an external service into a plugin?
################################################# ?????????????????????????????????????????????????
This example ilustrate how to implement a plugin that accesses the Sentiment140 service. This example ilustrate how to implement a plugin that accesses the Sentiment140 service.
@@ -236,8 +292,8 @@ This example ilustrate how to implement a plugin that accesses the Sentiment140
yield entry yield entry
How can I activate a DEBUG mode for my plugin? Can I activate a DEBUG mode for my plugin?
############################################### ???????????????????????????????????????????
You can activate the DEBUG mode by the command-line tool using the option -d. You can activate the DEBUG mode by the command-line tool using the option -d.
@@ -253,6 +309,6 @@ Additionally, with the ``--pdb`` option you will be dropped into a pdb post mort
python -m pdb yourplugin.py python -m pdb yourplugin.py
Where can I find more code examples? Where can I find more code examples?
#################################### ????????????????????????????????????
See: `<http://github.com/gsi-upm/senpy-plugins-community>`_. See: `<http://github.com/gsi-upm/senpy-plugins-community>`_.

View File

@@ -1,49 +0,0 @@
Projects using Senpy
--------------------
Are you using Senpy in your work?, we would love to hear from you!
Here is a list of on-going and past projects that have benefited from senpy:
MixedEmotions
,,,,,,,,,,,,,
`MixedEmotions <https://mixedemotions-project.eu/>`_ develops innovative multilingual multi-modal Big Data analytics applications.
The analytics relies on a common toolbox for multi-modal sentiment and emotion analysis.
The NLP parts of the toolbox are based on senpy and its API.
The toolbox is featured in this publication:
.. code-block:: text
Buitelaar, P., Wood, I. D., Arcan, M., McCrae, J. P., Abele, A., Robin, C., … Tummarello, G. (2018).
MixedEmotions: An Open-Source Toolbox for Multi-Modal Emotion Analysis.
IEEE Transactions on Multimedia.
EuroSentiment
,,,,,,,,,,,,,
The aim of the EUROSENTIMENT project was to create a pool for multilingual language resources and services for Sentiment Analysis.
The EuroSentiment project was the main motivation behind the development of Senpy, and some early versions were used:
.. code-block:: text
Sánchez-Rada, J. F., Vulcu, G., Iglesias, C. A., & Buitelaar, P. (2014).
EUROSENTIMENT: Linked Data Sentiment Analysis.
Proceedings of the ISWC 2014 Posters & Demonstrations Track
13th International Semantic Web Conference (ISWC 2014) (Vol. 1272, pp. 145148).
SoMeDi
,,,,,,
`SoMeDi <https://itea3.org/project/somedi.html>`_ is an ITEA3 project to research machine learning and artificial intelligence techniques that can be used to turn digital interaction data into Digital Interaction Intelligence and approaches that can be used to effectively enter and act in social media, and to automate this process.
SoMeDi exploits senpy's interoperability of services in their customizable data enrichment and NLP workflows.
TRIVALENT
,,,,,,,,,
`TRIVALENT <https://trivalent-project.eu/>`_ is an EU funded project which aims to a better understanding of root causes of the phenomenon of violent radicalisation in Europe in order to develop appropriate countermeasures, ranging from early detection methodologies to techniques of counter-narrative.
In addition to sentiment and emotion analysis services, trivalent provides other types of senpy services such as radicalism and writing style analysis.

View File

@@ -1,36 +0,0 @@
Publications
============
And if you use Senpy in your research, please cite `Senpy: A Pragmatic Linked Sentiment Analysis Framework <http://gsi.upm.es/index.php/es/investigacion/publicaciones?view=publication&task=show&id=417>`__ (`BibTex <http://gsi.upm.es/index.php/es/investigacion/publicaciones?controller=publications&task=export&format=bibtex&id=417>`__):
.. code-block:: text
Sánchez-Rada, J. F., Iglesias, C. A., Corcuera, I., & Araque, Ó. (2016, October).
Senpy: A Pragmatic Linked Sentiment Analysis Framework.
In Data Science and Advanced Analytics (DSAA),
2016 IEEE International Conference on (pp. 735-742). IEEE.
Senpy uses Onyx for emotion representation, first introduced in:
.. code-block:: text
Sánchez-Rada, J. F., & Iglesias, C. A. (2016).
Onyx: A linked data approach to emotion representation.
Information Processing & Management, 52(1), 99-114.
Senpy uses Marl for sentiment representation, which was presented in:
.. code-block:: text
Westerski, A., Iglesias Fernandez, C. A., & Tapia Rico, F. (2011).
Linked opinions: Describing sentiments on the structured web of data.
The representation models, formats and challenges are partially covered in a chapter of the book Sentiment Analysis in Social Networks:
.. code-block:: text
Iglesias, C. A., Sánchez-Rada, J. F., Vulcu, G., & Buitelaar, P. (2017).
Linked Data Models for Sentiment and Emotion Analysis in Social Networks.
In Sentiment Analysis in Social Networks (pp. 49-69).

View File

@@ -1,3 +1,2 @@
sphinxcontrib-httpdomain>=1.4 sphinxcontrib-httpdomain>=1.4
ipykernel
nbsphinx nbsphinx

View File

@@ -1,27 +1,54 @@
What is Senpy? What is Senpy?
-------------- --------------
Senpy is a framework for sentiment and emotion analysis services. Senpy is a framework for text analysis using Linked Data. There are three main applications of Senpy so far: sentiment and emotion analysis, user profiling and entity recoginition. Annotations and Services are compliant with NIF (NLP Interchange Format).
Its goal is to produce analysis services that are interchangeable and fully interoperable.
Senpy aims at providing a framework where analysis modules can be integrated easily as plugins, and providing a core functionality for managing tasks such as data validation, user interaction, formatting, logging, translation to linked data, etc.
The figure below summarizes the typical features in a text analysis service.
Senpy implements all the common blocks, so developers can focus on what really matters: great analysis algorithms that solve real problems.
.. image:: senpy-framework.png
:width: 60%
:align: center
Senpy for end users
===================
All services built using senpy share a common interface.
This allows users to use them (almost) interchangeably.
Senpy comes with a :ref:`built-in client`.
Senpy for service developers
============================
Senpy is a framework that turns your sentiment or emotion analysis algorithm into a full blown semantic service.
Senpy takes care of:
* Interfacing with the user: parameter validation, error handling.
* Formatting: JSON-LD, Turtle/n-triples input and output, or simple text input
* Linked Data: senpy results are semantically annotated, using a series of well established vocabularies, and sane default URIs.
* User interface: a web UI where users can explore your service and test different settings
* A client to interact with the service. Currently only available in Python.
Sharing your sentiment analysis with the world has never been easier!
Check out the :doc:`plugins` if you have developed an analysis algorithm (e.g. sentiment analysis) and you want to publish it as a service.
Architecture
============
The main component of a sentiment analysis service is the algorithm itself. However, for the algorithm to work, it needs to get the appropriate parameters from the user, format the results according to the defined API, interact with the user whn errors occur or more information is needed, etc.
Senpy proposes a modular and dynamic architecture that allows:
* Implementing different algorithms in a extensible way, yet offering a common interface.
* Offering common services that facilitate development, so developers can focus on implementing new and better algorithms.
The framework consists of two main modules: Senpy core, which is the building block of the service, and Senpy plugins, which consist of the analysis algorithm. The next figure depicts a simplified version of the processes involved in an analysis with the Senpy framework.
.. image:: senpy-architecture.png .. image:: senpy-architecture.png
:width: 100% :width: 100%
:align: center :align: center
All services built using senpy share a common interface.
This allows users to use them (almost) interchangeably, with the same API and tools, simply by pointing to a different URL or changing a parameter.
The common schema also makes it easier to evaluate the performance of different algorithms and services.
In fact, Senpy has a built-in evaluation API you can use to compare results with different algorithms.
Services can also use the common interface to communicate with each other.
And higher level features can be built on top of these services, such as automatic fusion of results, emotion model conversion, and service discovery.
These benefits are not limited to new services.
The community has developed wrappers for some proprietary and commercial services (such as sentiment140 and Meaning Cloud), so you can consult them as.
Senpy comes with a built-in client in the client package.
To achieve this goal, Senpy uses a Linked Data principled approach, based on the NIF (NLP Interchange Format) specification, and open vocabularies such as Marl and Onyx.
You can learn more about this in :doc:`vocabularies`.
Check out :doc:`development` if you have developed an analysis algorithm (e.g. sentiment analysis) and you want to publish it as a service.

View File

@@ -1,18 +1,14 @@
Command line tool Server
================= ======
Basic usage
-----------
The senpy server is launched via the `senpy` command: The senpy server is launched via the `senpy` command:
.. code:: text .. code:: text
usage: senpy [-h] [--level logging_level] [--log-format log_format] [--debug] usage: senpy [-h] [--level logging_level] [--debug] [--default-plugins]
[--no-default-plugins] [--host HOST] [--port PORT] [--host HOST] [--port PORT] [--plugins-folder PLUGINS_FOLDER]
[--plugins-folder PLUGINS_FOLDER] [--install] [--only-install] [--only-list] [--data-folder DATA_FOLDER]
[--test] [--no-run] [--data-folder DATA_FOLDER] [--threaded] [--version]
[--no-threaded] [--no-deps] [--version] [--allow-fail]
Run a Senpy server Run a Senpy server
@@ -20,24 +16,20 @@ The senpy server is launched via the `senpy` command:
-h, --help show this help message and exit -h, --help show this help message and exit
--level logging_level, -l logging_level --level logging_level, -l logging_level
Logging level Logging level
--log-format log_format
Logging format
--debug, -d Run the application in debug mode --debug, -d Run the application in debug mode
--no-default-plugins Do not load the default plugins --default-plugins Load the default plugins
--host HOST Use 0.0.0.0 to accept requests from any host. --host HOST Use 0.0.0.0 to accept requests from any host.
--port PORT, -p PORT Port to listen on. --port PORT, -p PORT Port to listen on.
--plugins-folder PLUGINS_FOLDER, -f PLUGINS_FOLDER --plugins-folder PLUGINS_FOLDER, -f PLUGINS_FOLDER
Where to look for plugins. Where to look for plugins.
--install, -i Install plugin dependencies before launching the server. --only-install, -i Do not run a server, only install plugin dependencies
--test, -t Test all plugins before launching the server --only-list, --list Do not run a server, only list plugins found
--no-run Do not launch the server
--data-folder DATA_FOLDER, --data DATA_FOLDER --data-folder DATA_FOLDER, --data DATA_FOLDER
Where to look for data. It be set with the SENPY_DATA Where to look for data. It be set with the SENPY_DATA
environment variable as well. environment variable as well.
--no-threaded Run the server without threading --threaded Run a threaded server
--no-deps, -n Skip installing dependencies
--version, -v Output the senpy version and exit --version, -v Output the senpy version and exit
--allow-fail, --fail Do not exit if some plugins fail to activate
When launched, the server will recursively look for plugins in the specified plugins folder (the current working directory by default). When launched, the server will recursively look for plugins in the specified plugins folder (the current working directory by default).
@@ -48,9 +40,9 @@ Let's run senpy with the default plugins:
.. code:: bash .. code:: bash
senpy -f . senpy -f . --default-plugins
Now open your browser and go to `http://localhost:5000 <http://localhost:5000>`_, where you should be greeted by the senpy playground: Now go to `http://localhost:5000 <http://localhost:5000>`_, you should be greeted by the senpy playground:
.. image:: senpy-playground.png .. image:: senpy-playground.png
:width: 100% :width: 100%
@@ -59,9 +51,9 @@ Now open your browser and go to `http://localhost:5000 <http://localhost:5000>`_
The playground is a user-friendly way to test your plugins, but you can always use the service directly: `http://localhost:5000/api?input=hello <http://localhost:5000/api?input=hello>`_. The playground is a user-friendly way to test your plugins, but you can always use the service directly: `http://localhost:5000/api?input=hello <http://localhost:5000/api?input=hello>`_.
By default, senpy will listen only on `127.0.0.1`. By default, senpy will listen only on the `127.0.0.1` address.
That means you can only access the API from your PC (i.e. localhost). That means you can only access the API from your (or localhost).
You can listen on a different address using the `--host` flag (e.g., 0.0.0.0, to allow any computer to access it). You can listen on a different address using the `--host` flag (e.g., 0.0.0.0).
The default port is 5000. The default port is 5000.
You can change it with the `--port` flag. You can change it with the `--port` flag.
@@ -72,14 +64,3 @@ For instance, to accept connections on port 6000 on any interface:
senpy --host 0.0.0.0 --port 6000 senpy --host 0.0.0.0 --port 6000
For more options, see the `--help` page. For more options, see the `--help` page.
Sentiment analysis in the command line
--------------------------------------
Although the main use of senpy is to publish services, the tool can also be used locally to analyze text in the command line.
This is a short video demonstration:
.. image:: https://asciinema.org/a/9uwef1ghkjk062cw2t4mhzpyk.png
:width: 100%
:target: https://asciinema.org/a/9uwef1ghkjk062cw2t4mhzpyk
:alt: CLI demo

15
docs/usage.rst Normal file
View File

@@ -0,0 +1,15 @@
Usage
-----
First of all, you need to install the package.
See :doc:`installation` for instructions.
Once installed, the `senpy` command should be available.
.. toctree::
:maxdepth: 1
server
SenpyClientUse
commandline

View File

@@ -13,9 +13,9 @@ An overview of the vocabularies and their use can be found in [4].
[1] Guidelines for developing NIF-based NLP services, Final Community Group Report 22 December 2015 Available at: https://www.w3.org/2015/09/bpmlod-reports/nif-based-nlp-webservices/ [1] Guidelines for developing NIF-based NLP services, Final Community Group Report 22 December 2015 Available at: https://www.w3.org/2015/09/bpmlod-reports/nif-based-nlp-webservices/
[2] Marl Ontology Specification, available at http://www.gsi.upm.es/ontologies/marl/ [2] Marl Ontology Specification, available at http://www.gsi.dit.upm.es/ontologies/marl/
[3] Onyx Ontology Specification, available at http://www.gsi.upm.es/ontologies/onyx/ [3] Onyx Ontology Specification, available at http://www.gsi.dit.upm.es/ontologies/onyx/
[4] Iglesias, C. A., Sánchez-Rada, J. F., Vulcu, G., & Buitelaar, P. (2017). Linked Data Models for Sentiment and Emotion Analysis in Social Networks. In Sentiment Analysis in Social Networks (pp. 49-69). [4] Iglesias, C. A., Sánchez-Rada, J. F., Vulcu, G., & Buitelaar, P. (2017). Linked Data Models for Sentiment and Emotion Analysis in Social Networks. In Sentiment Analysis in Social Networks (pp. 49-69).

View File

@@ -1,6 +1,6 @@
This is a collection of plugins that exemplify certain aspects of plugin development with senpy. This is a collection of plugins that exemplify certain aspects of plugin development with senpy.
The first series of plugins are the `basic` ones. The first series of plugins the `basic` ones.
Their starting point is a classification function defined in `basic.py`. Their starting point is a classification function defined in `basic.py`.
They all include testing and running them as a script will run all tests. They all include testing and running them as a script will run all tests.
In ascending order of customization, the plugins are: In ascending order of customization, the plugins are:
@@ -19,5 +19,5 @@ In rest of the plugins show advanced topics:
All of the plugins in this folder include a set of test cases and they are periodically tested with the latest version of senpy. All of the plugins in this folder include a set of test cases and they are periodically tested with the latest version of senpy.
Additioanlly, for an example of stand-alone plugin that can be tested and deployed with docker, take a look at: lab.gsi.upm.es/senpy/plugin-example Additioanlly, for an example of stand-alone plugin that can be tested and deployed with docker, take a look at: lab.cluster.gsi.dit.upm.es/senpy/plugin-example
bbm bbm

View File

@@ -0,0 +1,37 @@
from senpy import AnalysisPlugin
import multiprocessing
def _train(process_number):
return process_number
class Async(AnalysisPlugin):
'''An example of an asynchronous module'''
author = '@balkian'
version = '0.2'
async = True
def _do_async(self, num_processes):
pool = multiprocessing.Pool(processes=num_processes)
values = sorted(pool.map(_train, range(num_processes)))
return values
def activate(self):
self.value = self._do_async(4)
def analyse_entry(self, entry, params):
values = self._do_async(2)
entry.async_values = values
yield entry
test_cases = [
{
'input': 'any',
'expected': {
'async_values': [0, 1]
}
}
]

View File

@@ -1,21 +1,5 @@
#!/usr/local/bin/python #!/usr/local/bin/python
# -*- coding: utf-8 -*- # coding: utf-8
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
emoticons = { emoticons = {
'pos': [':)', ':]', '=)', ':D'], 'pos': [':)', ':]', '=)', ':D'],
@@ -23,19 +7,17 @@ emoticons = {
} }
emojis = { emojis = {
'pos': [u'😁', u'😂', u'😃', u'😄', u'😆', u'😅', u'😄', u'😍'], 'pos': ['😁', '😂', '😃', '😄', '😆', '😅', '😄' '😍'],
'neg': [u'😢', u'😡', u'😠', u'😞', u'😖', u'😔', u'😓', u'😒'] 'neg': ['😢', '😡', '😠', '😞', '😖', '😔', '😓', '😒']
} }
def get_polarity(text, dictionaries=[emoticons, emojis]): def get_polarity(text, dictionaries=[emoticons, emojis]):
polarity = 'marl:Neutral' polarity = 'marl:Neutral'
print('Input for get_polarity', text)
for dictionary in dictionaries: for dictionary in dictionaries:
for label, values in dictionary.items(): for label, values in dictionary.items():
for emoticon in values: for emoticon in values:
if emoticon and emoticon in text: if emoticon and emoticon in text:
polarity = label polarity = label
break break
print('Polarity', polarity)
return polarity return polarity

View File

@@ -1,20 +1,5 @@
#!/usr/local/bin/python #!/usr/local/bin/python
# -*- coding: utf-8 -*- # coding: utf-8
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from senpy import easy_test, models, plugins from senpy import easy_test, models, plugins
@@ -24,8 +9,8 @@ import basic
class BasicAnalyseEntry(plugins.SentimentPlugin): class BasicAnalyseEntry(plugins.SentimentPlugin):
'''Equivalent to Basic, implementing the analyse_entry method''' '''Equivalent to Basic, implementing the analyse_entry method'''
author: str = '@balkian' author = '@balkian'
version: str = '0.1' version = '0.1'
mappings = { mappings = {
'pos': 'marl:Positive', 'pos': 'marl:Positive',
@@ -33,17 +18,17 @@ class BasicAnalyseEntry(plugins.SentimentPlugin):
'default': 'marl:Neutral' 'default': 'marl:Neutral'
} }
def analyse_entry(self, entry, activity): def analyse_entry(self, entry, params):
polarity = basic.get_polarity(entry.text) polarity = basic.get_polarity(entry.text)
polarity = self.mappings.get(polarity, self.mappings['default']) polarity = self.mappings.get(polarity, self.mappings['default'])
s = models.Sentiment(marl__hasPolarity=polarity) s = models.Sentiment(marl__hasPolarity=polarity)
s.prov(activity) s.prov(self)
entry.sentiments.append(s) entry.sentiments.append(s)
yield entry yield entry
test_cases: list[dict] = [{ test_cases = [{
'input': 'Hello :)', 'input': 'Hello :)',
'polarity': 'marl:Positive' 'polarity': 'marl:Positive'
}, { }, {

View File

@@ -1,20 +1,5 @@
#!/usr/local/bin/python #!/usr/local/bin/python
# -*- coding: utf-8 -*- # coding: utf-8
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from senpy import easy_test, SentimentBox from senpy import easy_test, SentimentBox
@@ -24,18 +9,20 @@ import basic
class BasicBox(SentimentBox): class BasicBox(SentimentBox):
''' A modified version of Basic that also does converts annotations manually''' ''' A modified version of Basic that also does converts annotations manually'''
author: str = '@balkian' author = '@balkian'
version: str = '0.1' version = '0.1'
def predict_one(self, features, **kwargs): mappings = {
output = basic.get_polarity(features[0]) 'pos': 'marl:Positive',
if output == 'pos': 'neg': 'marl:Negative',
return [1, 0, 0] 'default': 'marl:Neutral'
if output == 'neg': }
return [0, 0, 1]
return [0, 1, 0]
test_cases: list[dict] = [{ def predict_one(self, input):
output = basic.get_polarity(input)
return self.mappings.get(output, self.mappings['default'])
test_cases = [{
'input': 'Hello :)', 'input': 'Hello :)',
'polarity': 'marl:Positive' 'polarity': 'marl:Positive'
}, { }, {

View File

@@ -1,52 +1,37 @@
#!/usr/local/bin/python #!/usr/local/bin/python
# -*- coding: utf-8 -*- # coding: utf-8
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from senpy import easy_test, SentimentBox, MappingMixin
from senpy import easy_test, SentimentBox
import basic import basic
class Basic(SentimentBox): class Basic(MappingMixin, SentimentBox):
'''Provides sentiment annotation using a lexicon''' '''Provides sentiment annotation using a lexicon'''
author: str = '@balkian' author = '@balkian'
version: str = '0.1' version = '0.1'
def predict_one(self, features, **kwargs): mappings = {
output = basic.get_polarity(features[0]) 'pos': 'marl:Positive',
if output == 'pos': 'neg': 'marl:Negative',
return [1, 0, 0] 'default': 'marl:Neutral'
if output == 'neu': }
return [0, 1, 0]
return [0, 0, 1]
test_cases: list[dict] = [{ def predict_one(self, input):
'input': u'Hello :)', return basic.get_polarity(input)
test_cases = [{
'input': 'Hello :)',
'polarity': 'marl:Positive' 'polarity': 'marl:Positive'
}, { }, {
'input': u'So sad :(', 'input': 'So sad :(',
'polarity': 'marl:Negative' 'polarity': 'marl:Negative'
}, { }, {
'input': u'Yay! Emojis 😁', 'input': 'Yay! Emojis 😁',
'polarity': 'marl:Positive' 'polarity': 'marl:Positive'
}, { }, {
'input': u'But no emoticons 😢', 'input': 'But no emoticons 😢',
'polarity': 'marl:Negative' 'polarity': 'marl:Negative'
}] }]

View File

@@ -1,21 +1,5 @@
#!/usr/local/bin/python #!/usr/local/bin/python
# -*- coding: utf-8 -*- # coding: utf-8
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from senpy import easy_test, models, plugins from senpy import easy_test, models, plugins
@@ -25,14 +9,14 @@ import basic
class Dictionary(plugins.SentimentPlugin): class Dictionary(plugins.SentimentPlugin):
'''Sentiment annotation using a configurable lexicon''' '''Sentiment annotation using a configurable lexicon'''
author: str = '@balkian' author = '@balkian'
version: str = '0.2' version = '0.2'
dictionaries = [basic.emojis, basic.emoticons] dictionaries = [basic.emojis, basic.emoticons]
mappings = {'pos': 'marl:Positive', 'neg': 'marl:Negative'} mappings = {'pos': 'marl:Positive', 'neg': 'marl:Negative'}
def analyse_entry(self, entry, *args, **kwargs): def analyse_entry(self, entry, params):
polarity = basic.get_polarity(entry.text, self.dictionaries) polarity = basic.get_polarity(entry.text, self.dictionaries)
if polarity in self.mappings: if polarity in self.mappings:
polarity = self.mappings[polarity] polarity = self.mappings[polarity]
@@ -42,7 +26,7 @@ class Dictionary(plugins.SentimentPlugin):
entry.sentiments.append(s) entry.sentiments.append(s)
yield entry yield entry
test_cases: list[dict] = [{ test_cases = [{
'input': 'Hello :)', 'input': 'Hello :)',
'polarity': 'marl:Positive' 'polarity': 'marl:Positive'
}, { }, {
@@ -61,7 +45,7 @@ class EmojiOnly(Dictionary):
'''Sentiment annotation with a basic lexicon of emojis''' '''Sentiment annotation with a basic lexicon of emojis'''
dictionaries = [basic.emojis] dictionaries = [basic.emojis]
test_cases: list[dict] = [{ test_cases = [{
'input': 'Hello :)', 'input': 'Hello :)',
'polarity': 'marl:Neutral' 'polarity': 'marl:Neutral'
}, { }, {
@@ -80,7 +64,7 @@ class EmoticonsOnly(Dictionary):
'''Sentiment annotation with a basic lexicon of emoticons''' '''Sentiment annotation with a basic lexicon of emoticons'''
dictionaries = [basic.emoticons] dictionaries = [basic.emoticons]
test_cases: list[dict] = [{ test_cases = [{
'input': 'Hello :)', 'input': 'Hello :)',
'polarity': 'marl:Positive' 'polarity': 'marl:Positive'
}, { }, {
@@ -102,7 +86,7 @@ class Salutes(Dictionary):
'marl:Negative': ['Good bye', ] 'marl:Negative': ['Good bye', ]
}] }]
test_cases: list[dict] = [{ test_cases = [{
'input': 'Hello :)', 'input': 'Hello :)',
'polarity': 'marl:Positive' 'polarity': 'marl:Positive'
}, { }, {

View File

@@ -1,33 +1,17 @@
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from senpy import AnalysisPlugin, easy from senpy import AnalysisPlugin, easy
class Dummy(AnalysisPlugin): class Dummy(AnalysisPlugin):
'''This is a dummy self-contained plugin''' '''This is a dummy self-contained plugin'''
author: str = '@balkian' author = '@balkian'
version: str = '0.1' version = '0.1'
def analyse_entry(self, entry, params): def analyse_entry(self, entry, params):
entry.text = entry.text[::-1] entry['nif:isString'] = entry['nif:isString'][::-1]
entry.reversed = entry.get('reversed', 0) + 1 entry.reversed = entry.get('reversed', 0) + 1
yield entry yield entry
test_cases: list[dict] = [{ test_cases = [{
'entry': { 'entry': {
'nif:isString': 'Hello', 'nif:isString': 'Hello',
}, },

View File

@@ -1,27 +1,11 @@
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from senpy import AnalysisPlugin, easy from senpy import AnalysisPlugin, easy
class DummyRequired(AnalysisPlugin): class DummyRequired(AnalysisPlugin):
'''This is a dummy self-contained plugin''' '''This is a dummy self-contained plugin'''
author: str = '@balkian' author = '@balkian'
version: str = '0.1' version = '0.1'
extra_params: dict = { extra_params = {
'example': { 'example': {
'description': 'An example parameter', 'description': 'An example parameter',
'required': True, 'required': True,
@@ -30,11 +14,11 @@ class DummyRequired(AnalysisPlugin):
} }
def analyse_entry(self, entry, params): def analyse_entry(self, entry, params):
entry.text = entry.text[::-1] entry['nif:isString'] = entry['nif:isString'][::-1]
entry.reversed = entry.get('reversed', 0) + 1 entry.reversed = entry.get('reversed', 0) + 1
yield entry yield entry
test_cases: list[dict] = [{ test_cases = [{
'entry': { 'entry': {
'nif:isString': 'Hello', 'nif:isString': 'Hello',
}, },

View File

@@ -1,49 +0,0 @@
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from senpy.plugins import EmotionPlugin
from senpy.models import EmotionSet, Emotion, Entry
class EmoRand(EmotionPlugin):
'''A sample plugin that returns a random emotion annotation'''
name: str = 'emotion-random'
author: str = '@balkian'
version: str = '0.1'
url: str = "https://github.com/gsi-upm/senpy-plugins-community"
usesEmotionModel: str = "emoml:big6"
def analyse_entry(self, entry, activity):
category = "emoml:big6happiness"
number = max(-1, min(1, random.gauss(0, 0.5)))
if number > 0:
category = "emoml:big6anger"
emotionSet = EmotionSet()
emotion = Emotion({"onyx:hasEmotionCategory": category})
emotionSet.onyx__hasEmotion.append(emotion)
emotionSet.prov(activity)
entry.emotions.append(emotionSet)
yield entry
def test(self):
params = dict()
results = list()
for i in range(100):
res = next(self.analyse_entry(Entry(nif__isString="Hello"), self.activity(params)))
res.validate()
results.append(res.emotions[0]['onyx:hasEmotion'][0]['onyx:hasEmotionCategory'])

View File

@@ -1,63 +0,0 @@
@prefix : <http://www.gsi.upm.es/ontologies/amor/examples#> .
@prefix amor: <http://www.gsi.upm.es/ontologies/amor/ns#> .
@prefix amor-bhv: <http://www.gsi.upm.es/ontologies/amor/models/bhv/ns#> .
@prefix amor-mft: <http://www.gsi.upm.es/ontologies/amor/models/mft/ns#> .
@prefix bhv: <http://www.gsi.upm.es/ontologies/bhv#> .
@prefix mft: <http://www.gsi.upm.es/ontologies/mft/ns#> .
@prefix mls: <http://www.w3.org/ns/mls#> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix prov: <http://www.w3.org/ns/prov#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix schema: <http://schema.org/> .
:news1 a owl:NamedIndividual, schema:NewsArticle ;
schema:articleBody "Director Comey says the probe into last year's US election would assess if crimes were committed."^^xsd:string ;
schema:datePublished "2017-03-20T20:30:54+00:00"^^schema:Date ;
schema:headline "Trump Russia claims: FBI's Comey confirms investigation of election 'interference'"^^xsd:string ;
schema:image <http://ichef-1.bbci.co.uk/news/560/media/images/75306000/jpg/_75306515_line976.jpg>,
<http://ichef.bbci.co.uk/news/560/cpsprodpb/8AB9/production/_95231553_comey2.jpg>,
<http://ichef.bbci.co.uk/news/560/cpsprodpb/17519/production/_95231559_committee.jpg>,
<http://ichef.bbci.co.uk/news/560/cpsprodpb/CC81/production/_95235325_f704a6dc-c017-4971-aac3-04c03eb097fb.jpg>,
<http://ichef-1.bbci.co.uk/news/560/cpsprodpb/11AA1/production/_95235327_c0b59f9e-316e-4641-aa7e-3fec6daea62b.jpg>,
<http://ichef.bbci.co.uk/news/560/cpsprodpb/0F99/production/_95239930_trumptweet.png>,
<http://ichef-1.bbci.co.uk/news/560/cpsprodpb/10DFA/production/_95241196_mediaitem95241195.jpg>,
<http://ichef.bbci.co.uk/news/560/cpsprodpb/2CA0/production/_95242411_comey.jpg>,
<http://ichef.bbci.co.uk/news/560/cpsprodpb/11318/production/_95242407_mediaitem95242406.jpg>,
<http://ichef-1.bbci.co.uk/news/560/cpsprodpb/BCED/production/_92856384_line976.jpg>,
<http://ichef-1.bbci.co.uk/news/560/cpsprodpb/12B64/production/_95244667_mediaitem95244666.jpg> ;
schema:mainEntityOfPage <http://www.bbc.com/news/world-us-canada-39324587> ;
schema:publisher :bbc ;
schema:url <http://www.bbc.com/news/world-us-canada-39324587> .
:bbc a schema:Organization ;
schema:logo <http://www.bbc.co.uk/news/special/2015/newsspec_10857/bbc_news_logo.png?cb=1> ;
schema:name "BBC News"^^xsd:string .
:robot1 a prov:SoftwareAgent .
:model1 a mls:Model .
:logisticRegression a mls:Algorithm ;
rdfs:label "Logistic Regression"@en ,
"Regresión Logística"@es .
:run1 a mls:Run ;
mls:executes :wekaLogistic ;
mls:hasInput :credit-a ;
mls:hasOutput :model1 ;
mls:realizes :logisticRegression .
:analysis3 a amor:MoralValueAnalysis ;
prov:wasAssociatedWith :robot1 ;
amor:usedMoralValueModel amor-mft:MoralFoundationsTheory ;
amor:analysed :news1 ;
amor:usedMLModel :model1 ;
prov:generated :annotation3 .
:annotation3 a amor:MoralValueAnnotation ;
amor:hasMoralValueCategory mft:Authority ;
amor:confidence "0.75"^^xsd:float ;
amor-mft:hasPolarityIntensity "0.2"^^xsd:float ;
amor:annotated :news1 .

View File

@@ -1,10 +0,0 @@
from senpy.ns import amor, amor_bhv, amor_mft, prov
from senpy.plugins import MoralityPlugin
class DummyMoralityPlugin(MoralityPlugin):
moralValueModel: str = amor_mft["MoralFoundationTHeory"]
def annotate(self, entry, activity, **kwargs):
yield MoralAnnotation(amor_bhv['Conservation'],
confidence=1.8)

View File

@@ -1,19 +1,3 @@
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import noop import noop
from senpy.plugins import SentimentPlugin from senpy.plugins import SentimentPlugin
@@ -21,7 +5,7 @@ from senpy.plugins import SentimentPlugin
class NoOp(SentimentPlugin): class NoOp(SentimentPlugin):
'''This plugin does nothing. Literally nothing.''' '''This plugin does nothing. Literally nothing.'''
version: str = 0 version = 0
def analyse_entry(self, entry, *args, **kwargs): def analyse_entry(self, entry, *args, **kwargs):
yield entry yield entry

View File

@@ -1,4 +1,3 @@
module: mynoop module: mynoop
optional: true
requirements: requirements:
- noop - noop

View File

@@ -1,21 +1,5 @@
#!/usr/local/bin/python #!/usr/local/bin/python
# -*- coding: utf-8 -*- # coding: utf-8
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from senpy import easy_test, models, plugins from senpy import easy_test, models, plugins
@@ -24,10 +8,11 @@ import basic
class ParameterizedDictionary(plugins.SentimentPlugin): class ParameterizedDictionary(plugins.SentimentPlugin):
'''This is a basic self-contained plugin''' '''This is a basic self-contained plugin'''
author: str = '@balkian'
version: str = '0.2'
extra_params: dict = { author = '@balkian'
version = '0.2'
extra_params = {
'positive-words': { 'positive-words': {
'description': 'Comma-separated list of words that are considered positive', 'description': 'Comma-separated list of words that are considered positive',
'aliases': ['positive'], 'aliases': ['positive'],
@@ -40,8 +25,7 @@ class ParameterizedDictionary(plugins.SentimentPlugin):
} }
} }
def analyse_entry(self, entry, activity): def analyse_entry(self, entry, params):
params = activity.params
positive_words = params['positive-words'].split(',') positive_words = params['positive-words'].split(',')
negative_words = params['negative-words'].split(',') negative_words = params['negative-words'].split(',')
dictionary = { dictionary = {
@@ -51,11 +35,11 @@ class ParameterizedDictionary(plugins.SentimentPlugin):
polarity = basic.get_polarity(entry.text, [dictionary]) polarity = basic.get_polarity(entry.text, [dictionary])
s = models.Sentiment(marl__hasPolarity=polarity) s = models.Sentiment(marl__hasPolarity=polarity)
s.prov(activity) s.prov(self)
entry.sentiments.append(s) entry.sentiments.append(s)
yield entry yield entry
test_cases: list[dict] = [ test_cases = [
{ {
'input': 'Hello :)', 'input': 'Hello :)',
'polarity': 'marl:Positive', 'polarity': 'marl:Positive',

View File

@@ -1,54 +0,0 @@
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from senpy import SentimentPlugin, Sentiment, Entry
class RandSent(SentimentPlugin):
'''A sample plugin that returns a random sentiment annotation'''
name: str = 'sentiment-random'
author: str = "@balkian"
version: str = '0.1'
url: str = "https://github.com/gsi-upm/senpy-plugins-community"
maxPolarityValue: float = 1
minPolarityValue: float = -1
def analyse_entry(self, entry, activity):
polarity_value = max(-1, min(1, random.gauss(0.2, 0.2)))
polarity = "marl:Neutral"
if polarity_value > 0:
polarity = "marl:Positive"
elif polarity_value < 0:
polarity = "marl:Negative"
sentiment = Sentiment(marl__hasPolarity=polarity,
marl__polarityValue=polarity_value)
sentiment.prov(activity)
entry.sentiments.append(sentiment)
yield entry
def test(self):
'''Run several random analyses.'''
params = dict()
results = list()
for i in range(50):
activity = self.activity(params)
res = next(self.analyse_entry(Entry(nif__isString="Hello"),
activity))
res.validate()
results.append(res.sentiments[0]['marl:hasPolarity'])
assert 'marl:Positive' in results
assert 'marl:Negative' in results

View File

@@ -1,19 +1,3 @@
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' '''
Create a dummy dataset. Create a dummy dataset.
Messages with a happy emoticon are labelled positive Messages with a happy emoticon are labelled positive

View File

@@ -1,19 +1,3 @@
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sklearn.pipeline import Pipeline from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split from sklearn.model_selection import train_test_split
@@ -31,7 +15,7 @@ pipeline = Pipeline([('cv', count_vec),
('clf', clf3)]) ('clf', clf3)])
pipeline.fit(X_train, y_train) pipeline.fit(X_train, y_train)
print('Feature names: {}'.format(count_vec.get_feature_names_out())) print('Feature names: {}'.format(count_vec.get_feature_names()))
print('Class count: {}'.format(clf3.class_count_)) print('Class count: {}'.format(clf3.class_count_))

View File

@@ -1,38 +1,27 @@
# from senpy import SentimentBox, MappingMixin, easy_test
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from senpy import SentimentBox, easy_test
from mypipeline import pipeline from mypipeline import pipeline
class PipelineSentiment(SentimentBox): class PipelineSentiment(MappingMixin, SentimentBox):
'''This is a pipeline plugin that wraps a classifier defined in another module '''
(mypipeline).''' This is a pipeline plugin that wraps a classifier defined in another module
author: str = '@balkian' (mypipeline).
version: str = 0.1 '''
author = '@balkian'
version = 0.1
maxPolarityValue = 1 maxPolarityValue = 1
minPolarityValue = -1 minPolarityValue = -1
def predict_one(self, features, **kwargs): mappings = {
if pipeline.predict(features) > 0: 1: 'marl:Positive',
return [1, 0, 0] -1: 'marl:Negative'
return [0, 0, 1] }
test_cases: list[dict] = [ def predict_one(self, input):
return pipeline.predict([input, ])[0]
test_cases = [
{ {
'input': 'The sentiment for senpy should be positive :)', 'input': 'The sentiment for senpy should be positive :)',
'polarity': 'marl:Positive' 'polarity': 'marl:Positive'

View File

@@ -1,29 +1,13 @@
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from senpy.plugins import AnalysisPlugin from senpy.plugins import AnalysisPlugin
from time import sleep from time import sleep
class Sleep(AnalysisPlugin): class Sleep(AnalysisPlugin):
'''Dummy plugin to test async''' '''Dummy plugin to test async'''
author: str = "@balkian" author = "@balkian"
version: str = "0.2" version = "0.2"
timeout = 0.05 timeout = 0.05
extra_params: dict = { extra_params = {
"timeout": { "timeout": {
"@id": "timeout_sleep", "@id": "timeout_sleep",
"aliases": ["timeout", "to"], "aliases": ["timeout", "to"],
@@ -32,8 +16,7 @@ class Sleep(AnalysisPlugin):
} }
} }
def __init__(self, **kwargs): def activate(self, *args, **kwargs):
super().__init__(**kwargs)
sleep(self.timeout) sleep(self.timeout)
def analyse_entry(self, entry, params): def analyse_entry(self, entry, params):

View File

@@ -1,6 +1 @@
gsitk>0.1.9.1 gsitk
flask_cors==3.0.10
lxml==4.9.3
pandas==2.1.1
textblob==0.17.1
git+https://github.com/uob-vil/pattern.git#pattern

View File

@@ -1,24 +1,22 @@
--- ---
apiVersion: apps/v1 apiVersion: extensions/v1beta1
kind: Deployment kind: Deployment
metadata: metadata:
name: senpy-latest name: senpy-latest
spec: spec:
replicas: 1 replicas: 1
selector:
matchLabels:
app: senpy-latest
template: template:
metadata: metadata:
labels: labels:
app: senpy-latest
role: senpy-latest role: senpy-latest
app: test
spec: spec:
containers: containers:
- name: senpy-latest - name: senpy-latest
image: $IMAGEWTAG image: $IMAGEWTAG
imagePullPolicy: Always imagePullPolicy: Always
args: ["--enable-cors"] args:
- "--default-plugins"
resources: resources:
limits: limits:
memory: "512Mi" memory: "512Mi"
@@ -26,11 +24,3 @@ spec:
ports: ports:
- name: web - name: web
containerPort: 5000 containerPort: 5000
volumeMounts:
- name: senpy-data
mountPath: /senpy-data
subPath: data
volumes:
- name: senpy-data
persistentVolumeClaim:
claimName: pvc-senpy

View File

@@ -1,29 +1,14 @@
--- ---
apiVersion: networking.k8s.io/v1 apiVersion: extensions/v1beta1
kind: Ingress kind: Ingress
metadata: metadata:
name: senpy-ingress name: senpy-ingress
labels:
app: senpy-latest
spec: spec:
rules: rules:
- host: senpy-latest.gsi.upm.es - host: latest.senpy.cluster.gsi.dit.upm.es
http: http:
paths: paths:
- path: / - path: /
pathType: Prefix
backend: backend:
service: serviceName: senpy-latest
name: senpy-latest servicePort: 5000
port:
number: 5000
- host: latest.senpy.gsi.upm.es
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: senpy-latest
port:
number: 5000

View File

@@ -3,12 +3,10 @@ apiVersion: v1
kind: Service kind: Service
metadata: metadata:
name: senpy-latest name: senpy-latest
labels:
app: senpy-latest
spec: spec:
type: ClusterIP type: ClusterIP
ports: ports:
- port: 5000 - port: 5000
protocol: TCP protocol: TCP
selector: selector:
app: senpy-latest role: senpy-latest

View File

@@ -7,10 +7,9 @@ future
jsonschema jsonschema
jsonref jsonref
PyYAML PyYAML
rdflib==6.1.1 rdflib
rdflib-jsonld
numpy numpy
scipy scipy
scikit-learn>=0.20 scikit-learn
responses responses
jmespath
deprecation==2.1.0

View File

@@ -1,7 +1,7 @@
#!/usr/bin/python #!/usr/bin/python
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
# # Copyright 2014 J. Fernando Sánchez Rada - Grupo de Sistemas Inteligentes
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM # DIT, UPM
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
@@ -14,7 +14,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
#
""" """
Sentiment analysis server in Python Sentiment analysis server in Python
""" """

View File

@@ -1,6 +1,7 @@
#!/usr/bin/python #!/usr/bin/python
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM # Copyright 2014 J. Fernando Sánchez Rada - Grupo de Sistemas Inteligentes
# DIT, UPM
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
@@ -22,8 +23,6 @@ the server.
from flask import Flask from flask import Flask
from senpy.extensions import Senpy from senpy.extensions import Senpy
from senpy.utils import easy_test from senpy.utils import easy_test
from senpy.plugins import list_dependencies
from senpy import config
import logging import logging
import os import os
@@ -41,19 +40,8 @@ def main():
'-l', '-l',
metavar='logging_level', metavar='logging_level',
type=str, type=str,
default="INFO", default="WARN",
help='Logging level') help='Logging level')
parser.add_argument(
'--no-proxy-fix',
action='store_true',
default=False,
help='Do not assume senpy will be running behind a proxy (e.g., nginx)')
parser.add_argument(
'--log-format',
metavar='log_format',
type=str,
default='%(asctime)s %(levelname)-10s %(name)-30s \t %(message)s',
help='Logging format')
parser.add_argument( parser.add_argument(
'--debug', '--debug',
'-d', '-d',
@@ -61,10 +49,10 @@ def main():
default=False, default=False,
help='Run the application in debug mode') help='Run the application in debug mode')
parser.add_argument( parser.add_argument(
'--no-default-plugins', '--default-plugins',
action='store_true', action='store_true',
default=False, default=False,
help='Do not load the default plugins') help='Load the default plugins')
parser.add_argument( parser.add_argument(
'--host', '--host',
type=str, type=str,
@@ -80,24 +68,19 @@ def main():
'--plugins-folder', '--plugins-folder',
'-f', '-f',
type=str, type=str,
action='append', default='.',
help='Where to look for plugins.') help='Where to look for plugins.')
parser.add_argument( parser.add_argument(
'--install', '--only-install',
'-i', '-i',
action='store_true', action='store_true',
default=False, default=False,
help='Install plugin dependencies before running.') help='Do not run a server, only install plugin dependencies')
parser.add_argument( parser.add_argument(
'--dependencies', '--only-test',
action='store_true', action='store_true',
default=False, default=False,
help='List plugin dependencies') help='Do not run a server, just test all plugins')
parser.add_argument(
'--strict',
action='store_true',
default=config.strict,
help='Fail if optional plugins cannot be loaded.')
parser.add_argument( parser.add_argument(
'--test', '--test',
'-t', '-t',
@@ -105,10 +88,11 @@ def main():
default=False, default=False,
help='Test all plugins before launching the server') help='Test all plugins before launching the server')
parser.add_argument( parser.add_argument(
'--no-run', '--only-list',
'--list',
action='store_true', action='store_true',
default=False, default=False,
help='Do not launch the server.') help='Do not run a server, only list plugins found')
parser.add_argument( parser.add_argument(
'--data-folder', '--data-folder',
'--data', '--data',
@@ -116,10 +100,10 @@ def main():
default=None, default=None,
help='Where to look for data. It be set with the SENPY_DATA environment variable as well.') help='Where to look for data. It be set with the SENPY_DATA environment variable as well.')
parser.add_argument( parser.add_argument(
'--no-threaded', '--threaded',
action='store_true', action='store_false',
default=False, default=True,
help='Run a single-threaded server') help='Run a threaded server')
parser.add_argument( parser.add_argument(
'--no-deps', '--no-deps',
'-n', '-n',
@@ -137,44 +121,23 @@ def main():
'--fail', '--fail',
action='store_true', action='store_true',
default=False, default=False,
help='Do not exit if some plugins can not be instantiated') help='Do not exit if some plugins fail to activate')
parser.add_argument(
'--enable-cors',
'--cors',
action='store_true',
default=False,
help='Enable CORS for all domains (requires flask-cors to be installed)')
args = parser.parse_args() args = parser.parse_args()
if args.version:
print('Senpy version {}'.format(senpy.__version__)) print('Senpy version {}'.format(senpy.__version__))
print(sys.version) print(sys.version)
if args.version:
exit(1) exit(1)
rl = logging.getLogger() rl = logging.getLogger()
rl.setLevel(getattr(logging, args.level)) rl.setLevel(getattr(logging, args.level))
logger_handler = rl.handlers[0]
# First, generic formatter:
logger_handler.setFormatter(logging.Formatter(args.log_format))
app = Flask(__name__) app = Flask(__name__)
app.debug = args.debug app.debug = args.debug
sp = Senpy(app, args.plugins_folder,
sp = Senpy(app, default_plugins=args.default_plugins,
plugin_folder=None,
default_plugins=not args.no_default_plugins,
install=args.install,
strict=args.strict,
data_folder=args.data_folder) data_folder=args.data_folder)
if args.only_list:
folders = list(args.plugins_folder) if args.plugins_folder else [] plugins = sp.plugins()
if not folders:
folders.append(".")
for p in folders:
sp.add_folder(p)
plugins = sp.plugins(plugin_type=None)
maxname = max(len(x.name) for x in plugins) maxname = max(len(x.name) for x in plugins)
maxversion = max(len(str(x.version)) for x in plugins) maxversion = max(len(x.version) for x in plugins)
print('Found {} plugins:'.format(len(plugins))) print('Found {} plugins:'.format(len(plugins)))
for plugin in plugins: for plugin in plugins:
import inspect import inspect
@@ -184,55 +147,25 @@ def main():
fpath, fpath,
maxname=maxname, maxname=maxname,
maxversion=maxversion)) maxversion=maxversion))
if args.dependencies:
print('Listing dependencies')
missing = []
installed = []
for plug in sp.plugins():
inst, miss, nltkres = list_dependencies(plug)
if not any([inst, miss, nltkres]):
continue
print(f'Plugin: {plug.id}')
for m in miss:
missing.append(f'{m} # {plug.id}')
for i in inst:
installed.append(f'{i} # {plug.id}')
if installed:
print('Installed packages:')
for i in installed:
print(f'\t{i}')
if missing:
print('Missing packages:')
for m in missing:
print(f'\t{m}')
if args.install:
sp.install_deps()
if args.test:
easy_test(sp.plugins(), debug=args.debug)
if args.no_run:
return return
if not args.no_deps:
sp.install_deps()
if args.only_install:
return
sp.activate_all(allow_fail=args.allow_fail)
if args.test or args.only_test:
easy_test(sp.plugins(), debug=args.debug)
if args.only_test:
return
print('Senpy version {}'.format(senpy.__version__)) print('Senpy version {}'.format(senpy.__version__))
print('Server running on port %s:%d. Ctrl+C to quit' % (args.host, print('Server running on port %s:%d. Ctrl+C to quit' % (args.host,
args.port)) args.port))
if args.enable_cors:
from flask_cors import CORS
CORS(app)
if not args.no_proxy_fix:
from werkzeug.middleware.proxy_fix import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
try:
app.run(args.host, app.run(args.host,
args.port, args.port,
threaded=not args.no_threaded, threaded=args.threaded,
debug=app.debug) debug=app.debug)
except KeyboardInterrupt: sp.deactivate_all()
print('Bye!')
if __name__ == '__main__': if __name__ == '__main__':
main() main()

View File

@@ -1,52 +1,29 @@
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from future.utils import iteritems from future.utils import iteritems
from .models import Results, Entry, from_string from .models import Error, Results, Entry, from_string
from .errors import Error, InvalidParams
import logging import logging
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
boolean = [True, False] boolean = [True, False]
processors = {
'string_to_tuple': lambda p: p if isinstance(p, (tuple, list)) else tuple(p.split(','))
}
API_PARAMS = { API_PARAMS = {
"algorithm": { "algorithm": {
"aliases": ["algorithms", "a", "algo"], "aliases": ["algorithms", "a", "algo"],
"required": True, "required": False,
"default": 'default',
"processor": 'string_to_tuple',
"description": ("Algorithms that will be used to process the request." "description": ("Algorithms that will be used to process the request."
"It may be a list of comma-separated names."), "It may be a list of comma-separated names."),
}, },
"expanded-jsonld": { "expanded-jsonld": {
"@id": "expanded-jsonld", "@id": "expanded-jsonld",
"description": "use JSON-LD expansion to get full URIs", "aliases": ["expanded"],
"aliases": ["expanded", "expanded_jsonld"],
"options": boolean, "options": boolean,
"required": True, "required": True,
"default": False "default": False
}, },
"with-parameters": { "with_parameters": {
"aliases": ['withparameters', "aliases": ['withparameters',
'with_parameters'], 'with-parameters'],
"description": "include initial parameters in the response",
"options": boolean, "options": boolean,
"default": False, "default": False,
"required": True "required": True
@@ -55,67 +32,9 @@ API_PARAMS = {
"@id": "outformat", "@id": "outformat",
"aliases": ["o"], "aliases": ["o"],
"default": "json-ld", "default": "json-ld",
"description": """The data can be semantically formatted (JSON-LD, turtle or n-triples),
given as a list of comma-separated fields (see the fields option) or constructed from a Jinja2
template (see the template option).""",
"required": True, "required": True,
"options": ["json-ld", "turtle", "ntriples"], "options": ["json-ld", "turtle", "ntriples"],
}, },
"template": {
"@id": "template",
"required": False,
"description": """Jinja2 template for the result. The input data for the template will
be the results as a dictionary.
For example:
Consider the results before templating:
```
[{
"@type": "entry",
"onyx:hasEmotionSet": [],
"nif:isString": "testing the template",
"marl:hasOpinion": [
{
"@type": "sentiment",
"marl:hasPolarity": "marl:Positive"
}
]
}]
```
And the template:
```
{% for entry in entries %}
{{ entry["nif:isString"] | upper }},{{entry.sentiments[0]["marl:hasPolarity"].split(":")[1]}}
{% endfor %}
```
The final result would be:
```
TESTING THE TEMPLATE,Positive
```
"""
},
"fields": {
"@id": "fields",
"required": False,
"description": """A jmespath selector, that can be used to extract a new dictionary, array or value
from the results.
jmespath is a powerful query language for json and/or dictionaries.
It allows you to change the structure (and data) of your objects through queries.
e.g., the following expression gets a list of `[emotion label, intensity]` for each entry:
`entries[]."onyx:hasEmotionSet"[]."onyx:hasEmotion"[]["onyx:hasEmotionCategory","onyx:hasEmotionIntensity"]`
For more information, see: https://jmespath.org
"""
},
"help": { "help": {
"@id": "help", "@id": "help",
"description": "Show additional help to know more about the possible parameters", "description": "Show additional help to know more about the possible parameters",
@@ -124,41 +43,14 @@ For more information, see: https://jmespath.org
"options": boolean, "options": boolean,
"default": False "default": False
}, },
"verbose": { "emotionModel": {
"@id": "verbose",
"description": "Show all properties in the result",
"aliases": ["v"],
"required": True,
"options": boolean,
"default": False
},
"aliases": {
"@id": "aliases",
"description": "Replace JSON properties with their aliases",
"aliases": [],
"required": True,
"options": boolean,
"default": False
},
"emotion-model": {
"@id": "emotionModel", "@id": "emotionModel",
"description": """Emotion model to use in the response. "aliases": ["emoModel"],
Senpy will try to convert the output to this model automatically.
Examples: `wna:liking` and `emoml:big6`.
""",
"aliases": ["emoModel", "emotionModel"],
"required": False "required": False
}, },
"conversion": { "conversion": {
"@id": "conversion", "@id": "conversion",
"description": """How to show the elements that have (not) been converted. "description": "How to show the elements that have (not) been converted",
* full: converted and original elements will appear side-by-side
* filtered: only converted elements will be shown
* nested: converted elements will be shown, and they will include a link to the original element
(using `prov:wasGeneratedBy`).
""",
"required": True, "required": True,
"options": ["filtered", "nested", "full"], "options": ["filtered", "nested", "full"],
"default": "full" "default": "full"
@@ -168,10 +60,9 @@ Examples: `wna:liking` and `emoml:big6`.
EVAL_PARAMS = { EVAL_PARAMS = {
"algorithm": { "algorithm": {
"aliases": ["plug", "p", "plugins", "algorithms", 'algo', 'a', 'plugin'], "aliases": ["plug", "p", "plugins", "algorithms", 'algo', 'a', 'plugin'],
"description": "Plugins to evaluate", "description": "Plugins to be evaluated",
"required": True, "required": True,
"help": "See plugins at /plugins", "help": "See activated plugins in /plugins"
"processor": API_PARAMS['algorithm']['processor']
}, },
"dataset": { "dataset": {
"aliases": ["datasets", "data", "d"], "aliases": ["datasets", "data", "d"],
@@ -182,19 +73,18 @@ EVAL_PARAMS = {
} }
PLUGINS_PARAMS = { PLUGINS_PARAMS = {
"plugin-type": { "plugin_type": {
"@id": "pluginType", "@id": "pluginType",
"description": 'What kind of plugins to list', "description": 'What kind of plugins to list',
"aliases": ["pluginType", "plugin_type"], "aliases": ["pluginType"],
"required": True, "required": True,
"default": 'analysisPlugin' "default": 'analysisPlugin'
} }
} }
WEB_PARAMS = { WEB_PARAMS = {
"in-headers": { "inHeaders": {
"aliases": ["headers", "inheaders", "inHeaders", "in-headers", "in_headers"], "aliases": ["headers"],
"description": "Only include the JSON-LD context in the headers",
"required": True, "required": True,
"default": False, "default": False,
"options": boolean "options": boolean
@@ -202,8 +92,8 @@ WEB_PARAMS = {
} }
CLI_PARAMS = { CLI_PARAMS = {
"plugin-folder": { "plugin_folder": {
"aliases": ["folder", "plugin_folder"], "aliases": ["folder"],
"required": True, "required": True,
"default": "." "default": "."
}, },
@@ -218,7 +108,6 @@ NIF_PARAMS = {
}, },
"intype": { "intype": {
"@id": "intype", "@id": "intype",
"description": "input type",
"aliases": ["t"], "aliases": ["t"],
"required": False, "required": False,
"default": "direct", "default": "direct",
@@ -226,7 +115,6 @@ NIF_PARAMS = {
}, },
"informat": { "informat": {
"@id": "informat", "@id": "informat",
"description": "input format",
"aliases": ["f"], "aliases": ["f"],
"required": False, "required": False,
"default": "text", "default": "text",
@@ -234,20 +122,17 @@ NIF_PARAMS = {
}, },
"language": { "language": {
"@id": "language", "@id": "language",
"description": "language of the input",
"aliases": ["l"], "aliases": ["l"],
"required": False, "required": False,
}, },
"prefix": { "prefix": {
"@id": "prefix", "@id": "prefix",
"description": "prefix to use for new entities",
"aliases": ["p"], "aliases": ["p"],
"required": True, "required": True,
"default": "", "default": "",
}, },
"urischeme": { "urischeme": {
"@id": "urischeme", "@id": "urischeme",
"description": "scheme for NIF URIs",
"aliases": ["u"], "aliases": ["u"],
"required": False, "required": False,
"default": "RFC5147String", "default": "RFC5147String",
@@ -255,15 +140,6 @@ NIF_PARAMS = {
} }
} }
BUILTIN_PARAMS = {}
for d in [
NIF_PARAMS, CLI_PARAMS, WEB_PARAMS, PLUGINS_PARAMS, EVAL_PARAMS,
API_PARAMS
]:
for k, v in d.items():
BUILTIN_PARAMS[k] = v
def parse_params(indict, *specs): def parse_params(indict, *specs):
if not specs: if not specs:
@@ -278,7 +154,7 @@ def parse_params(indict, *specs):
if alias in indict and alias != param: if alias in indict and alias != param:
outdict[param] = indict[alias] outdict[param] = indict[alias]
del outdict[alias] del outdict[alias]
break continue
if param not in outdict: if param not in outdict:
if "default" in options: if "default" in options:
# We assume the default is correct # We assume the default is correct
@@ -286,135 +162,44 @@ def parse_params(indict, *specs):
elif options.get("required", False): elif options.get("required", False):
wrong_params[param] = spec[param] wrong_params[param] = spec[param]
continue continue
if 'processor' in options:
outdict[param] = processors[options['processor']](outdict[param])
if "options" in options: if "options" in options:
if options["options"] == boolean: if options["options"] == boolean:
outdict[param] = str(outdict[param]).lower() in ['true', '1', ''] outdict[param] = outdict[param] in [None, True, 'true', '1']
elif outdict[param] not in options["options"]: elif outdict[param] not in options["options"]:
wrong_params[param] = spec[param] wrong_params[param] = spec[param]
if wrong_params: if wrong_params:
logger.debug("Error parsing: %s", wrong_params) logger.debug("Error parsing: %s", wrong_params)
raise InvalidParams(wrong_params) message = Error(
status=400,
message='Missing or invalid parameters',
parameters=outdict,
errors=wrong_params)
raise message
if 'algorithm' in outdict and not isinstance(outdict['algorithm'], list):
outdict['algorithm'] = list(outdict['algorithm'].split(','))
return outdict return outdict
def get_all_params(plugins, *specs): def parse_extra_params(request, plugin=None):
'''Return a list of parameters for a given set of specifications and plugins.''' params = request.parameters.copy()
dic = {} if plugin:
for s in specs: extra_params = parse_params(params, plugin.get('extra_params', {}))
dic.update(s) params.update(extra_params)
dic.update(get_extra_params(plugins))
return dic
def get_extra_params(plugins):
'''Get a list of possible parameters given a list of plugins'''
params = {}
extra_params: dict = {}
for plugin in plugins:
for k, v in plugin.extra_params.items():
if k not in extra_params:
extra_params[k] = {}
extra_params[k][plugin.name] = v
for k, v in extra_params.items(): # Resolve conflicts
if len(v) == 1: # Add the extra options that do not collide
params[k] = list(v.values())[0]
else:
required = False
aliases = None
options = None
default = None
nodefault = False # Set when defaults are not compatible
for plugin, opt in v.items():
params['{}.{}'.format(plugin, k)] = opt
required = required or opt.get('required', False)
newaliases = set(opt.get('aliases', []))
if aliases is None:
aliases = newaliases
else:
aliases = aliases & newaliases
if 'options' in opt:
newoptions = set(opt['options'])
options = newoptions if options is None else options & newoptions
if 'default' in opt:
newdefault = opt['default']
if newdefault:
if default is None and not nodefault:
default = newdefault
elif newdefault != default:
nodefault = True
default = None
# Check for incompatibilities
if options != set():
params[k] = {
'default': default,
'aliases': list(aliases),
'required': required,
'options': list(options)
}
return params return params
def parse_analyses(params, plugins):
'''
Parse the given parameters individually for each plugin, and get a list of the parameters that
belong to each of the plugins. Each item can then be used in the plugin.analyse_entries method.
'''
analysis_list = []
for i, plugin in enumerate(plugins):
if not plugin:
continue
this_params = filter_params(params, plugin, i)
parsed = parse_params(this_params, plugin.extra_params)
analysis = plugin.activity(parsed)
analysis_list.append(analysis)
return analysis_list
def filter_params(params, plugin, ith=-1):
'''
Get the values within params that apply to a plugin.
More specific names override more general names, in this order:
<index_order>.parameter > <plugin.name>.parameter > parameter
Example:
>>> filter_params({'0.hello': True, 'hello': False}, Plugin(), 0)
{ '0.hello': True, 'hello': True}
'''
thisparams = {}
if ith >= 0:
ith = '{}.'.format(ith)
else:
ith = ""
for k, v in params.items():
if ith and k.startswith(str(ith)):
thisparams[k[len(ith):]] = v
elif k.startswith(plugin.name):
thisparams[k[len(plugin.name) + 1:]] = v
elif k not in thisparams:
thisparams[k] = v
return thisparams
def parse_call(params): def parse_call(params):
''' '''Return a results object based on the parameters used in a call/request.
Return a results object based on the parameters used in a call/request.
''' '''
params = parse_params(params, NIF_PARAMS) params = parse_params(params, NIF_PARAMS)
if params['informat'] == 'text': if params['informat'] == 'text':
results = Results() results = Results()
entry = Entry(text=params['input'], id='prefix:') # Use @base entry = Entry(nif__isString=params['input'],
id='#') # Use @base
results.entries.append(entry) results.entries.append(entry)
elif params['informat'] == 'json-ld': elif params['informat'] == 'json-ld':
results = from_string(params['input'], cls=Results) results = from_string(params['input'], cls=Results)
else: # pragma: no cover else: # pragma: no cover
raise NotImplementedError('Informat {} is not implemented'.format( raise NotImplementedError('Informat {} is not implemented'.format(params['informat']))
params['informat']))
results.parameters = params results.parameters = params
return results return results

View File

@@ -1,7 +1,7 @@
#!/usr/bin/python #!/usr/bin/python
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
# # Copyright 2014 J. Fernando Sánchez Rada - Grupo de Sistemas Inteligentes
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM # DIT, UPM
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
@@ -14,20 +14,16 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
#
""" """
Blueprints for Senpy Blueprints for Senpy
""" """
from flask import (Blueprint, request, current_app, render_template, url_for, from flask import (Blueprint, request, current_app, render_template, url_for,
jsonify, redirect) jsonify, redirect)
from .models import BASE_CONTEXT, Help, Plugins, Datasets from .models import Error, Response, Help, Plugins, read_schema, dump_schema, Datasets
from .errors import Error
from . import api from . import api
from .version import __version__ from .version import __version__
from functools import wraps from functools import wraps
from .gsitk_compat import GSITK_AVAILABLE, datasets
import logging import logging
import json import json
import base64 import base64
@@ -67,44 +63,44 @@ def get_params(req):
return indict return indict
def encode_url(url=None): def encoded_url(url=None, base=None):
code = '' code = ''
if not url: if not url:
url = request.parameters.get('prefix', request.full_path[1:] + '#') if request.method == 'GET':
return code or base64.urlsafe_b64encode(url.encode()).decode() url = request.full_path[1:] # Remove the first slash
else:
hash(frozenset(request.form.params().items()))
code = 'hash:{}'.format(hash)
code = code or base64.urlsafe_b64encode(url.encode()).decode()
def url_for_code(code, base=None): if base:
# if base: return base + code
# return base + code return url_for('api.decode', code=code, _external=True)
# return url_for('api.decode', code=code, _external=True)
# This was producing unique yet very long URIs, which wasn't ideal for visualization.
return 'http://senpy.invalid/'
def decoded_url(code, base=None): def decoded_url(code, base=None):
path = base64.urlsafe_b64decode(code.encode()).decode() if code.startswith('hash:'):
if path[:4] == 'http': raise Exception('Can not decode a URL for a POST request')
return path
base = base or request.url_root base = base or request.url_root
path = base64.urlsafe_b64decode(code.encode()).decode()
return base + path return base + path
@demo_blueprint.route('/') @demo_blueprint.route('/')
def index(): def index():
# ev = str(get_params(request).get('evaluation', True)) ev = str(get_params(request).get('evaluation', False))
# evaluation_enabled = ev.lower() not in ['false', 'no', 'none'] evaluation_enabled = ev.lower() not in ['false', 'no', 'none']
evaluation_enabled = GSITK_AVAILABLE
return render_template("index.html", return render_template("index.html",
evaluation=evaluation_enabled, evaluation=evaluation_enabled,
version=__version__) version=__version__)
@api_blueprint.route('/contexts/<code>') @api_blueprint.route('/contexts/<entity>.jsonld')
def context(code=''): def context(entity="context"):
context = BASE_CONTEXT context = Response._context
context['@base'] = url_for('api.decode', code=code, _external=True) context['@vocab'] = url_for('ns.index', _external=True)
context['endpoint'] = url_for('api.api_root', _external=True) context['endpoint'] = url_for('api.api_root', _external=True)
return jsonify({"@context": context}) return jsonify({"@context": context})
@@ -114,115 +110,46 @@ def decode(code):
try: try:
return redirect(decoded_url(code)) return redirect(decoded_url(code))
except Exception: except Exception:
return to_flask(Error('invalid URL')) return Error('invalid URL').flask()
@ns_blueprint.route('/') # noqa: F811 @ns_blueprint.route('/') # noqa: F811
def index(): def index():
context = BASE_CONTEXT.copy() context = Response._context.copy()
context['endpoint'] = url_for('api.api_root', _external=True) context['endpoint'] = url_for('api.api_root', _external=True)
return jsonify({"@context": context}) return jsonify({"@context": context})
# from .models import read_schema, dump_schema @api_blueprint.route('/schemas/<schema>')
#@api_blueprint.route('/schemas/<schema>') def schema(schema="definitions"):
#def schema(schema="definitions"): try:
# try: return dump_schema(read_schema(schema))
# return dump_schema(read_schema(schema)) except Exception as ex: # Should be FileNotFoundError, but it's missing from py2
# except Exception as ex: # Should be FileNotFoundError, but it's missing from py2 return Error(message="Schema not found: {}".format(ex), status=404).flask()
# return Error(message="Schema not found: {}".format(ex), status=404).flask()
def to_flask(resp,
in_headers=False,
headers=None,
outformat='json-ld',
**kwargs):
"""
Return the values and error to be used in flask.
So far, it returns a fixed context. We should store/generate different
contexts if the plugin adds more aliases.
"""
headers = headers or {}
if isinstance(resp, Error):
status = resp.code
resp = resp.to_response()
else:
status = 200
kwargs["with_context"] = not in_headers
content, mimetype = resp.serialize(format=outformat,
with_mime=True,
**kwargs)
if outformat == 'json-ld' and in_headers:
headers.update({
"Link":
('<%s>;'
'rel="http://www.w3.org/ns/json-ld#context";'
' type="application/ld+json"' % kwargs.get('context_uri'))
})
return FlaskResponse(
response=content,
status=status,
headers=headers,
mimetype=mimetype)
def basic_api(f): def basic_api(f):
default_params = { default_params = {
'in-headers': False, 'inHeaders': False,
'expanded-jsonld': False, 'expanded-jsonld': False,
'outformat': None, 'outformat': None,
'with-parameters': True, 'with_parameters': True,
} }
@wraps(f) @wraps(f)
def decorated_function(*args, **kwargs): def decorated_function(*args, **kwargs):
raw_params = get_params(request) raw_params = get_params(request)
# logger.info('Getting request: {}'.format(raw_params)) logger.info('Getting request: {}'.format(raw_params))
logger.debug('Getting request. Params: {}'.format(raw_params))
headers = {'X-ORIGINAL-PARAMS': json.dumps(raw_params)} headers = {'X-ORIGINAL-PARAMS': json.dumps(raw_params)}
params = default_params params = default_params
mime = request.accept_mimetypes\
.best_match(MIMETYPES.keys(),
DEFAULT_MIMETYPE)
mimeformat = MIMETYPES.get(mime, DEFAULT_FORMAT)
outformat = mimeformat
try: try:
params = api.parse_params(raw_params, api.WEB_PARAMS, api.API_PARAMS) params = api.parse_params(raw_params, api.WEB_PARAMS, api.API_PARAMS)
outformat = params.get('outformat', mimeformat)
if hasattr(request, 'parameters'): if hasattr(request, 'parameters'):
request.parameters.update(params) request.parameters.update(params)
else: else:
request.parameters = params request.parameters = params
response = f(*args, **kwargs) response = f(*args, **kwargs)
if 'parameters' in response and not params['with-parameters']:
del response.parameters
logger.debug('Response: {}'.format(response))
prefix = params.get('prefix')
code = encode_url(prefix)
return to_flask(response,
in_headers=params['in-headers'],
headers=headers,
prefix=prefix or url_for_code(code),
base=prefix,
context_uri=url_for('api.context',
code=code,
_external=True),
outformat=outformat,
expanded=params['expanded-jsonld'],
template=params.get('template'),
verbose=params['verbose'],
aliases=params['aliases'],
fields=params.get('fields'))
except (Exception) as ex: except (Exception) as ex:
if current_app.debug or current_app.config['TESTING']: if current_app.debug or current_app.config['TESTING']:
raise raise
@@ -232,51 +159,48 @@ def basic_api(f):
response = ex response = ex
response.parameters = raw_params response.parameters = raw_params
logger.exception(ex) logger.exception(ex)
return to_flask(response,
if 'parameters' in response and not params['with_parameters']:
del response.parameters
logger.info('Response: {}'.format(response))
mime = request.accept_mimetypes\
.best_match(MIMETYPES.keys(),
DEFAULT_MIMETYPE)
mimeformat = MIMETYPES.get(mime, DEFAULT_FORMAT)
outformat = params['outformat'] or mimeformat
return response.flask(
in_headers=params['inHeaders'],
headers=headers,
prefix=params.get('prefix', encoded_url()),
context_uri=url_for('api.context',
entity=type(response).__name__,
_external=True),
outformat=outformat, outformat=outformat,
expanded=params['expanded-jsonld'], expanded=params['expanded-jsonld'])
verbose=params.get('verbose', True))
return decorated_function return decorated_function
@api_blueprint.route('/', defaults={'plugins': None}, methods=['POST', 'GET'], strict_slashes=False) @api_blueprint.route('/', defaults={'plugin': None}, methods=['POST', 'GET'])
@api_blueprint.route('/<path:plugins>', methods=['POST', 'GET'], strict_slashes=False) @api_blueprint.route('/<path:plugin>', methods=['POST', 'GET'])
@basic_api @basic_api
def api_root(plugins): def api_root(plugin):
if plugins:
if request.parameters['algorithm'] != api.API_PARAMS['algorithm']['default']:
raise Error('You cannot specify the algorithm with a parameter and a URL variable.'
' Please, remove one of them')
plugins = plugins.replace('+', ',').replace('/', ',')
plugins = api.processors['string_to_tuple'](plugins)
else:
plugins = request.parameters['algorithm']
print(plugins)
sp = current_app.senpy
plugins = sp.get_plugins(plugins)
if request.parameters['help']: if request.parameters['help']:
apis = [api.WEB_PARAMS, api.API_PARAMS, api.NIF_PARAMS] dic = dict(api.API_PARAMS, **api.NIF_PARAMS)
# Verbose is set to False as default, but we want it to default to response = Help(valid_parameters=dic)
# True for help. This checks the original value, to make sure it wasn't
# set by default.
if not request.parameters['verbose'] and get_params(request).get('verbose'):
apis = []
if request.parameters['algorithm'] == ['default', ]:
plugins = []
allparameters = api.get_all_params(plugins, *apis)
response = Help(valid_parameters=allparameters)
return response return response
req = api.parse_call(request.parameters) req = api.parse_call(request.parameters)
analyses = api.parse_analyses(req.parameters, plugins) if plugin:
results = current_app.senpy.analyse(req, analyses) plugin = plugin.replace('+', '/')
return results plugin = plugin.split('/')
req.parameters['algorithm'] = plugin
return current_app.senpy.analyse(req)
@api_blueprint.route('/evaluate', methods=['POST', 'GET'], strict_slashes=False) @api_blueprint.route('/evaluate/', methods=['POST', 'GET'])
@basic_api @basic_api
def evaluate(): def evaluate():
if request.parameters['help']: if request.parameters['help']:
@@ -289,26 +213,28 @@ def evaluate():
return response return response
@api_blueprint.route('/plugins', methods=['POST', 'GET'], strict_slashes=False) @api_blueprint.route('/plugins/', methods=['POST', 'GET'])
@basic_api @basic_api
def plugins(): def plugins():
sp = current_app.senpy sp = current_app.senpy
params = api.parse_params(request.parameters, api.PLUGINS_PARAMS) params = api.parse_params(request.parameters, api.PLUGINS_PARAMS)
ptype = params.get('plugin-type') ptype = params.get('plugin_type')
plugins = list(sp.analysis_plugins(plugin_type=ptype)) plugins = list(sp.plugins(plugin_type=ptype))
dic = Plugins(plugins=plugins) dic = Plugins(plugins=plugins)
return dic return dic
@api_blueprint.route('/plugins/<plugin>', methods=['POST', 'GET'], strict_slashes=False) @api_blueprint.route('/plugins/<plugin>/', methods=['POST', 'GET'])
@basic_api @basic_api
def plugin(plugin): def plugin(plugin):
sp = current_app.senpy sp = current_app.senpy
return sp.get_plugin(plugin) return sp.get_plugin(plugin)
@api_blueprint.route('/datasets', methods=['POST', 'GET'], strict_slashes=False) @api_blueprint.route('/datasets/', methods=['POST', 'GET'])
@basic_api @basic_api
def get_datasets(): def datasets():
sp = current_app.senpy
datasets = sp.datasets
dic = Datasets(datasets=list(datasets.values())) dic = Datasets(datasets=list(datasets.values()))
return dic return dic

View File

@@ -1,22 +1,5 @@
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys import sys
from .errors import Error from .models import Error
from .extensions import Senpy from .extensions import Senpy
from . import api from . import api
@@ -44,11 +27,16 @@ def main_function(argv):
api.CLI_PARAMS, api.CLI_PARAMS,
api.API_PARAMS, api.API_PARAMS,
api.NIF_PARAMS) api.NIF_PARAMS)
plugin_folder = params['plugin-folder'] plugin_folder = params['plugin_folder']
default_plugins = not params.get('no-default-plugins', False) default_plugins = params.get('default-plugins', False)
sp = Senpy(default_plugins=default_plugins, plugin_folder=plugin_folder) sp = Senpy(default_plugins=default_plugins, plugin_folder=plugin_folder)
request = api.parse_call(params) request = api.parse_call(params)
algos = sp.get_plugins(request.parameters.get('algorithm', None)) algos = request.parameters.get('algorithm', None)
if algos:
for algo in algos:
sp.activate_plugin(algo)
else:
sp.activate_all()
res = sp.analyse(request) res = sp.analyse(request)
return res return res
@@ -60,7 +48,7 @@ def main():
res = main_function(sys.argv[1:]) res = main_function(sys.argv[1:])
print(res.serialize()) print(res.serialize())
except Error as err: except Error as err:
print(err.serialize(), file=sys.stderr) print(err.serialize())
sys.exit(2) sys.exit(2)

View File

@@ -1,19 +1,3 @@
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import requests import requests
import logging import logging
from . import models from . import models
@@ -41,11 +25,7 @@ class Client(object):
def request(self, path=None, method='GET', **params): def request(self, path=None, method='GET', **params):
url = '{}{}'.format(self.endpoint.rstrip('/'), path) url = '{}{}'.format(self.endpoint.rstrip('/'), path)
if method == 'POST':
response = requests.post(url=url, data=params)
else:
response = requests.request(method=method, url=url, params=params) response = requests.request(method=method, url=url, params=params)
try: try:
resp = models.from_dict(response.json()) resp = models.from_dict(response.json())
except Exception as ex: except Exception as ex:

View File

@@ -1,7 +0,0 @@
import os
strict = os.environ.get('SENPY_STRICT', '').lower() not in ["", "false", "f"]
data_folder = os.environ.get('SENPY_DATA', None)
if data_folder:
data_folder = os.path.abspath(data_folder)
testing = os.environ.get('SENPY_TESTING', "") != ""

View File

@@ -1,23 +0,0 @@
from .models import ErrorResponse
class Error(Exception):
def __init__(self, message='Generic senpy exception', errors=[]):
Exception.__init__(self, message)
self.message = message
self.errors = errors
def toResponse(self) -> ErrorResponse:
return ErrorResponse(self.message)
def __str__(self):
if not hasattr(self, 'errors'):
return self.message
return '{}:\n\t{}'.format(self.message, self.errors)
def __hash__(self):
return Exception.__hash__(self)
class InvalidParams(Error):
def __init__(self, wrong_params):
super().__init__(message='Wrong parameters:\n\t{}'.format(wrong_params))

View File

@@ -1,30 +1,13 @@
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" """
Main class for Senpy. Main class for Senpy.
It orchestrates plugin discovery, creation and analysis. It orchestrates plugin (de)activation and analysis.
""" """
from future import standard_library from future import standard_library
standard_library.install_aliases() standard_library.install_aliases()
from . import config
from . import plugins, api from . import plugins, api
from .models import AggregatedEvaluation from .plugins import Plugin, evaluate
from .errors import Error from .models import Error, AggregatedEvaluation
from .plugins import AnalysisPlugin
from .blueprints import api_blueprint, demo_blueprint, ns_blueprint from .blueprints import api_blueprint, demo_blueprint, ns_blueprint
from threading import Thread from threading import Thread
@@ -34,6 +17,7 @@ import copy
import errno import errno
import logging import logging
from . import gsitk_compat from . import gsitk_compat
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -41,16 +25,12 @@ logger = logging.getLogger(__name__)
class Senpy(object): class Senpy(object):
""" Default Senpy extension for Flask """ """ Default Senpy extension for Flask """
def __init__(self, def __init__(self,
app=None, app=None,
plugin_folders=[".", ], plugin_folder=".",
data_folder=None, data_folder=None,
install=False,
strict=None,
default_plugins=False): default_plugins=False):
default_data = os.path.join(os.getcwd(), 'senpy_data') default_data = os.path.join(os.getcwd(), 'senpy_data')
self.data_folder = data_folder or os.environ.get('SENPY_DATA', default_data) self.data_folder = data_folder or os.environ.get('SENPY_DATA', default_data)
try: try:
@@ -62,26 +42,19 @@ class Senpy(object):
raise raise
self._default = None self._default = None
self.strict = strict if strict is not None else config.strict
self.strict = True # TODO: remove this after tests pass
self.install = install
self._plugins = {} self._plugins = {}
self.plugin_folders = plugin_folders if plugin_folder:
for folder in plugin_folders: self.add_folder(plugin_folder)
self._add_folder(folder)
if default_plugins: if default_plugins:
self._add_folder('plugins', from_root=True) self.add_folder('plugins', from_root=True)
else: else:
# Add only conversion plugins # Add only conversion plugins
self._add_folder(os.path.join('plugins', 'postprocessing'), self.add_folder(os.path.join('plugins', 'conversion'),
from_root=True) from_root=True)
self.app = app self.app = app
if app is not None: if app is not None:
self.init_app(app) self.init_app(app)
self._conversion_candidates = {}
if self.install:
self.install_deps()
def init_app(self, app): def init_app(self, app):
""" Initialise a flask app to add plugins to its context """ """ Initialise a flask app to add plugins to its context """
@@ -101,111 +74,169 @@ class Senpy(object):
app.register_blueprint(demo_blueprint, url_prefix="/") app.register_blueprint(demo_blueprint, url_prefix="/")
def add_plugin(self, plugin): def add_plugin(self, plugin):
name = plugin.name.lower() self._plugins[plugin.name.lower()] = plugin
assert name
self._plugins[name] = plugin
self._conversion_candidates = {}
def delete_plugin(self, plugin): def delete_plugin(self, plugin):
del self._plugins[plugin.name.lower()] del self._plugins[plugin.name.lower()]
def plugins(self, plugin_type=None, **kwargs): def plugins(self, **kwargs):
""" Return the plugins registered for a given application. Filtered by criteria """ """ Return the plugins registered for a given application. Filtered by criteria """
return sorted(plugins.pfilter(self._plugins, return list(plugins.pfilter(self._plugins, **kwargs))
plugin_type=plugin_type,
**kwargs),
key=lambda x: x.id)
def get_plugin(self, name, default=None): def get_plugin(self, name, default=None):
if name == 'default': if name == 'default':
return self.default_plugin return self.default_plugin
elif name == 'conversion': plugin = name.lower()
return None if plugin in self._plugins:
return self._plugins[plugin]
if name.lower() in self._plugins: results = self.plugins(id='endpoint:plugins/{}'.format(name))
return self._plugins[name.lower()]
results = self.plugins(id='endpoint:plugins/{}'.format(name.lower()), if not results:
plugin_type=None) return Error(message="Plugin not found", status=404)
if results:
return results[0] return results[0]
results = self.plugins(id=name, @property
plugin_type=None) def analysis_plugins(self):
if results: """ Return only the analysis plugins """
return results[0] return self.plugins(plugin_type='analysisPlugin')
msg = ("Plugin not found: '{}'\n" def add_folder(self, folder, from_root=False):
"Make sure it is ACTIVATED\n"
"Valid algorithms: {}").format(name,
self._plugins.keys())
raise Error(message=msg, status=404)
def get_plugins(self, name):
try:
name = name.split(',')
except AttributeError:
pass # Assume it is a tuple or a list
return tuple(self.get_plugin(n) for n in name)
def analysis_plugins(self, **kwargs):
""" Return only the analysis plugins that are active"""
candidates = self.plugins(**kwargs)
return list(plugins.pfilter(candidates, plugin_type=AnalysisPlugin))
def _add_folder(self, folder, from_root=False):
""" Find plugins in this folder and add them to this instance """ """ Find plugins in this folder and add them to this instance """
if from_root: if from_root:
folder = os.path.join(os.path.dirname(__file__), folder) folder = os.path.join(os.path.dirname(__file__), folder)
logger.debug("Adding folder: %s", folder) logger.debug("Adding folder: %s", folder)
if os.path.isdir(folder): if os.path.isdir(folder):
new_plugins = plugins.from_folder([folder], new_plugins = plugins.from_folder([folder],
data_folder=self.data_folder, data_folder=self.data_folder)
strict=self.strict)
for plugin in new_plugins: for plugin in new_plugins:
self.add_plugin(plugin) self.add_plugin(plugin)
else: else:
raise AttributeError("Not a folder or does not exist: %s", folder) raise AttributeError("Not a folder or does not exist: %s", folder)
def _process(self, req, pending): def _get_plugins(self, request):
if not self.analysis_plugins:
raise Error(
status=404,
message=("No plugins found."
" Please install one."))
algos = request.parameters.get('algorithm', None)
if not algos:
if self.default_plugin:
algos = [self.default_plugin.name, ]
else:
raise Error(
status=404,
message="No default plugin found, and None provided")
plugins = list()
for algo in algos:
algo = algo.lower()
if algo not in self._plugins:
msg = ("The algorithm '{}' is not valid\n"
"Valid algorithms: {}").format(algo,
self._plugins.keys())
logger.debug(msg)
raise Error(
status=404,
message=msg)
plugins.append(self._plugins[algo])
return plugins
def _process_entries(self, entries, req, plugins):
""" """
Recursively process the entries with the first plugin in the list, and pass the results Recursively process the entries with the first plugin in the list, and pass the results
to the rest of the plugins. to the rest of the plugins.
""" """
if not pending: if not plugins:
return req for i in entries:
yield i
analysis = pending[0] return
results = analysis.run(req) plugin = plugins[0]
results.activities.append(analysis) specific_params = api.parse_extra_params(req, plugin)
return self._process(results, pending[1:]) req.analysis.append({'plugin': plugin,
'parameters': specific_params})
results = plugin.analyse_entries(entries, specific_params)
for i in self._process_entries(results, req, plugins[1:]):
yield i
def install_deps(self): def install_deps(self):
logger.info('Installing dependencies') plugins.install_deps(*self.plugins())
plugins.install_deps(*self._plugins.values())
def analyse(self, request, analyses=None): def analyse(self, request):
""" """
Main method that analyses a request, either from CLI or HTTP. Main method that analyses a request, either from CLI or HTTP.
It takes a processed request, provided by the user, as returned It takes a processed request, provided by the user, as returned
by api.parse_call(). by api.parse_call().
""" """
if not self.plugins():
raise Error(
status=404,
message=("No plugins found."
" Please install one."))
if analyses is None:
plugins = self.get_plugins(request.parameters['algorithm'])
analyses = api.parse_analyses(request.parameters, plugins)
logger.debug("analysing request: {}".format(request)) logger.debug("analysing request: {}".format(request))
results = self._process(request, analyses) entries = request.entries
logger.debug("Got analysis result: {}".format(results)) request.entries = []
results = self.postprocess(results, analyses) plugins = self._get_plugins(request)
logger.debug("Returning post-processed result: {}".format(results)) results = request
for i in self._process_entries(entries, results, plugins):
results.entries.append(i)
self.convert_emotions(results)
logger.debug("Returning analysis result: {}".format(results))
results.analysis = [i['plugin'].id for i in results.analysis]
return results return results
def convert_emotions(self, resp, analyses): def _get_datasets(self, request):
if not self.datasets:
raise Error(
status=404,
message=("No datasets found."
" Please verify DatasetManager"))
datasets_name = request.parameters.get('dataset', None).split(',')
for dataset in datasets_name:
if dataset not in self.datasets:
logger.debug(("The dataset '{}' is not valid\n"
"Valid datasets: {}").format(dataset,
self.datasets.keys()))
raise Error(
status=404,
message="The dataset '{}' is not valid".format(dataset))
dm = gsitk_compat.DatasetManager()
datasets = dm.prepare_datasets(datasets_name)
return datasets
@property
def datasets(self):
self._dataset_list = {}
dm = gsitk_compat.DatasetManager()
for item in dm.get_datasets():
for key in item:
if key in self._dataset_list:
continue
properties = item[key]
properties['@id'] = key
self._dataset_list[key] = properties
return self._dataset_list
def evaluate(self, params):
logger.debug("evaluating request: {}".format(params))
results = AggregatedEvaluation()
results.parameters = params
datasets = self._get_datasets(results)
plugins = self._get_plugins(results)
for eval in evaluate(plugins, datasets):
results.evaluations.append(eval)
if 'with_parameters' not in results.parameters:
del results.parameters
logger.debug("Returning evaluation result: {}".format(results))
return results
def _conversion_candidates(self, fromModel, toModel):
candidates = self.plugins(plugin_type='emotionConversionPlugin')
for candidate in candidates:
for pair in candidate.onyx__doesConversion:
logging.debug(pair)
if pair['onyx:conversionFrom'] == fromModel \
and pair['onyx:conversionTo'] == toModel:
yield candidate
def convert_emotions(self, resp):
""" """
Conversion of all emotions in a response **in place**. Conversion of all emotions in a response **in place**.
In addition to converting from one model to another, it has In addition to converting from one model to another, it has
@@ -213,126 +244,52 @@ class Senpy(object):
Needless to say, this is far from an elegant solution, but it works. Needless to say, this is far from an elegant solution, but it works.
@todo refactor and clean up @todo refactor and clean up
""" """
plugins = [i['plugin'] for i in resp.analysis]
logger.debug("Converting emotions") params = resp.parameters
if 'parameters' not in resp: toModel = params.get('emotionModel', None)
logger.debug("NO PARAMETERS")
return resp
params = resp['parameters']
toModel = params.get('emotion-model', None)
if not toModel: if not toModel:
logger.debug("NO tomodel PARAMETER") return
return resp
logger.debug('Asked for model: {}'.format(toModel)) logger.debug('Asked for model: {}'.format(toModel))
output = params.get('conversion', None) output = params.get('conversion', None)
candidates = {}
newentries = [] for plugin in plugins:
done = [] try:
for i in resp.entries: fromModel = plugin.get('onyx:usesEmotionModel', None)
candidates[plugin.id] = next(self._conversion_candidates(fromModel, toModel))
if output == "full": logger.debug('Analysis plugin {} uses model: {}'.format(plugin.id, fromModel))
newemotions = copy.deepcopy(i.emotions) except StopIteration:
else:
newemotions = []
for j in i.emotions:
activity = j['prov:wasGeneratedBy']
act = resp.activity(activity)
if not act:
raise Error('Could not find the emotion model for {}'.format(activity))
fromModel = act.plugin['onyx:usesEmotionModel']
if toModel == fromModel:
continue
candidate = self._conversion_candidate(fromModel, toModel)
if not candidate:
e = Error(('No conversion plugin found for: ' e = Error(('No conversion plugin found for: '
'{} -> {}'.format(fromModel, toModel)), '{} -> {}'.format(fromModel, toModel)),
status=404) status=404)
e.original_response = resp e.original_response = resp
e.parameters = params e.parameters = params
raise e raise e
newentries = []
analysis = candidate.activity(params) for i in resp.entries:
done.append(analysis) if output == "full":
newemotions = copy.deepcopy(i.emotions)
else:
newemotions = []
for j in i.emotions:
plugname = j['prov:wasGeneratedBy']
candidate = candidates[plugname]
resp.analysis.append({'plugin': candidate,
'parameters': params})
for k in candidate.convert(j, fromModel, toModel, params): for k in candidate.convert(j, fromModel, toModel, params):
k.prov__wasGeneratedBy = analysis.id k.prov__wasGeneratedBy = candidate.id
if output == 'nested': if output == 'nested':
k.prov__wasDerivedFrom = j k.prov__wasDerivedFrom = j
newemotions.append(k) newemotions.append(k)
i.emotions = newemotions i.emotions = newemotions
newentries.append(i) newentries.append(i)
resp.entries = newentries resp.entries = newentries
resp.activities.extend(done)
return resp
def _conversion_candidate(self, fromModel, toModel):
if not self._conversion_candidates:
candidates = {}
for conv in self.plugins(plugin_type=plugins.EmotionConversion):
for pair in conv.onyx__doesConversion:
logging.debug(pair)
key = (pair['onyx:conversionFrom'], pair['onyx:conversionTo'])
if key not in candidates:
candidates[key] = []
candidates[key].append(conv)
self._conversion_candidates = candidates
key = (fromModel, toModel)
if key not in self._conversion_candidates:
return None
return self._conversion_candidates[key][0]
def postprocess(self, response, analyses):
'''
Transform the results from the analysis plugins.
It has some pre-defined post-processing like emotion conversion,
and it also allows plugins to auto-select themselves.
'''
response = self.convert_emotions(response, analyses)
for plug in self.plugins(plugin_type=plugins.PostProcessing):
if plug.check(response, response.activities):
activity = plug.activity(response.parameters)
response = plug.process(response, activity)
return response
def _get_datasets(self, request):
datasets_name = request.parameters.get('dataset', None).split(',')
for dataset in datasets_name:
if dataset not in gsitk_compat.datasets:
logger.debug(("The dataset '{}' is not valid\n"
"Valid datasets: {}").format(
dataset, gsitk_compat.datasets.keys()))
raise Error(
status=404,
message="The dataset '{}' is not valid".format(dataset))
return datasets_name
def evaluate(self, params):
logger.debug("evaluating request: {}".format(params))
results = AggregatedEvaluation()
results.parameters = params
datasets = self._get_datasets(results)
plugs = []
for plugname in params['algorithm']:
plugs = self.get_plugins(plugname)
for plug in plugs:
if not isinstance(plug, plugins.Evaluable):
raise Exception('Plugin {} can not be evaluated', plug.id)
for eval in plugins.evaluate(plugs, datasets):
results.evaluations.append(eval)
if 'with-parameters' not in results.parameters:
del results.parameters
logger.debug("Returning evaluation result: {}".format(results))
return results
@property @property
def default_plugin(self): def default_plugin(self):
if not self._default: if not self._default or not self._default.is_activated:
candidates = self.analysis_plugins() candidates = self.plugins(plugin_type='analysisPlugin',
is_activated=True)
if len(candidates) > 0: if len(candidates) > 0:
self._default = candidates[0] self._default = candidates[0]
else: else:
@@ -342,11 +299,87 @@ class Senpy(object):
@default_plugin.setter @default_plugin.setter
def default_plugin(self, value): def default_plugin(self, value):
if isinstance(value, plugins.Plugin): if isinstance(value, Plugin):
if not value.is_activated:
raise AttributeError('The default plugin has to be activated.')
self._default = value self._default = value
else: else:
self._default = self._plugins[value.lower()] self._default = self._plugins[value.lower()]
def activate_all(self, sync=True, allow_fail=False):
ps = []
for plug in self._plugins.keys():
try:
self.activate_plugin(plug, sync=sync)
except Exception as ex:
if not allow_fail:
raise
logger.error('Could not activate {}: {}'.format(plug, ex))
return ps
def deactivate_all(self, sync=True):
ps = []
for plug in self._plugins.keys():
ps.append(self.deactivate_plugin(plug, sync=sync))
return ps
def _set_active(self, plugin, active=True, *args, **kwargs):
''' We're using a variable in the plugin itself to activate/deactivate plugins.\
Note that plugins may activate themselves by setting this variable.
'''
plugin.is_activated = active
def _activate(self, plugin):
success = False
with plugin._lock:
if plugin.is_activated:
return
plugin.activate()
msg = "Plugin activated: {}".format(plugin.name)
logger.info(msg)
success = True
self._set_active(plugin, success)
return success
def activate_plugin(self, plugin_name, sync=True):
plugin_name = plugin_name.lower()
if plugin_name not in self._plugins:
raise Error(
message="Plugin not found: {}".format(plugin_name), status=404)
plugin = self._plugins[plugin_name]
logger.info("Activating plugin: {}".format(plugin.name))
if sync or not getattr(plugin, 'async', True):
return self._activate(plugin)
else:
th = Thread(target=partial(self._activate, plugin))
th.start()
return th
def _deactivate(self, plugin):
with plugin._lock:
if not plugin.is_activated:
return
plugin.deactivate()
logger.info("Plugin deactivated: {}".format(plugin.name))
def deactivate_plugin(self, plugin_name, sync=True):
plugin_name = plugin_name.lower()
if plugin_name not in self._plugins:
raise Error(
message="Plugin not found: {}".format(plugin_name), status=404)
plugin = self._plugins[plugin_name]
self._set_active(plugin, False)
if sync or not getattr(plugin, 'async', True):
self._deactivate(plugin)
else:
th = Thread(target=partial(self._deactivate, plugin))
th.start()
return th
def teardown(self, exception): def teardown(self, exception):
pass pass

View File

@@ -1,23 +1,4 @@
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging import logging
import os
from pkg_resources import parse_version, get_distribution, DistributionNotFound
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -31,37 +12,12 @@ def raise_exception(*args, **kwargs):
try: try:
gsitk_distro = get_distribution("gsitk")
GSITK_VERSION = parse_version(gsitk_distro.version)
if not os.environ.get('DATA_PATH'):
os.environ['DATA_PATH'] = os.environ.get('SENPY_DATA', 'senpy_data')
from gsitk.datasets.datasets import DatasetManager from gsitk.datasets.datasets import DatasetManager
from gsitk.evaluation.evaluation import Evaluation as Eval # noqa: F401 from gsitk.evaluation.evaluation import Evaluation as Eval
from gsitk.evaluation.evaluation import EvalPipeline # noqa: F401
from sklearn.pipeline import Pipeline from sklearn.pipeline import Pipeline
modules = locals()
GSITK_AVAILABLE = True GSITK_AVAILABLE = True
datasets = {} modules = locals()
manager = DatasetManager() except ImportError:
logger.warn(IMPORTMSG)
for item in manager.get_datasets():
for key in item:
if key in datasets:
continue
properties = item[key]
properties['@id'] = key
datasets[key] = properties
def prepare(ds, *args, **kwargs):
return manager.prepare_datasets(ds, *args, **kwargs)
except (DistributionNotFound, ImportError) as err:
logger.debug('Error importing GSITK: {}'.format(err))
logger.warning(IMPORTMSG)
GSITK_AVAILABLE = False GSITK_AVAILABLE = False
GSITK_VERSION = () DatasetManager = Eval = Pipeline = raise_exception
DatasetManager = Eval = Pipeline = prepare = raise_exception
datasets = {}

257
senpy/meta.py Normal file
View File

@@ -0,0 +1,257 @@
'''
Meta-programming for the models.
'''
import os
import json
import jsonschema
import inspect
import copy
from abc import ABCMeta
from collections import MutableMapping, namedtuple
class BaseMeta(ABCMeta):
'''
Metaclass for models. It extracts the default values for the fields in
the model.
For instance, instances of the following class wouldn't need to mark
their version or description on initialization:
.. code-block:: python
class MyPlugin(Plugin):
version=0.3
description='A dull plugin'
Note that these operations could be included in the __init__ of the
class, but it would be very inefficient.
'''
_subtypes = {}
def __new__(mcs, name, bases, attrs, **kwargs):
register_afterwards = False
defaults = {}
attrs = mcs.expand_with_schema(name, attrs)
if 'schema' in attrs:
register_afterwards = True
for base in bases:
if hasattr(base, '_defaults'):
defaults.update(getattr(base, '_defaults'))
info, rest = mcs.split_attrs(attrs)
for i in list(info.keys()):
if isinstance(info[i], _Alias):
fget, fset, fdel = make_property(info[i].indict)
rest[i] = property(fget=fget, fset=fset, fdel=fdel)
else:
defaults[i] = info[i]
rest['_defaults'] = defaults
cls = super(BaseMeta, mcs).__new__(mcs, name, tuple(bases), rest)
if register_afterwards:
mcs.register(cls, defaults['@type'])
return cls
@classmethod
def register(mcs, rsubclass, rtype=None):
mcs._subtypes[rtype or rsubclass.__name__] = rsubclass
@staticmethod
def expand_with_schema(name, attrs):
if 'schema' in attrs: # Schema specified by name
schema_file = '{}.json'.format(attrs['schema'])
elif 'schema_file' in attrs:
schema_file = attrs['schema_file']
del attrs['schema_file']
else:
return attrs
if '/' not in 'schema_file':
thisdir = os.path.dirname(os.path.realpath(__file__))
schema_file = os.path.join(thisdir,
'schemas',
schema_file)
schema_path = 'file://' + schema_file
with open(schema_file) as f:
schema = json.load(f)
resolver = jsonschema.RefResolver(schema_path, schema)
attrs['@type'] = "".join((name[0].lower(), name[1:]))
attrs['_schema_file'] = schema_file
attrs['schema'] = schema
attrs['_validator'] = jsonschema.Draft4Validator(schema, resolver=resolver)
schema_defaults = BaseMeta.get_defaults(attrs['schema'])
attrs.update(schema_defaults)
return attrs
@staticmethod
def is_func(v):
return inspect.isroutine(v) or inspect.ismethod(v) or \
inspect.ismodule(v) or isinstance(v, property)
@staticmethod
def is_internal(k):
return k[0] == '_' or k == 'schema' or k == 'data'
@staticmethod
def get_key(key):
if key[0] != '_':
key = key.replace("__", ":", 1)
return key
@staticmethod
def split_attrs(attrs):
'''
Extract the attributes of the class.
This allows adding default values in the class definition.
e.g.:
'''
isattr = {}
rest = {}
for key, value in attrs.items():
if not (BaseMeta.is_internal(key)) and (not BaseMeta.is_func(value)):
isattr[key] = value
else:
rest[key] = value
return isattr, rest
@staticmethod
def get_defaults(schema):
temp = {}
for obj in [
schema,
] + schema.get('allOf', []):
for k, v in obj.get('properties', {}).items():
if 'default' in v and k not in temp:
temp[k] = v['default']
return temp
def make_property(key):
    '''Build (getter, setter, deleter) closures that proxy attribute access
    on an object to its item access under the given key.'''
    def getter(self):
        return self[key]

    def setter(self, value):
        self[key] = value

    def deleter(self):
        del self[key]

    return getter, setter, deleter
class CustomDict(MutableMapping, object):
    '''
    A dictionary whose elements can also be accessed as attributes. Since some
    characters are not valid in the dot-notation, the attribute names are also
    converted. e.g.:

    > d = CustomDict()
    > d.key = d['ns:name'] = 1
    > d.key == d['key']
    True
    > d.ns__name == d['ns:name']
    True
    '''

    # Default key/value pairs copied into every new instance.
    _defaults = {}
    # Attribute names that map to a different dictionary key.
    _map_attr_key = {'id': '@id'}

    def __init__(self, *args, **kwargs):
        super(CustomDict, self).__init__()
        for k, v in self._defaults.items():
            self[k] = copy.copy(v)
        for arg in args:
            self.update(arg)
        for k, v in kwargs.items():
            self[self._attr_to_key(k)] = v
        # FIX: removed a trailing `return self`; __init__ must return None,
        # otherwise instantiating the class raises TypeError.

    def serializable(self):
        '''Return a plain, JSON-serializable copy of this mapping,
        recursing into nested models, dicts, lists and sets.'''
        def ser_or_down(item):
            if hasattr(item, 'serializable'):
                return item.serializable()
            elif isinstance(item, dict):
                temp = dict()
                for kp in item:
                    vp = item[kp]
                    temp[kp] = ser_or_down(vp)
                return temp
            elif isinstance(item, list) or isinstance(item, set):
                return list(ser_or_down(i) for i in item)
            else:
                return item

        return ser_or_down(self.as_dict())

    def __getitem__(self, key):
        key = self._key_to_attr(key)
        return self.__dict__[key]

    def __setitem__(self, key, value):
        '''Do not insert data directly, there might be a property in that key. '''
        key = self._key_to_attr(key)
        return setattr(self, key, value)

    def __delitem__(self, key):
        # FIX: map the key exactly like __setitem__/__getitem__ do, so that
        # del d['ns:name'] works after d['ns:name'] = v (it was unmapped).
        del self.__dict__[self._key_to_attr(key)]

    def as_dict(self):
        '''Public key/value pairs, with attribute names mapped back to keys.'''
        return {self._attr_to_key(k): v for k, v in self.__dict__.items()
                if not self._internal_key(k)}

    def __iter__(self):
        return (k for k in self.__dict__ if not self._internal_key(k))

    def __len__(self):
        # FIX: count only public entries, so len(d) == len(list(d));
        # previously internal (underscore) attributes were counted too.
        return sum(1 for _ in self)

    def update(self, other):
        for k, v in other.items():
            self[k] = v

    def _attr_to_key(self, key):
        # First "__" becomes ":", then apply explicit attribute aliases.
        key = key.replace("__", ":", 1)
        key = self._map_attr_key.get(key, key)
        return key

    def _key_to_attr(self, key):
        if self._internal_key(key):
            return key
        key = key.replace(":", "__", 1)
        return key

    def __getattr__(self, key):
        try:
            return self.__dict__[self._attr_to_key(key)]
        except KeyError:
            raise AttributeError(key)

    @staticmethod
    def _internal_key(key):
        # Underscore-prefixed keys are internal and never serialized.
        return key[0] == '_'

    def __str__(self):
        return str(self.serializable())

    def __repr__(self):
        return str(self.serializable())
_Alias = namedtuple('Alias', 'indict')
def alias(key):
return _Alias(key)

View File

@@ -1,101 +1,197 @@
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' '''
Senpy Models. Senpy Models.
This implementation should mirror the JSON schema definition.
For compatibility with Py3 and for easier debugging, this new version drops
introspection and adds all arguments to the models.
''' '''
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from future.utils import with_metaclass
from past.builtins import basestring
from typing import * import time
import copy
import os
import logging
import jmespath
import json import json
import os
import jsonref
from flask import Response as FlaskResponse
from pyld import jsonld
from textwrap import dedent import logging
from pydantic import BaseModel as PydanticModel
from pydantic import computed_field, field_validator, ConfigDict, Field
from pydantic_core import from_json
logging.getLogger('rdflib').setLevel(logging.WARN) logging.getLogger('rdflib').setLevel(logging.WARN)
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
from rdflib import Graph
CONTEXT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'schemas',
'context.jsonld')
with open(CONTEXT_PATH) as f:
BASE_CONTEXT = dict(json.loads(f.read()))
class BaseModel(PydanticModel): from .meta import BaseMeta, CustomDict, alias
DEFINITIONS_FILE = 'definitions.json'
CONTEXT_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'schemas', 'context.jsonld')
def get_schema_path(schema_file, absolute=False):
if absolute:
return os.path.realpath(schema_file)
else:
return os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'schemas',
schema_file)
def read_schema(schema_file, absolute=False):
schema_path = get_schema_path(schema_file, absolute)
schema_uri = 'file://{}'.format(schema_path)
with open(schema_path) as f:
return jsonref.load(f, base_uri=schema_uri)
def dump_schema(schema):
return jsonref.dumps(schema)
def load_context(context):
logging.debug('Loading context: {}'.format(context))
if not context:
return context
elif isinstance(context, list):
contexts = []
for c in context:
contexts.append(load_context(c))
return contexts
elif isinstance(context, dict):
return dict(context)
elif isinstance(context, basestring):
try:
with open(context) as f:
return dict(json.loads(f.read()))
except IOError:
return context
else:
raise AttributeError('Please, provide a valid context')
base_context = load_context(CONTEXT_PATH)
def register(rsubclass, rtype=None):
BaseMeta.register(rsubclass, rtype)
class BaseModel(with_metaclass(BaseMeta, CustomDict)):
''' '''
Entities of the base model are a special kind of dictionary that emulates Entities of the base model are a special kind of dictionary that emulates
a JSON-LD object. a JSON-LD object. The structure of the dictionary is checked via JSON-schema.
The basic attributes should be set in the class, but additional values can For convenience, the values can also be accessed as attributes
also be provided/set via attributes (à la Javascript). e.g.: (a la Javascript). e.g.:
>>> myobject.key == myobject['key'] >>> myobject.key == myobject['key']
True True
>>> myobject['ns:name'] = 'Peter' >>> myobject.ns__name == myobject['ns:name']
True True
Additionally, subclasses of this class can specify default values for their
instances. These defaults are inherited by subclasses. e.g.:
>>> class NewModel(BaseModel):
... mydefault = 5
>>> n1 = NewModel()
>>> n1['mydefault'] == 5
True
>>> n1.mydefault = 3
>>> n1['mydefault'] = 3
True
>>> n2 = NewModel()
>>> n2 == 5
True
>>> class SubModel(NewModel):
pass
>>> subn = SubModel()
>>> subn.mydefault == 5
True
Lastly, every subclass that also specifies a schema will get registered, so it
is possible to deserialize JSON and get the right type.
i.e. to recover an instance of the original class from a plain JSON.
''' '''
model_config = ConfigDict(extra='ignore', ignored_types=(object, ))
context: Optional[Union[str, Dict]] = Field(default=None, serialization_alias="@context") schema_file = DEFINITIONS_FILE
wasGeneratedBy: Optional[str] = Field(default=None, serialization_alias="prov:wasGeneratedBy") _context = base_context["@context"]
def __init__(self, *args, **kwargs):
auto_id = kwargs.pop('_auto_id', True)
super(BaseModel, self).__init__(*args, **kwargs)
if auto_id:
self.id
if '@type' not in self:
logger.warn('Created an instance of an unknown model')
@property @property
def id(self): def id(self):
if '@id' not in self: if '@id' not in self:
self['@id'] = 'prefix:{}_{}'.format(type(self).__name__, time.time()) self['@id'] = '_:{}_{}'.format(type(self).__name__, time.time())
return self['@id'] return self['@id']
@id.setter @id.setter
def id(self, value): def id(self, value):
self['@id'] = value self['@id'] = value
def serialize(self, format='json-ld', with_mime=False, def flask(self,
template=None, prefix=None, fields=None, **kwargs): in_headers=False,
js = self.jsonld(prefix=prefix, **kwargs) headers=None,
if template is not None: outformat='json-ld',
rtemplate = Environment(loader=BaseLoader).from_string(template) **kwargs):
content = rtemplate.render(**self) """
mimetype = 'text' Return the values and error to be used in flask.
elif fields is not None: So far, it returns a fixed context. We should store/generate different
# Emulate field selection by constructing a template contexts if the plugin adds more aliases.
content = json.dumps(jmespath.search(fields, js)) """
mimetype = 'text' headers = headers or {}
elif format == 'json-ld': kwargs["with_context"] = not in_headers
content, mimetype = self.serialize(format=outformat,
with_mime=True,
**kwargs)
if outformat == 'json-ld' and in_headers:
headers.update({
"Link":
('<%s>;'
'rel="http://www.w3.org/ns/json-ld#context";'
' type="application/ld+json"' % kwargs.get('context_uri'))
})
return FlaskResponse(
response=content,
status=self.get('status', 200),
headers=headers,
mimetype=mimetype)
def serialize(self, format='json-ld', with_mime=False, **kwargs):
js = self.jsonld(**kwargs)
content = json.dumps(js, indent=2, sort_keys=True) content = json.dumps(js, indent=2, sort_keys=True)
if format == 'json-ld':
mimetype = "application/json" mimetype = "application/json"
elif format in ['turtle', 'ntriples']: elif format in ['turtle', 'ntriples']:
content = json.dumps(js, indent=2, sort_keys=True)
logger.debug(js) logger.debug(js)
context = [self._context, {'prefix': prefix, '@base': prefix}] base = kwargs.get('prefix')
g = Graph().parse( g = Graph().parse(
data=content, data=content,
format='json-ld', format='json-ld',
prefix=prefix, base=base,
context=context) context=[self._context,
{'@base': base}])
logger.debug( logger.debug(
'Parsing with prefix: {}'.format(kwargs.get('prefix'))) 'Parsing with prefix: {}'.format(kwargs.get('prefix')))
content = g.serialize(format=format, content = g.serialize(format=format,
prefix=prefix) base=base).decode('utf-8')
mimetype = 'text/{}'.format(format) mimetype = 'text/{}'.format(format)
else: else:
raise Error('Unknown outformat: {}'.format(format)) raise Error('Unknown outformat: {}'.format(format))
@@ -108,25 +204,14 @@ class BaseModel(PydanticModel):
with_context=False, with_context=False,
context_uri=None, context_uri=None,
prefix=None, prefix=None,
base=None, expanded=False):
expanded=False,
**kwargs):
result = self.serializable(**kwargs) result = self.serializable()
if expanded: if expanded:
result = jsonld.expand( result = jsonld.expand(
result, result, options={'base': prefix,
options={ 'expandContext': self._context})[0]
'expandContext': [
self.context,
{
'prefix': prefix,
'endpoint': prefix
}
]
}
)[0]
if not with_context: if not with_context:
try: try:
del result['@context'] del result['@context']
@@ -150,177 +235,117 @@ class BaseModel(PydanticModel):
self['prov:wasGeneratedBy'] = another.id self['prov:wasGeneratedBy'] = another.id
def subtypes():
return BaseMeta._subtypes
def from_dict(indict, cls=None):
if not cls:
target = indict.get('@type', None)
cls = BaseModel
try:
cls = subtypes()[target]
except KeyError:
pass
outdict = dict()
for k, v in indict.items():
if k == '@context':
pass
elif isinstance(v, dict):
v = from_dict(indict[k])
elif isinstance(v, list):
v = v[:]
for ix, v2 in enumerate(v):
if isinstance(v2, dict):
v[ix] = from_dict(v2)
outdict[k] = copy.copy(v)
return cls(**outdict)
def from_string(string, **kwargs):
return from_dict(json.loads(string), **kwargs)
def from_json(injson):
indict = json.loads(injson)
return from_dict(indict)
class Entry(BaseModel): class Entry(BaseModel):
text: str = Field(serialization_alias="nif:isString") schema = 'entry'
class Activity(BaseModel): text = alias('nif:isString')
plugin: str
params: Dict[str, Any]
analyzed: List[str] = []
class Results(BaseModel): class Sentiment(BaseModel):
activities: List[Activity] = [] schema = 'sentiment'
entries: List[Entry] = []
parameters: Dict[str, str] = {} polarity = alias('marl:hasPolarity')
polarityValue = alias('marl:hasPolarityValue')
class Analysis(BaseModel): class Error(BaseModel, Exception):
plugin: 'Plugin' schema = 'error'
params: dict[str, object] = {}
def run(self, request): def __init__(self, message='Generic senpy exception', *args, **kwargs):
return self.plugin.process(request, self) Exception.__init__(self, message)
super(Error, self).__init__(*args, **kwargs)
self.message = message
def __str__(self):
if not hasattr(self, 'errors'):
return self.message
return '{}:\n\t{}'.format(self.message, self.errors)
def __hash__(self):
return Exception.__hash__(self)
class MoralityAnalysis(Analysis): # Add the remaining schemas programmatically
usedMoralValueModel: str
usedMLModel: Optional[str] = None def _class_from_schema(name, schema=None, schema_file=None, base_classes=None):
base_classes = base_classes or []
base_classes.append(BaseModel)
attrs = {}
if schema:
attrs['schema'] = schema
elif schema_file:
attrs['schema_file'] = schema_file
else:
attrs['schema'] = name
name = "".join((name[0].upper(), name[1:]))
return BaseMeta(name, base_classes, attrs)
class Annotation(BaseModel): def _add_class_from_schema(*args, **kwargs):
annotated: Optional[str] = None generatedClass = _class_from_schema(*args, **kwargs)
wasGeneratedBy: Optional[str] = None globals()[generatedClass.__name__] = generatedClass
del generatedClass
def add_provenance(self, entry, activity):
self.annotated = entry.id
self.wasGeneratedBy = activity.id
self[prov['wasGeneratedBy']] = activity.id
activity.analysed.append(entry.id)
class MoralAnnotation(Annotation): for i in [
def __init__(self, category, confidence=None, intensity=None, 'aggregatedEvaluation',
entry=None, activity=None): 'analysis',
super().__init__() 'dataset',
if confidence is not None: 'datasets',
self[amor['confidence']] = confidence 'emotion',
if intensity is not None: 'emotionConversion',
self[amor_mft['hasPolarityIntensity']] = intensity 'emotionConversionPlugin',
if entry and activity: 'emotionAnalysis',
self.add_provenance(entry, activity) 'emotionModel',
elif entry or activity: 'emotionPlugin',
raise Exception() 'emotionSet',
'evaluation',
'entity',
'help',
'metric',
'plugin',
'plugins',
'response',
'results',
'sentimentPlugin',
'suggestion',
'topic',
@classmethod ]:
def from_label(cls, label): _add_class_from_schema(i)
return cls(label.category, label.confidence, label.intensity)
class Plugin(BaseModel):
name: str = Field(default='', validate_default=True)
version: Optional[str] = 'dev'
author: Optional[str] = None
extra_params: Dict[str, Any] = {}
description: str = Field(default="", json_schema_extra=dict(hidden=True))
@field_validator("description", mode="before")
@classmethod
def dedentdoc(cls, doc: str) -> str:
doc = doc or cls.get('__doc__', None)
return dedent(doc)
@field_validator("name", mode="before")
@classmethod
def clsname(cls, name: str) -> str:
name = name or cls.__name__.capitalize()
return name
@computed_field
@property
def id(self) -> str:
return 'endpoint:plugins/{}_{}'.format(self.name, self.version)
def activity(self, params, *, cls=Analysis):
'''Generate an Analysis (prov:Activity) from this plugin and the given parameters'''
a = cls(plugin=self, params=params)
return a
class Plugins(BaseModel):
plugins: List[Plugin] = []
class Emotion:
hasEmotionIntensity: str = Field(serialization_alias="onyx:hasEmotionIntensity")
hasEmotionCategory: str = Field(serialization_alias="onyx:hasEmotionCategory")
class Sentiment:
pass
class SentimentPlugin(Plugin):
pass
class Suggestion:
pass
class Topic:
pass
class Context:
pass
class Dataset:
pass
class Datasets:
pass
class Definitions:
pass
class Dimensions:
pass
class EmotionAnalysis:
pass
class EmotionConversion:
pass
class EmotionConversionPlugin(Plugin):
pass
class Emotion:
pass
class EmotionModel:
pass
class EmotionPlugin(Plugin):
pass
class EmotionSet:
pass
class Entity:
pass
class ErrorResponse(BaseModel):
code: Optional[int] = 500
message: str
class Metric:
pass
class Evaluation:
pass
class AggregatedEvaluation:
pass
class Help:
pass
class Parameter:
pass
def from_dict(c, cls):
return cls.model_validate(d)
def from_string(s, cls):
return from_dict(from_json(s, allow_partial=True))

View File

@@ -1,7 +0,0 @@
from rdflib import Namespace
amor = Namespace('http://www.gsi.upm.es/ontologies/amor/ns#')
amor_bhv = Namespace('http://www.gsi.upm.es/ontologies/amor-bhv/ns#')
amor_mft = Namespace('http://www.gsi.upm.es/ontologies/amor-mft/ns#')
prov = Namespace('http://www.w3.org/ns/prov#')
emoml = Namespace('http://www.gsi.upm.es/ontologies/onyx/vocabularies/emotionml/ns#')

View File

@@ -1,22 +1,8 @@
#!/usr/local/bin/python from future import standard_library
# -*- coding: utf-8 -*- standard_library.install_aliases()
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import *
from future.utils import with_metaclass
from functools import partial from functools import partial
import os.path import os.path
@@ -24,34 +10,61 @@ import os
import re import re
import pickle import pickle
import logging import logging
import copy
import pprint import pprint
import deprecation
import inspect import inspect
import sys import sys
import subprocess import subprocess
import importlib import importlib
import yaml import yaml
import threading import threading
import multiprocessing import nltk
import pkg_resources
from nltk import download
from sklearn.base import TransformerMixin, BaseEstimator
from itertools import product
from .. import models, utils from .. import models, utils
from ..errors import Error
from .. import api from .. import api
from .. import gsitk_compat from .. import gsitk_compat
from .. import testing from .. import testing
from .. import config
from .. import ns
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
class Plugin(models.Plugin): class PluginMeta(models.BaseMeta):
_classes = {}
def __new__(mcs, name, bases, attrs, **kwargs):
plugin_type = []
if hasattr(bases[0], 'plugin_type'):
plugin_type += bases[0].plugin_type
plugin_type.append(name)
alias = attrs.get('name', name)
attrs['plugin_type'] = plugin_type
attrs['name'] = alias
if 'description' not in attrs:
doc = attrs.get('__doc__', None)
if doc:
attrs['description'] = doc
else:
logger.warn(('Plugin {} does not have a description. '
'Please, add a short summary to help other developers').format(name))
cls = super(PluginMeta, mcs).__new__(mcs, name, bases, attrs)
if alias in mcs._classes:
if os.environ.get('SENPY_TESTING', ""):
raise Exception(('The type of plugin {} already exists. '
'Please, choose a different name').format(name))
else:
logger.warn('Overloading plugin class: {}'.format(alias))
mcs._classes[alias] = cls
return cls
@classmethod
def for_type(cls, ptype):
return cls._classes[ptype]
class Plugin(with_metaclass(PluginMeta, models.Plugin)):
''' '''
Base class for all plugins in senpy. Base class for all plugins in senpy.
A plugin must provide at least these attributes: A plugin must provide at least these attributes:
@@ -64,25 +77,22 @@ class Plugin(models.Plugin):
''' '''
data_folder: str = "." def __init__(self, info=None, data_folder=None, **kwargs):
test_cases: List[Dict] = []
def __init__(self, *, data_folder=None, **data):
""" """
Provides a canonical name for plugins and serves as base for other Provides a canonical name for plugins and serves as base for other
kinds of plugins. kinds of plugins.
""" """
super().__init__(**data) logger.debug("Initialising {}".format(info))
self._directory = os.path.abspath( super(Plugin, self).__init__(**kwargs)
os.path.dirname(inspect.getfile(self.__class__))) if info:
self.update(info)
self.validate()
self.id = 'endpoint:plugins/{}_{}'.format(self['name'], self['version'])
self.is_activated = False
self._lock = threading.Lock()
self._directory = os.path.abspath(os.path.dirname(inspect.getfile(self.__class__)))
if not data_folder: data_folder = data_folder or os.getcwd()
data_folder = config.data_folder
if not data_folder:
data_folder = os.getcwd()
data_folder = os.path.abspath(data_folder)
subdir = os.path.join(data_folder, self.name) subdir = os.path.join(data_folder, self.name)
self._data_paths = [ self._data_paths = [
@@ -102,66 +112,39 @@ class Plugin(models.Plugin):
def log(self): def log(self):
return self._log return self._log
def validate(self):
missing = []
for x in ['name', 'description', 'version']:
if x not in self:
missing.append(x)
if missing:
raise models.Error('Missing configuration parameters: {}'.format(missing))
def get_folder(self): def get_folder(self):
return os.path.dirname(inspect.getfile(self.__class__)) return os.path.dirname(inspect.getfile(self.__class__))
def process(self, request, activity, **kwargs): def activate(self):
""" pass
An implemented plugin should override this method.
Here, we assume that a process_entries method exists.
"""
newentries = list(
self.process_entries(request.entries, activity))
request.entries = newentries
return request
def process_entries(self, entries, activity): def deactivate(self):
for entry in entries: pass
self.log.debug('Processing entry with plugin %s: %s', self, entry)
results = self.process_entry(entry, activity)
if inspect.isgenerator(results):
for result in results:
yield result
elif results:
yield results
def process_entry(self, entry, activity):
"""
This base method is here to adapt plugins which only
implement the *process* function.
Note that this method may yield an annotated entry or a list of
entries (e.g. in a tokenizer)
"""
raise NotImplementedError(
'You need to implement process, process_entries or process_entry in your plugin'
)
def test(self, test_cases=None): def test(self, test_cases=None):
if not test_cases: if not test_cases:
if not hasattr(self, 'test_cases'): if not hasattr(self, 'test_cases'):
raise AttributeError( raise AttributeError(('Plugin {} [{}] does not have any defined '
('Plugin {} [{}] does not have any defined '
'test cases').format(self.id, 'test cases').format(self.id,
inspect.getfile(self.__class__))) inspect.getfile(self.__class__)))
test_cases = self.test_cases test_cases = self.test_cases
for case in test_cases: for case in test_cases:
try: try:
fmt = 'case: {}'.format(case.get('name', case))
if 'name' in case:
self.log.info('Test case: {}'.format(case['name']))
self.log.debug('Test case:\n\t{}'.format(
pprint.pformat(fmt)))
self.test_case(case) self.test_case(case)
self.log.debug('Test case passed:\n{}'.format(pprint.pformat(case)))
except Exception as ex: except Exception as ex:
self.log.warning('Test case failed:\n{}'.format( self.log.warn('Test case failed:\n{}'.format(pprint.pformat(case)))
pprint.pformat(case)))
raise raise
def test_case(self, case, mock=testing.MOCK_REQUESTS): def test_case(self, case, mock=testing.MOCK_REQUESTS):
if 'entry' not in case and 'input' in case:
entry = models.Entry(_auto_id=False)
entry.text = case['input']
case['entry'] = entry
entry = models.Entry(case['entry']) entry = models.Entry(case['entry'])
given_parameters = case.get('params', case.get('parameters', {})) given_parameters = case.get('params', case.get('parameters', {}))
expected = case.get('expected', None) expected = case.get('expected', None)
@@ -169,51 +152,39 @@ class Plugin(models.Plugin):
responses = case.get('responses', []) responses = case.get('responses', [])
try: try:
request = models.Response() params = api.parse_params(given_parameters, self.extra_params)
parameters = api.parse_params(given_parameters,
self.extra_params)
request.entries = [
entry,
]
activity = self.activity(parameters) method = partial(self.analyse_entries, [entry, ], params)
method = partial(self.process, request, activity)
if mock: if mock:
res = method() res = list(method())
else: else:
with testing.patch_all_requests(responses): with testing.patch_all_requests(responses):
res = method() res = list(method())
if not isinstance(expected, list): if not isinstance(expected, list):
expected = [expected] expected = [expected]
utils.check_template(res.entries, expected) utils.check_template(res, expected)
res.validate() for r in res:
except Error: r.validate()
except models.Error:
if should_fail: if should_fail:
return return
raise raise
assert not should_fail assert not should_fail
def find_file(self, fname): def find_file(self, fname):
tried = []
for p in self._data_paths: for p in self._data_paths:
alternative = os.path.abspath(os.path.join(p, fname)) alternative = os.path.join(p, fname)
if os.path.exists(alternative): if os.path.exists(alternative):
return alternative return alternative
tried.append(alternative) raise IOError('File does not exist: {}'.format(fname))
raise IOError(f'File does not exist: {fname}. Tried: {tried}')
def path(self, fpath):
if not os.path.isabs(fpath):
fpath = os.path.join(self.data_folder, fpath)
return fpath
def open(self, fpath, mode='r'): def open(self, fpath, mode='r'):
if 'w' in mode: if 'w' in mode:
# When writing, only use absolute paths or data_folder # When writing, only use absolute paths or data_folder
fpath = self.path(fpath) if not os.path.isabs(fpath):
fpath = os.path.join(self.data_folder, fpath)
else: else:
fpath = self.find_file(fpath) fpath = self.find_file(fpath)
@@ -227,41 +198,48 @@ class Plugin(models.Plugin):
SenpyPlugin = Plugin SenpyPlugin = Plugin
class Analyser(Plugin): class Analysis(Plugin):
''' '''
A subclass of Plugin that analyses text and provides an annotation. A subclass of Plugin that analyses text and provides an annotation.
''' '''
@deprecation.deprecated(details="Use process instead") def analyse(self, *args, **kwargs):
def analyse(self, request, activity): raise NotImplementedError(
return super(Analyser, self).process(request, activity) 'Your plugin should implement either analyse or analyse_entry')
@deprecation.deprecated(details="Use process_entries instead") def analyse_entry(self, entry, parameters):
def analyse_entries(self, entries, activity): """ An implemented plugin should override this method.
for i in super(Analyser, self).process_entries(entries, activity): This base method is here to adapt old style plugins which only
implement the *analyse* function.
Note that this method may yield an annotated entry or a list of
entries (e.g. in a tokenizer)
"""
text = entry['nif:isString']
params = copy.copy(parameters)
params['input'] = text
results = self.analyse(**params)
for i in results.entries:
yield i yield i
def process(self, request, activity, **kwargs): def analyse_entries(self, entries, parameters):
return self.analyse(request, activity) for entry in entries:
self.log.debug('Analysing entry with plugin {}: {}'.format(self, entry))
def process_entries(self, entries, activity): results = self.analyse_entry(entry, parameters)
for i in self.analyse_entries(entries, activity): if inspect.isgenerator(results):
yield i for result in results:
yield result
def process_entry(self, entry, activity, **kwargs):
if hasattr(self, 'analyse_entry'):
for i in self.analyse_entry(entry, activity):
yield i
else: else:
super(Analyser, self).process_entry(entry, activity, **kwargs) yield results
def test_case(self, case):
if 'entry' not in case and 'input' in case:
entry = models.Entry(_auto_id=False)
entry.nif__isString = case['input']
case['entry'] = entry
super(Analysis, self).test_case(case)
AnalysisPlugin = Analyser AnalysisPlugin = Analysis
class Transformation(AnalysisPlugin):
'''Empty'''
pass
class Conversion(Plugin): class Conversion(Plugin):
@@ -269,201 +247,106 @@ class Conversion(Plugin):
A subclass of Plugins that convert between different annotation models. A subclass of Plugins that convert between different annotation models.
e.g. a conversion of emotion models, or normalization of sentiment values. e.g. a conversion of emotion models, or normalization of sentiment values.
''' '''
pass
def process(self, response, parameters, plugins=None, **kwargs):
plugins = plugins or []
newentries = []
for entry in response.entries:
newentries.append(
self.convert_entry(entry, parameters, plugins))
response.entries = newentries
return response
def convert_entry(self, entry, parameters, conversions_applied):
raise NotImplementedError(
'You should implement a way to convert each entry, or a custom process method'
)
ConversionPlugin = Conversion ConversionPlugin = Conversion
class Evaluable(Plugin): class SentimentPlugin(Analysis, models.SentimentPlugin):
'''
Common class for plugins that can be evaluated with GSITK.
They should implement the methods below.
'''
def as_pipe(self):
raise Exception('Implement the as_pipe function')
def evaluate_func(self, X, activity=None):
raise Exception('Implement the evaluate_func function')
def evaluate(self, *args, **kwargs):
return evaluate([self], *args, **kwargs)
class SentimentPlugin(Analyser, Evaluable):
''' '''
Sentiment plugins provide sentiment annotation (using Marl) Sentiment plugins provide sentiment annotation (using Marl)
''' '''
minPolarityValue: float = 0 minPolarityValue = 0
maxPolarityValue: float = 1 maxPolarityValue = 1
def test_case(self, case): def test_case(self, case):
if 'polarity' in case: if 'polarity' in case:
expected = case.get('expected', {}) expected = case.get('expected', {})
s = models.Sentiment(_auto_id=False) s = models.Sentiment(_auto_id=False)
s.marl__hasPolarity = case['polarity'] s.marl__hasPolarity = case['polarity']
if 'marl:hasOpinion' not in expected: if 'sentiments' not in expected:
expected['marl:hasOpinion'] = [] expected['sentiments'] = []
expected['marl:hasOpinion'].append(s) expected['sentiments'].append(s)
case['expected'] = expected case['expected'] = expected
super(SentimentPlugin, self).test_case(case) super(SentimentPlugin, self).test_case(case)
def normalize(self, value, minValue, maxValue):
nv = minValue + (value - self.minPolarityValue) * (
self.maxPolarityValue - self.minPolarityValue) / (maxValue - minValue)
return nv
def as_pipe(self): class EmotionPlugin(Analysis, models.EmotionPlugin):
pipe = gsitk_compat.Pipeline([('senpy-plugin', ScikitWrapper(self))])
pipe.name = self.id
return pipe
def evaluate_func(self, X, activity=None):
if activity is None:
parameters = api.parse_params({},
self.extra_params)
activity = self.activity(parameters)
entries = []
for feat in X:
if isinstance(feat, list):
feat = ' '.join(feat)
entries.append(models.Entry(text=feat))
labels = []
for e in self.process_entries(entries, activity):
sent = e.sentiments[0].polarity
label = -1
if sent == 'marl:Positive':
label = 1
elif sent == 'marl:Negative':
label = -1
labels.append(label)
return labels
class EmotionPlugin(Analyser, models.EmotionPlugin):
''' '''
Emotion plugins provide emotion annotation (using Onyx) Emotion plugins provide emotion annotation (using Onyx)
''' '''
minEmotionValue: float = 0 minEmotionValue = 0
maxEmotionValue: float = 1 maxEmotionValue = 1
class AnnotationPlugin(AnalysisPlugin):
def process_entry(self, entry, activity, **kwargs):
for annotation in self.annotate(entry):
annotation.add_provenance(entry, activity)
yield entry
def annotate(self, entry, **kwargs):
raise NotImplemented("this should be implemented in subclasses")
class MoralityPlugin(AnnotationPlugin):
moralValueModel: str = ns.amor_mft['MoralFoundationTHeory']
def activity(self, parameters=None):
return models.MoralityAnalysis(plugin=self,
usedMLModel=None,
usedMoralValueModel=self.model)
class EmotionConversion(Conversion): class EmotionConversion(Conversion):
''' '''
A subclass of Conversion that converts emotion annotations using different models A subclass of Conversion that converts emotion annotations using different models
''' '''
pass
def can_convert(self, fromModel, toModel):
'''
Whether this plugin can convert from fromModel to toModel.
If fromModel is None, it is interpreted as "any Model"
'''
for pair in self.onyx__doesConversion:
if (pair['onyx:conversionTo'] == toModel) and \
((fromModel is None) or (pair['onyx:conversionFrom'] == fromModel)):
return True
return False
EmotionConversionPlugin = EmotionConversion EmotionConversionPlugin = EmotionConversion
class PostProcessing(Plugin): class Box(AnalysisPlugin):
'''
A plugin that converts the output of other plugins (post-processing).
'''
def check(self, request, plugins):
'''Should this plugin be run for this request?'''
return False
class Box(Analyser):
''' '''
Black box plugins delegate analysis to a function. Black box plugins delegate analysis to a function.
The flow is like this: The flow is like so:
.. code-block:: .. code-block::
entries --> to_features() --> predict_many() --> to_entry() --> entries' entry --> input() --> predict_one() --> output() --> entry'
In other words: their ``to_features`` method converts a query (entry and a set of parameters) In other words: their ``input`` method convers a query (entry and a set of parameters) into
into the input to the `predict_one` method, which only uses an array of features. the input to the box method. The ``output`` method convers the results given by the box into
The ``to_entry`` method converts the results given by the box into an entry that senpy can an entry that senpy can handle.
handle.
''' '''
def to_features(self, entry, activity=None): def input(self, entry, params=None):
'''Transforms a query (entry+param) into an input for the black box''' '''Transforms a query (entry+param) into an input for the black box'''
return entry return entry
def to_entry(self, features, entry=None, activity=None): def output(self, output, entry=None, params=None):
'''Transforms the results of the black box into an entry''' '''Transforms the results of the black box into an entry'''
return entry return output
def predict_one(self, features, activity=None): def predict_one(self, input):
raise NotImplementedError( raise NotImplementedError('You should define the behavior of this plugin')
'You should define the behavior of this plugin')
def predict_many(self, features, activity=None): def analyse_entries(self, entries, params):
results = []
for feat in features:
results.append(self.predict_one(features=feat, activity=activity))
return results
def process_entry(self, entry, activity):
for i in self.process_entries([entry], activity):
yield i
def process_entries(self, entries, activity):
features = []
for entry in entries: for entry in entries:
features.append(self.to_features(entry=entry, activity=activity)) input = self.input(entry=entry, params=params)
results = self.predict_many(features=features, activity=activity) results = self.predict_one(input=input)
yield self.output(output=results, entry=entry, params=params)
for (result, entry) in zip(results, entries): def fit(self, X=None, y=None):
yield self.to_entry(features=result, entry=entry, activity=activity) return self
def transform(self, X):
return [self.predict_one(x) for x in X]
def predict(self, X):
return self.transform(X)
def fit_transform(self, X, y):
self.fit(X, y)
return self.transform(X)
def as_pipe(self):
pipe = gsitk_compat.Pipeline([('plugin', self)])
pipe.name = self.name
return pipe
class TextBox(Box): class TextBox(Box):
'''A black box plugin that takes only text as input''' '''A black box plugin that takes only text as input'''
def to_features(self, entry, activity): def input(self, entry, params):
return [entry.text] entry = super(TextBox, self).input(entry, params)
return entry['nif:isString']
class SentimentBox(TextBox, SentimentPlugin): class SentimentBox(TextBox, SentimentPlugin):
@@ -471,35 +354,17 @@ class SentimentBox(TextBox, SentimentPlugin):
A box plugin where the output is only a polarity label or a tuple (polarity, polarityValue) A box plugin where the output is only a polarity label or a tuple (polarity, polarityValue)
''' '''
classes: List[str] = ['marl:Positive', 'marl:Neutral', 'marl:Negative'] def output(self, output, entry, **kwargs):
binary: bool = True
def to_entry(self, features, entry, activity, **kwargs):
if len(features) != len(self.classes):
raise Error('The number of features ({}) does not match the classes '
'(plugin.classes ({})'.format(len(features), len(self.classes)))
minValue = activity.param('marl:minPolarityValue', 0)
maxValue = activity.param('marl:minPolarityValue', 1)
activity['marl:minPolarityValue'] = minValue
activity['marl:maxPolarityValue'] = maxValue
for k, v in zip(self.classes, features):
s = models.Sentiment() s = models.Sentiment()
if self.binary: try:
if not v: # Carry on if the value is 0 label, value = output
continue except ValueError:
s['marl:hasPolarity'] = k label, value = output, None
else: s.prov(self)
if v is not None: s.polarity = label
s['marl:hasPolarity'] = k if value is not None:
nv = self.normalize(v, minValue, maxValue) s.polarityValue = value
s['marl:polarityValue'] = nv
s.prov(activity)
entry.sentiments.append(s) entry.sentiments.append(s)
return entry return entry
@@ -508,27 +373,19 @@ class EmotionBox(TextBox, EmotionPlugin):
A box plugin where the output is only an a tuple of emotion labels A box plugin where the output is only an a tuple of emotion labels
''' '''
EMOTIONS: List[str] = [] def output(self, output, entry, **kwargs):
with_intensity: bool = True if not isinstance(output, list):
output = [output]
def to_entry(self, features, entry, activity, **kwargs):
s = models.EmotionSet() s = models.EmotionSet()
if len(features) != len(self.EMOTIONS):
raise Exception(('The number of classes in the plugin and the number of features '
'do not match'))
for label, intensity in zip(self.EMOTIONS, features):
e = models.Emotion(onyx__hasEmotionCategory=label)
if self.with_intensity:
e.onyx__hasEmotionIntensity = intensity
s.onyx__hasEmotion.append(e)
s.prov(activity)
entry.emotions.append(s) entry.emotions.append(s)
for label in output:
e = models.Emotion(onyx__hasEmotionCategory=label)
s.append(e)
return entry return entry
class MappingMixin(object): class MappingMixin(object):
@property @property
def mappings(self): def mappings(self):
return self._mappings return self._mappings
@@ -537,15 +394,12 @@ class MappingMixin(object):
def mappings(self, value): def mappings(self, value):
self._mappings = value self._mappings = value
def to_entry(self, features, entry, activity): def output(self, output, entry, params):
features = list(features) output = self.mappings.get(output,
for i, feat in enumerate(features): self.mappings.get('default', output))
features[i] = self.mappings.get(feat, return super(MappingMixin, self).output(output=output,
self.mappings.get('default',
feat))
return super(MappingMixin, self).to_entry(features=features,
entry=entry, entry=entry,
activity=activity) params=params)
class ShelfMixin(object): class ShelfMixin(object):
@@ -558,8 +412,7 @@ class ShelfMixin(object):
with self.open(self.shelf_file, 'rb') as p: with self.open(self.shelf_file, 'rb') as p:
self._sh = pickle.load(p) self._sh = pickle.load(p)
except (IndexError, EOFError, pickle.UnpicklingError): except (IndexError, EOFError, pickle.UnpicklingError):
self.log.warning('Corrupted shelf file: {}'.format( self.log.warning('Corrupted shelf file: {}'.format(self.shelf_file))
self.shelf_file))
if not self.get('force_shelf', False): if not self.get('force_shelf', False):
raise raise
return self._sh return self._sh
@@ -585,20 +438,14 @@ class ShelfMixin(object):
def shelf_file(self, value): def shelf_file(self, value):
self._shelf_file = value self._shelf_file = value
def save(self, ignore_errors=False): def save(self):
try:
self.log.debug('Saving pickle') self.log.debug('Saving pickle')
if hasattr(self, '_sh') and self._sh is not None: if hasattr(self, '_sh') and self._sh is not None:
with self.open(self.shelf_file, 'wb') as f: with self.open(self.shelf_file, 'wb') as f:
pickle.dump(self._sh, f) pickle.dump(self._sh, f)
except Exception as ex:
self.log.warning("Could not save shelf state. Check folder permissions for: "
f" {self.shelf_file}. Error: { ex }")
if not ignore_errors:
raise
def pfilter(plugins, plugin_type=Analyser, **kwargs): def pfilter(plugins, plugin_type=Analysis, **kwargs):
""" Filter plugins by different criteria """ """ Filter plugins by different criteria """
if isinstance(plugins, models.Plugins): if isinstance(plugins, models.Plugins):
plugins = plugins.plugins plugins = plugins.plugins
@@ -613,20 +460,19 @@ def pfilter(plugins, plugin_type=Analyser, **kwargs):
plugin_type = plugin_type[0].upper() + plugin_type[1:] plugin_type = plugin_type[0].upper() + plugin_type[1:]
pclass = globals()[plugin_type] pclass = globals()[plugin_type]
logger.debug('Class: {}'.format(pclass)) logger.debug('Class: {}'.format(pclass))
candidates = filter(lambda x: isinstance(x, pclass), plugins) candidates = filter(lambda x: isinstance(x, pclass),
plugins)
except KeyError: except KeyError:
raise Error('{} is not a valid type'.format(plugin_type)) raise models.Error('{} is not a valid type'.format(plugin_type))
else: else:
candidates = plugins candidates = plugins
if 'name' in kwargs:
kwargs['name'] = kwargs['name'].lower()
logger.debug(candidates) logger.debug(candidates)
def matches(plug): def matches(plug):
res = all(getattr(plug, k, None) == v for (k, v) in kwargs.items()) res = all(getattr(plug, k, None) == v for (k, v) in kwargs.items())
logger.debug("matching {} with {}: {}".format(plug.name, kwargs, res)) logger.debug(
"matching {} with {}: {}".format(plug.name, kwargs, res))
return res return res
if kwargs: if kwargs:
@@ -645,58 +491,33 @@ def load_module(name, root=None):
def _log_subprocess_output(process): def _log_subprocess_output(process):
for line in iter(process.stdout.readline, b''): for line in iter(process.stdout.readline, b''):
logger.info('%s', line.decode()) logger.info('%r', line)
for line in iter(process.stderr.readline, b''): for line in iter(process.stderr.readline, b''):
logger.error('%s', line.decode()) logger.error('%r', line)
def missing_requirements(reqs):
queue = []
pool = multiprocessing.Pool(4)
for req in reqs:
res = pool.apply_async(pkg_resources.get_distribution, (req,))
queue.append((req, res))
missing = []
installed = []
for req, job in queue:
try:
installed.append(job.get(1))
except Exception:
missing.append(req)
return installed, missing
def list_dependencies(*plugins):
'''List all dependencies (python and nltk) for the given list of plugins'''
nltk_resources = set()
missing = []
installed = []
for info in plugins:
reqs = info.get('requirements', [])
if reqs:
inst, miss= missing_requirements(reqs)
installed += inst
missing += miss
nltk_resources |= set(info.get('nltk_resources', []))
return installed, missing, nltk_resources
def install_deps(*plugins): def install_deps(*plugins):
_, requirements, nltk_resources = list_dependencies(*plugins)
installed = False installed = False
nltk_resources = set()
for info in plugins:
requirements = info.get('requirements', [])
if requirements: if requirements:
logger.info('Installing requirements: ' + str(requirements))
pip_args = [sys.executable, '-m', 'pip', 'install'] pip_args = [sys.executable, '-m', 'pip', 'install']
for req in requirements: for req in requirements:
pip_args.append(req) pip_args.append(req)
process = subprocess.Popen( logger.info('Installing requirements: ' + str(requirements))
pip_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) process = subprocess.Popen(pip_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_log_subprocess_output(process) _log_subprocess_output(process)
exitcode = process.wait() exitcode = process.wait()
installed = True installed = True
if exitcode != 0: if exitcode != 0:
raise Error( raise models.Error("Dependencies not properly installed: {}".format(pip_args))
"Dependencies not properly installed: {}".format(pip_args)) nltk_resources |= set(info.get('nltk_resources', []))
installed_nltk = download(list(nltk_resources))
return installed or installed_nltk installed |= nltk.download(list(nltk_resources))
return installed
is_plugin_file = re.compile(r'.*\.senpy$|senpy_[a-zA-Z0-9_]+\.py$|' is_plugin_file = re.compile(r'.*\.senpy$|senpy_[a-zA-Z0-9_]+\.py$|'
@@ -713,7 +534,7 @@ def find_plugins(folders):
yield fpath yield fpath
def from_path(fpath, **kwargs): def from_path(fpath, install_on_fail=False, **kwargs):
logger.debug("Loading plugin from {}".format(fpath)) logger.debug("Loading plugin from {}".format(fpath))
if fpath.endswith('.py'): if fpath.endswith('.py'):
# We asume root is the dir of the file, and module is the name of the file # We asume root is the dir of the file, and module is the name of the file
@@ -723,18 +544,18 @@ def from_path(fpath, **kwargs):
yield instance yield instance
else: else:
info = parse_plugin_info(fpath) info = parse_plugin_info(fpath)
yield from_info(info, **kwargs) yield from_info(info, install_on_fail=install_on_fail, **kwargs)
def from_folder(folders, loader=from_path, **kwargs): def from_folder(folders, loader=from_path, **kwargs):
plugins = [] plugins = []
for fpath in find_plugins(folders): for fpath in find_plugins(folders):
for plugin in loader(fpath, **kwargs): for plugin in loader(fpath, **kwargs):
if plugin:
plugins.append(plugin) plugins.append(plugin)
return plugins return plugins
def from_info(info, root=None, strict=False, **kwargs): def from_info(info, root=None, install_on_fail=True, **kwargs):
if any(x not in info for x in ('module',)): if any(x not in info for x in ('module',)):
raise ValueError('Plugin info is not valid: {}'.format(info)) raise ValueError('Plugin info is not valid: {}'.format(info))
module = info["module"] module = info["module"]
@@ -745,17 +566,15 @@ def from_info(info, root=None, strict=False, **kwargs):
fun = partial(one_from_module, module, root=root, info=info, **kwargs) fun = partial(one_from_module, module, root=root, info=info, **kwargs)
try: try:
return fun() return fun()
except (ImportError, LookupError) as ex: except (ImportError, LookupError):
if strict or str(info.get("optional", "false")).lower() not in ["true", "t"]: install_deps(info)
raise return fun()
print(f"Could not import plugin: { info }: {ex}", file=sys.stderr)
return FailedPlugin(info, fun)
def parse_plugin_info(fpath): def parse_plugin_info(fpath):
logger.debug("Parsing plugin info: {}".format(fpath)) logger.debug("Parsing plugin info: {}".format(fpath))
with open(fpath, 'r') as f: with open(fpath, 'r') as f:
info = yaml.load(f, Loader=yaml.FullLoader) info = yaml.load(f)
info['_path'] = fpath info['_path'] = fpath
return info return info
@@ -773,9 +592,8 @@ def from_module(module, **kwargs):
def one_from_module(module, root, info, **kwargs): def one_from_module(module, root, info, **kwargs):
if '@type' in info: if '@type' in info:
cls = PluginMeta.from_type(info['@type']) cls = PluginMeta.from_type(info['@type'])
return cls(**info, **kwargs) return cls(info=info, **kwargs)
instance = next( instance = next(from_module(module=module, root=root, info=info, **kwargs), None)
from_module(module=module, root=root, info=info, **kwargs), None)
if not instance: if not instance:
raise Exception("No valid plugin for: {}".format(module)) raise Exception("No valid plugin for: {}".format(module))
return instance return instance
@@ -799,49 +617,24 @@ def _instances_in_module(module):
def _from_module_name(module, root, info=None, **kwargs): def _from_module_name(module, root, info=None, **kwargs):
module = load_module(module, root) module = load_module(module, root)
for plugin in _from_loaded_module( for plugin in _from_loaded_module(module=module, root=root, info=info, **kwargs):
module=module, root=root, info=info, **kwargs):
yield plugin yield plugin
def _from_loaded_module(module, info=None, **kwargs): def _from_loaded_module(module, info=None, **kwargs):
for cls in _classes_in_module(module): for cls in _classes_in_module(module):
yield cls(**(info or {}), **kwargs) yield cls(info=info, **kwargs)
for instance in _instances_in_module(module): for instance in _instances_in_module(module):
yield instance yield instance
cached_evs = {}
def evaluate(plugins, datasets, **kwargs): def evaluate(plugins, datasets, **kwargs):
for plug in plugins: ev = gsitk_compat.Eval(tuples=None,
if not hasattr(plug, 'as_pipe'): datasets=datasets,
raise Error('Plugin {} cannot be evaluated'.format(plug.name)) pipelines=[plugin.as_pipe() for plugin in plugins])
if not isinstance(datasets, dict):
datasets = gsitk_compat.prepare(datasets, download=True)
tuples = list(product(plugins, datasets))
missing = []
for (p, d) in tuples:
if (p.id, d) not in cached_evs:
pipe = p.as_pipe()
missing.append(gsitk_compat.EvalPipeline(pipe, d))
if missing:
ev = gsitk_compat.Eval(tuples=missing, datasets=datasets)
ev.evaluate() ev.evaluate()
results = ev.results results = ev.results
new_ev = evaluations_to_JSONLD(results, **kwargs) evaluations = evaluations_to_JSONLD(results, **kwargs)
for ev in new_ev:
dataset = ev.evaluatesOn
model = ev.evaluates
cached_evs[(model, dataset)] = ev
evaluations = []
logger.debug('%s. Cached evs: %s', tuples, cached_evs)
for (p, d) in tuples:
logger.debug('Adding %s, %s', d, p)
evaluations.append(cached_evs[(p.id, d)])
return evaluations return evaluations
@@ -854,12 +647,12 @@ def evaluations_to_JSONLD(results, flatten=False):
metric_names = ['accuracy', 'precision_macro', 'recall_macro', metric_names = ['accuracy', 'precision_macro', 'recall_macro',
'f1_macro', 'f1_weighted', 'f1_micro', 'f1_macro'] 'f1_macro', 'f1_weighted', 'f1_micro', 'f1_macro']
for index, row in results.fillna('Not Available').iterrows(): for index, row in results.iterrows():
evaluation = models.Evaluation() evaluation = models.Evaluation()
if row.get('CV', True): if row.get('CV', True):
evaluation['@type'] = ['StaticCV', 'Evaluation'] evaluation['@type'] = ['StaticCV', 'Evaluation']
evaluation.evaluatesOn = row['Dataset'] evaluation.evaluatesOn = row['Dataset']
evaluation.evaluates = row['Model'].rstrip('__' + row['Dataset']) evaluation.evaluates = row['Model']
i = 0 i = 0
if flatten: if flatten:
metric = models.Metric() metric = models.Metric()
@@ -870,28 +663,10 @@ def evaluations_to_JSONLD(results, flatten=False):
# We should probably discontinue this representation # We should probably discontinue this representation
for name in metric_names: for name in metric_names:
metric = models.Metric() metric = models.Metric()
metric['@id'] = 'Metric' + str(i)
metric['@type'] = name.capitalize() metric['@type'] = name.capitalize()
metric.value = row[name] metric.value = row[name]
evaluation.metrics.append(metric) evaluation.metrics.append(metric)
i += 1 i += 1
evaluations.append(evaluation) evaluations.append(evaluation)
return evaluations return evaluations
class ScikitWrapper(BaseEstimator, TransformerMixin):
def __init__(self, plugin=None, **data):
super().__init__(**data)
self.plugin = plugin
def fit(self, X=None, y=None):
return self
def transform(self, X):
return self.plugin.evaluate_func(X, None)
def predict(self, X):
return self.transform(X)
def fit_transform(self, X, y):
self.fit(X, y)
return self.transform(X)

View File

@@ -1,24 +1,5 @@
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import *
from senpy.plugins import EmotionConversionPlugin from senpy.plugins import EmotionConversionPlugin
from senpy.models import EmotionSet, Emotion, EmotionConversion from senpy.models import EmotionSet, Emotion, Error
from senpy.errors import Error
import logging import logging
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -30,16 +11,16 @@ class CentroidConversion(EmotionConversionPlugin):
categorical one, and vice versa. The centroids used in the conversion categorical one, and vice versa. The centroids used in the conversion
are configurable and appear in the semantic description of the plugin. are configurable and appear in the semantic description of the plugin.
''' '''
def __init__(self, info, *args, **kwargs):
if 'centroids' not in info:
raise Error('Centroid conversion plugins should provide '
'the centroids in their senpy file')
if 'onyx:doesConversion' not in info:
if 'centroids_direction' not in info:
raise Error('Please, provide centroids direction')
centroids: Dict cf, ct = info['centroids_direction']
centroids_direction: tuple[str, str] info['onyx:doesConversion'] = [{
aliases: Dict[str, str] ={}
def __super__(self, centroids, centroids_direction, doesConversion=None, aliases={}, *args, **kwargs):
super().__init__(*args, **kwargs)
cf, ct = centroids_direction
if doesConversion is None:
doesConversion = [{
'onyx:conversionFrom': cf, 'onyx:conversionFrom': cf,
'onyx:conversionTo': ct 'onyx:conversionTo': ct
}, { }, {
@@ -47,16 +28,17 @@ class CentroidConversion(EmotionConversionPlugin):
'onyx:conversionTo': cf 'onyx:conversionTo': cf
}] }]
if 'aliases' in info:
if aliases: aliases = info['aliases']
ncentroids = {} ncentroids = {}
for k1, v1 in centroids.items(): for k1, v1 in info['centroids'].items():
nv1 = {} nv1 = {}
for k2, v2 in v1.items(): for k2, v2 in v1.items():
nv1[aliases.get(k2, k2)] = v2 nv1[aliases.get(k2, k2)] = v2
ncentroids[aliases.get(k1, k1)] = nv1 ncentroids[aliases.get(k1, k1)] = nv1
centroids = centroids info['centroids'] = ncentroids
self.centroids = centroids
super(CentroidConversion, self).__init__(info, *args, **kwargs)
self.dimensions = set() self.dimensions = set()
for c in self.centroids.values(): for c in self.centroids.values():
@@ -103,13 +85,7 @@ class CentroidConversion(EmotionConversionPlugin):
def distance(centroid): def distance(centroid):
return sum(distance_k(centroid, original, k) for k in dimensions) return sum(distance_k(centroid, original, k) for k in dimensions)
distances = {k: distance(centroids[k]) for k in centroids} emotion = min(centroids, key=lambda x: distance(centroids[x]))
logger.debug('Converting %s', original)
logger.debug('Centroids: %s', centroids)
logger.debug('Distances: %s', distances)
emotion = min(distances, key=lambda x: distances[x])
result = Emotion(onyx__hasEmotionCategory=emotion) result = Emotion(onyx__hasEmotionCategory=emotion)
result.onyx__algorithmConfidence = distance(centroids[emotion]) result.onyx__algorithmConfidence = distance(centroids[emotion])
@@ -127,9 +103,7 @@ class CentroidConversion(EmotionConversionPlugin):
for i in emotionSet.onyx__hasEmotion: for i in emotionSet.onyx__hasEmotion:
e.onyx__hasEmotion.append(self._backwards_conversion(i)) e.onyx__hasEmotion.append(self._backwards_conversion(i))
else: else:
raise Error('EMOTION MODEL NOT KNOWN. ' raise Error('EMOTION MODEL NOT KNOWN')
'Cannot convert from {} to {}'.format(fromModel,
toModel))
yield e yield e
def test(self, info=None): def test(self, info=None):
@@ -154,7 +128,7 @@ class CentroidConversion(EmotionConversionPlugin):
"centroids_direction": ["emoml:big6", "emoml:fsre-dimensions"] "centroids_direction": ["emoml:big6", "emoml:fsre-dimensions"]
} }
c = CentroidConversion.parse_obj(info) c = CentroidConversion(info)
es1 = EmotionSet() es1 = EmotionSet()
e1 = Emotion() e1 = Emotion()

Some files were not shown because too many files have changed in this diff Show More