mirror of
https://github.com/gsi-upm/senpy
synced 2025-10-19 09:48:26 +00:00
Compare commits
41 Commits
44-add-bas
...
7ae493b3f3
Author | SHA1 | Date | |
---|---|---|---|
|
7ae493b3f3 | ||
|
435d107677 | ||
|
c4321dc500 | ||
|
3bd3c87af4 | ||
|
45421f4613 | ||
|
7aa69e3d02 | ||
|
a20252e4bd | ||
|
9758a2977f | ||
|
8a516d927e | ||
|
4ba30304a4 | ||
|
41aa142ce0 | ||
|
b48730137d | ||
|
f1ec057b16 | ||
|
f6ca82cac8 | ||
|
318acd5a71 | ||
|
c8f6f5613d | ||
|
748d1a00bd | ||
|
a82e4ed440 | ||
|
c939b095de | ||
|
6dd4a44924 | ||
|
4291c5eabf | ||
|
7c7a815d1a | ||
|
a3eb8f196c | ||
|
00ffbb3804 | ||
|
13cf0c71c5 | ||
|
e5662d482e | ||
|
61181db199 | ||
|
a1663a3f31 | ||
|
83b23dbdf4 | ||
|
4675d9acf1 | ||
|
6832a2816d | ||
|
7a8abf1823 | ||
|
a21ce0d90e | ||
|
a964e586d7 | ||
|
bce42b5bb4 | ||
|
1313853788 | ||
|
697e779767 | ||
|
48f5ffafa1 | ||
|
73f7cbbe8a | ||
|
07a41236f8 | ||
|
55db97cf62 |
@@ -18,40 +18,33 @@ before_script:
|
|||||||
stage: test
|
stage: test
|
||||||
script:
|
script:
|
||||||
- make -e test-$PYTHON_VERSION
|
- make -e test-$PYTHON_VERSION
|
||||||
|
except:
|
||||||
|
- tags # Avoid unnecessary double testing
|
||||||
|
|
||||||
test-3.5:
|
test-3.6:
|
||||||
<<: *test_definition
|
<<: *test_definition
|
||||||
variables:
|
variables:
|
||||||
PYTHON_VERSION: "3.5"
|
PYTHON_VERSION: "3.6"
|
||||||
|
|
||||||
test-2.7:
|
test-3.7:
|
||||||
<<: *test_definition
|
<<: *test_definition
|
||||||
|
allow_failure: true
|
||||||
variables:
|
variables:
|
||||||
PYTHON_VERSION: "2.7"
|
PYTHON_VERSION: "3.7"
|
||||||
|
|
||||||
.image: &image_definition
|
push:
|
||||||
stage: push
|
stage: push
|
||||||
script:
|
script:
|
||||||
- make -e push-$PYTHON_VERSION
|
- make -e push
|
||||||
only:
|
only:
|
||||||
- tags
|
- tags
|
||||||
- triggers
|
- triggers
|
||||||
- fix-makefiles
|
- fix-makefiles
|
||||||
|
|
||||||
push-3.5:
|
|
||||||
<<: *image_definition
|
|
||||||
variables:
|
|
||||||
PYTHON_VERSION: "3.5"
|
|
||||||
|
|
||||||
push-2.7:
|
|
||||||
<<: *image_definition
|
|
||||||
variables:
|
|
||||||
PYTHON_VERSION: "2.7"
|
|
||||||
|
|
||||||
push-latest:
|
push-latest:
|
||||||
<<: *image_definition
|
stage: push
|
||||||
variables:
|
script:
|
||||||
PYTHON_VERSION: latest
|
- make -e push-latest
|
||||||
only:
|
only:
|
||||||
- master
|
- master
|
||||||
- triggers
|
- triggers
|
||||||
@@ -108,4 +101,3 @@ cleanup_py:
|
|||||||
when: always # this is important; run even if preceding stages failed.
|
when: always # this is important; run even if preceding stages failed.
|
||||||
script:
|
script:
|
||||||
- rm -vf ~/.pypirc # we don't want to leave these around, but GitLab may clean up anyway.
|
- rm -vf ~/.pypirc # we don't want to leave these around, but GitLab may clean up anyway.
|
||||||
- docker logout
|
|
||||||
|
@@ -1,5 +1,14 @@
|
|||||||
IMAGENAME?=$(NAME)
|
ifndef IMAGENAME
|
||||||
|
ifdef CI_REGISTRY_IMAGE
|
||||||
|
IMAGENAME=$(CI_REGISTRY_IMAGE)
|
||||||
|
else
|
||||||
|
IMAGENAME=$(NAME)
|
||||||
|
endif
|
||||||
|
endif
|
||||||
|
|
||||||
IMAGEWTAG?=$(IMAGENAME):$(VERSION)
|
IMAGEWTAG?=$(IMAGENAME):$(VERSION)
|
||||||
|
DOCKER_FLAGS?=$(-ti)
|
||||||
|
DOCKER_CMD?=
|
||||||
|
|
||||||
docker-login: ## Log in to the registry. It will only be used in the server, or when running a CI task locally (if CI_BUILD_TOKEN is set).
|
docker-login: ## Log in to the registry. It will only be used in the server, or when running a CI task locally (if CI_BUILD_TOKEN is set).
|
||||||
ifeq ($(CI_BUILD_TOKEN),)
|
ifeq ($(CI_BUILD_TOKEN),)
|
||||||
@@ -19,6 +28,19 @@ else
|
|||||||
@docker logout
|
@docker logout
|
||||||
endif
|
endif
|
||||||
|
|
||||||
|
docker-run: ## Build a generic docker image
|
||||||
|
docker run $(DOCKER_FLAGS) $(IMAGEWTAG) $(DOCKER_CMD)
|
||||||
|
|
||||||
|
docker-build: ## Build a generic docker image
|
||||||
|
docker build . -t $(IMAGEWTAG)
|
||||||
|
|
||||||
|
docker-push: docker-login ## Push a generic docker image
|
||||||
|
docker push $(IMAGEWTAG)
|
||||||
|
|
||||||
|
docker-latest-push: docker-login ## Push the latest image
|
||||||
|
docker tag $(IMAGEWTAG) $(IMAGENAME)
|
||||||
|
docker push $(IMAGENAME)
|
||||||
|
|
||||||
login:: docker-login
|
login:: docker-login
|
||||||
|
|
||||||
clean:: docker-clean
|
clean:: docker-clean
|
||||||
|
@@ -14,7 +14,7 @@ push-github: ## Push the code to github. You need to set up GITHUB_DEPLOY_KEY
|
|||||||
ifeq ($(GITHUB_DEPLOY_KEY),)
|
ifeq ($(GITHUB_DEPLOY_KEY),)
|
||||||
else
|
else
|
||||||
$(eval KEY_FILE := "$(shell mktemp)")
|
$(eval KEY_FILE := "$(shell mktemp)")
|
||||||
@echo "$(GITHUB_DEPLOY_KEY)" > $(KEY_FILE)
|
@printf '%b' '$(GITHUB_DEPLOY_KEY)' > $(KEY_FILE)
|
||||||
@git remote rm github-deploy || true
|
@git remote rm github-deploy || true
|
||||||
git remote add github-deploy $(GITHUB_REPO)
|
git remote add github-deploy $(GITHUB_REPO)
|
||||||
-@GIT_SSH_COMMAND="ssh -i $(KEY_FILE)" git fetch github-deploy $(CI_COMMIT_REF_NAME)
|
-@GIT_SSH_COMMAND="ssh -i $(KEY_FILE)" git fetch github-deploy $(CI_COMMIT_REF_NAME)
|
||||||
@@ -22,7 +22,4 @@ else
|
|||||||
rm $(KEY_FILE)
|
rm $(KEY_FILE)
|
||||||
endif
|
endif
|
||||||
|
|
||||||
push:: git-push
|
.PHONY:: commit tag git-push git-pull push-github
|
||||||
pull:: git-pull
|
|
||||||
|
|
||||||
.PHONY:: commit tag push git-push git-pull push-github
|
|
||||||
|
@@ -13,7 +13,7 @@
|
|||||||
KUBE_CA_TEMP=false
|
KUBE_CA_TEMP=false
|
||||||
ifndef KUBE_CA_PEM_FILE
|
ifndef KUBE_CA_PEM_FILE
|
||||||
KUBE_CA_PEM_FILE:=$$PWD/.ca.crt
|
KUBE_CA_PEM_FILE:=$$PWD/.ca.crt
|
||||||
CREATED:=$(shell echo -e "$(KUBE_CA_BUNDLE)" > $(KUBE_CA_PEM_FILE))
|
CREATED:=$(shell printf '%b\n' '$(KUBE_CA_BUNDLE)' > $(KUBE_CA_PEM_FILE))
|
||||||
endif
|
endif
|
||||||
KUBE_TOKEN?=""
|
KUBE_TOKEN?=""
|
||||||
KUBE_NAMESPACE?=$(NAME)
|
KUBE_NAMESPACE?=$(NAME)
|
||||||
|
@@ -1,17 +1,15 @@
|
|||||||
makefiles-remote:
|
makefiles-remote:
|
||||||
@git remote add makefiles ssh://git@lab.cluster.gsi.dit.upm.es:2200/docs/templates/makefiles.git 2>/dev/null || true
|
git ls-remote --exit-code makefiles 2> /dev/null || git remote add makefiles ssh://git@lab.cluster.gsi.dit.upm.es:2200/docs/templates/makefiles.git
|
||||||
|
|
||||||
makefiles-commit: makefiles-remote
|
makefiles-commit: makefiles-remote
|
||||||
git add -f .makefiles
|
git add -f .makefiles
|
||||||
git commit -em "Updated makefiles from ${NAME}"
|
git commit -em "Updated makefiles from ${NAME}"
|
||||||
|
|
||||||
makefiles-push:
|
makefiles-push:
|
||||||
|
git fetch makefiles $(NAME)
|
||||||
git subtree push --prefix=.makefiles/ makefiles $(NAME)
|
git subtree push --prefix=.makefiles/ makefiles $(NAME)
|
||||||
|
|
||||||
makefiles-pull: makefiles-remote
|
makefiles-pull: makefiles-remote
|
||||||
git subtree pull --prefix=.makefiles/ makefiles master --squash
|
git subtree pull --prefix=.makefiles/ makefiles master --squash
|
||||||
|
|
||||||
pull:: makefiles-pull
|
.PHONY:: makefiles-remote makefiles-commit makefiles-push makefiles-pull
|
||||||
push:: makefiles-push
|
|
||||||
|
|
||||||
.PHONY:: makefiles-remote makefiles-commit makefiles-push makefiles-pull pull push
|
|
||||||
|
@@ -26,9 +26,10 @@ Dockerfile-%: Dockerfile.template ## Generate a specific dockerfile (e.g. Docke
|
|||||||
quick_build: $(addprefix build-, $(PYMAIN))
|
quick_build: $(addprefix build-, $(PYMAIN))
|
||||||
|
|
||||||
build: $(addprefix build-, $(PYVERSIONS)) ## Build all images / python versions
|
build: $(addprefix build-, $(PYVERSIONS)) ## Build all images / python versions
|
||||||
|
docker tag $(IMAGEWTAG)-python$(PYMAIN) $(IMAGEWTAG)
|
||||||
|
|
||||||
build-%: version Dockerfile-% ## Build a specific version (e.g. build-2.7)
|
build-%: version Dockerfile-% ## Build a specific version (e.g. build-2.7)
|
||||||
docker build -t '$(IMAGEWTAG)-python$*' -f Dockerfile-$* .;
|
docker build --pull -t '$(IMAGEWTAG)-python$*' -f Dockerfile-$* .;
|
||||||
|
|
||||||
dev-%: ## Launch a specific development environment using docker (e.g. dev-2.7)
|
dev-%: ## Launch a specific development environment using docker (e.g. dev-2.7)
|
||||||
@docker start $(NAME)-dev$* || (\
|
@docker start $(NAME)-dev$* || (\
|
||||||
@@ -75,7 +76,7 @@ pip_upload: pip_test ## Upload package to pip
|
|||||||
|
|
||||||
push-latest: $(addprefix push-latest-,$(PYVERSIONS)) ## Push the "latest" tag to dockerhub
|
push-latest: $(addprefix push-latest-,$(PYVERSIONS)) ## Push the "latest" tag to dockerhub
|
||||||
docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGEWTAG)'
|
docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGEWTAG)'
|
||||||
docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGENAME)'
|
docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGENAME):latest'
|
||||||
docker push '$(IMAGENAME):latest'
|
docker push '$(IMAGENAME):latest'
|
||||||
docker push '$(IMAGEWTAG)'
|
docker push '$(IMAGEWTAG)'
|
||||||
|
|
||||||
|
@@ -6,7 +6,10 @@ services:
|
|||||||
language: python
|
language: python
|
||||||
|
|
||||||
env:
|
env:
|
||||||
- PYV=2.7
|
- PYV=3.4
|
||||||
- PYV=3.5
|
- PYV=3.5
|
||||||
|
- PYV=3.6
|
||||||
|
- PYV=3.7
|
||||||
|
# - PYV=3.3 # Apt fails in this docker image
|
||||||
# run nosetests - Tests
|
# run nosetests - Tests
|
||||||
script: make test-$PYV
|
script: make test-$PYV
|
||||||
|
67
CHANGELOG.md
Normal file
67
CHANGELOG.md
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
# Changelog
|
||||||
|
All notable changes to this project will be documented in this file.
|
||||||
|
|
||||||
|
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||||
|
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||||
|
|
||||||
|
## [1.0.1]
|
||||||
|
### Added
|
||||||
|
* License headers
|
||||||
|
* Description for PyPI (setup.py)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
* The evaluation tab shows datasets inline, and a tooltip shows the number of instances
|
||||||
|
* The docs should be clearer now
|
||||||
|
|
||||||
|
## [1.0.0]
|
||||||
|
### Fixed
|
||||||
|
* Restored hash changing function in `main.js`
|
||||||
|
|
||||||
|
## 0.20
|
||||||
|
|
||||||
|
### Added
|
||||||
|
* Objects can control the keys that will be used in `serialize`/`jsonld`/`as_dict` by specifying a list of keys in `terse_keys`.
|
||||||
|
e.g.
|
||||||
|
```python
|
||||||
|
>>> class MyModel(senpy.models.BaseModel):
|
||||||
|
... _terse_keys = ['visible']
|
||||||
|
... invisible = 5
|
||||||
|
... visible = 1
|
||||||
|
...
|
||||||
|
>>> m = MyModel(id='testing')
|
||||||
|
>>> m.jsonld()
|
||||||
|
{'invisible': 5, 'visible': 1, '@id': 'testing'}
|
||||||
|
>>> m.jsonld(verbose=False)
|
||||||
|
{'visible': 1}
|
||||||
|
```
|
||||||
|
* Configurable logging format.
|
||||||
|
* Added default terse keys for the most common classes (entry, sentiment, emotion...).
|
||||||
|
* Flag parameters (boolean) are set to true even when no value is added (e.g. `&verbose` is the same as `&verbose=true`).
|
||||||
|
* Plugin and parameter descriptions are now formatted with (showdown)[https://github.com/showdownjs/showdown].
|
||||||
|
* The web UI requests extra_parameters from the server. This is useful for pipelines. See #52
|
||||||
|
* First batch of semantic tests (using SPARQL)
|
||||||
|
* `Plugin.path()` method to get a file path from a relative path (using the senpy data folder)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
* `install_deps` now checks what requirements are already met before installing with pip.
|
||||||
|
* Help is now provided verbosely by default
|
||||||
|
* Other outputs are terse by default. This means some properties are now hidden unless verbose is set.
|
||||||
|
* `sentiments` and `emotions` are now `marl:hasOpinion` and `onyx:hasEmotionSet`, respectively.
|
||||||
|
* Nicer logging format
|
||||||
|
* Context aliases (e.g. `sentiments` and `emotions` properties) have been replaced with the original properties (e.g. `marl:hasOpinion` and `onyx:hasEmotionSet**), to use aliases, pass the `aliases** parameter.
|
||||||
|
* Several UI improvements
|
||||||
|
* Dedicated tab to show the list of plugins
|
||||||
|
* URLs in plugin descriptions are shown as links
|
||||||
|
* The format of the response is selected by clicking on a tab instead of selecting from a drop-down
|
||||||
|
* list of examples
|
||||||
|
* Bootstrap v4
|
||||||
|
* RandEmotion and RandSentiment are no longer included in the base set of plugins
|
||||||
|
* The `--plugin-folder` option can be used more than once, and every folder will be added to the app.
|
||||||
|
|
||||||
|
### Deprecated
|
||||||
|
### Removed
|
||||||
|
* Python 2.7 is no longer test or officially supported
|
||||||
|
### Fixed
|
||||||
|
* Plugin descriptions are now dedented when they are extracted from the docstring.
|
||||||
|
### Security
|
||||||
|
|
@@ -6,8 +6,6 @@ RUN apt-get update && apt-get install -y \
|
|||||||
libblas-dev liblapack-dev liblapacke-dev gfortran \
|
libblas-dev liblapack-dev liblapacke-dev gfortran \
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
RUN pip install --no-cache-dir --upgrade numpy scipy scikit-learn
|
|
||||||
|
|
||||||
RUN mkdir /cache/ /senpy-plugins /data/
|
RUN mkdir /cache/ /senpy-plugins /data/
|
||||||
|
|
||||||
VOLUME /data/
|
VOLUME /data/
|
||||||
|
@@ -1,9 +1,11 @@
|
|||||||
include requirements.txt
|
include requirements.txt
|
||||||
include test-requirements.txt
|
include test-requirements.txt
|
||||||
|
include extra-requirements.txt
|
||||||
include README.rst
|
include README.rst
|
||||||
|
include LICENSE.txt
|
||||||
include senpy/VERSION
|
include senpy/VERSION
|
||||||
graft senpy/plugins
|
graft senpy/plugins
|
||||||
graft senpy/schemas
|
graft senpy/schemas
|
||||||
graft senpy/templates
|
graft senpy/templates
|
||||||
graft senpy/static
|
graft senpy/static
|
||||||
graft img
|
graft img
|
||||||
|
2
Makefile
2
Makefile
@@ -5,7 +5,7 @@ IMAGENAME=gsiupm/senpy
|
|||||||
|
|
||||||
# The first version is the main one (used for quick builds)
|
# The first version is the main one (used for quick builds)
|
||||||
# See .makefiles/python.mk for more info
|
# See .makefiles/python.mk for more info
|
||||||
PYVERSIONS=3.5 2.7
|
PYVERSIONS=3.6 3.7
|
||||||
|
|
||||||
DEVPORT=5000
|
DEVPORT=5000
|
||||||
|
|
||||||
|
2
Procfile
2
Procfile
@@ -1 +1 @@
|
|||||||
web: python -m senpy --host 0.0.0.0 --port $PORT --default-plugins
|
web: python -m senpy --host 0.0.0.0 --port $PORT
|
||||||
|
27
README.rst
27
README.rst
@@ -1,10 +1,19 @@
|
|||||||
.. image:: img/header.png
|
.. image:: img/header.png
|
||||||
:width: 100%
|
:width: 100%
|
||||||
:target: http://demos.gsi.dit.upm.es/senpy
|
:target: http://senpy.gsi.upm.es
|
||||||
|
|
||||||
.. image:: https://travis-ci.org/gsi-upm/senpy.svg?branch=master
|
.. image:: https://travis-ci.org/gsi-upm/senpy.svg?branch=master
|
||||||
:target: https://travis-ci.org/gsi-upm/senpy
|
:target: https://travis-ci.org/gsi-upm/senpy
|
||||||
|
|
||||||
|
.. image:: https://lab.gsi.upm.es/senpy/senpy/badges/master/pipeline.svg
|
||||||
|
:target: https://lab.gsi.upm.es/senpy/senpy/commits/master
|
||||||
|
|
||||||
|
.. image:: https://lab.gsi.upm.es/senpy/senpy/badges/master/coverage.svg
|
||||||
|
:target: https://lab.gsi.upm.es/senpy/senpy/commits/master
|
||||||
|
|
||||||
|
.. image:: https://img.shields.io/pypi/l/requests.svg
|
||||||
|
:target: https://lab.gsi.upm.es/senpy/senpy/
|
||||||
|
|
||||||
Senpy lets you create sentiment analysis web services easily, fast and using a well known API.
|
Senpy lets you create sentiment analysis web services easily, fast and using a well known API.
|
||||||
As a bonus, senpy services use semantic vocabularies (e.g. `NIF <http://persistence.uni-leipzig.org/nlp2rdf/>`_, `Marl <http://www.gsi.dit.upm.es/ontologies/marl>`_, `Onyx <http://www.gsi.dit.upm.es/ontologies/onyx>`_) and formats (turtle, JSON-LD, xml-rdf).
|
As a bonus, senpy services use semantic vocabularies (e.g. `NIF <http://persistence.uni-leipzig.org/nlp2rdf/>`_, `Marl <http://www.gsi.dit.upm.es/ontologies/marl>`_, `Onyx <http://www.gsi.dit.upm.es/ontologies/onyx>`_) and formats (turtle, JSON-LD, xml-rdf).
|
||||||
|
|
||||||
@@ -12,7 +21,7 @@ Have you ever wanted to turn your sentiment analysis algorithms into a service?
|
|||||||
With senpy, now you can.
|
With senpy, now you can.
|
||||||
It provides all the tools so you just have to worry about improving your algorithms:
|
It provides all the tools so you just have to worry about improving your algorithms:
|
||||||
|
|
||||||
`See it in action. <http://senpy.cluster.gsi.dit.upm.es/>`_
|
`See it in action. <http://senpy.gsi.upm.es/>`_
|
||||||
|
|
||||||
Installation
|
Installation
|
||||||
------------
|
------------
|
||||||
@@ -38,9 +47,9 @@ If you want to install senpy globally, use sudo instead of the ``--user`` flag.
|
|||||||
|
|
||||||
Docker Image
|
Docker Image
|
||||||
************
|
************
|
||||||
Build the image or use the pre-built one: ``docker run -ti -p 5000:5000 gsiupm/senpy --default-plugins``.
|
Build the image or use the pre-built one: ``docker run -ti -p 5000:5000 gsiupm/senpy``.
|
||||||
|
|
||||||
To add custom plugins, add a volume and tell senpy where to find the plugins: ``docker run -ti -p 5000:5000 -v <PATH OF PLUGINS>:/plugins gsiupm/senpy --default-plugins -f /plugins``
|
To add custom plugins, add a volume and tell senpy where to find the plugins: ``docker run -ti -p 5000:5000 -v <PATH OF PLUGINS>:/plugins gsiupm/senpy -f /plugins``
|
||||||
|
|
||||||
|
|
||||||
Developing
|
Developing
|
||||||
@@ -125,6 +134,16 @@ For more information, check out the `documentation <http://senpy.readthedocs.org
|
|||||||
------------------------------------------------------------------------------------
|
------------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
Python 2.x compatibility
|
||||||
|
------------------------
|
||||||
|
|
||||||
|
Keeping compatibility between python 2.7 and 3.x is not always easy, especially for a framework that deals both with text and web requests.
|
||||||
|
Hence, starting February 2019, this project will no longer make efforts to support python 2.7, which will reach its end of life in 2020.
|
||||||
|
Most of the functionality should still work, and the compatibility shims will remain for now, but we cannot make any guarantees at this point.
|
||||||
|
Instead, the maintainers will focus their efforts on keeping the codebase compatible across different Python 3.3+ versions, including upcoming ones.
|
||||||
|
We apologize for the inconvenience.
|
||||||
|
|
||||||
|
|
||||||
Acknowledgement
|
Acknowledgement
|
||||||
---------------
|
---------------
|
||||||
This development has been partially funded by the European Union through the MixedEmotions Project (project number H2020 655632), as part of the `RIA ICT 15 Big data and Open Data Innovation and take-up` programme.
|
This development has been partially funded by the European Union through the MixedEmotions Project (project number H2020 655632), as part of the `RIA ICT 15 Big data and Open Data Innovation and take-up` programme.
|
||||||
|
@@ -1,4 +0,0 @@
|
|||||||
import os
|
|
||||||
|
|
||||||
SERVER_PORT = os.environ.get("SERVER_PORT", 5000)
|
|
||||||
DEBUG = os.environ.get("DEBUG", True)
|
|
10
docker-compose.dev.yml
Normal file
10
docker-compose.dev.yml
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
version: '3'
|
||||||
|
services:
|
||||||
|
senpy:
|
||||||
|
image: "${IMAGENAME-gsiupm/senpy}:${VERSION-latest}"
|
||||||
|
entrypoint: ["/bin/bash"]
|
||||||
|
working_dir: "/senpy-plugins"
|
||||||
|
ports:
|
||||||
|
- 5000:5000
|
||||||
|
volumes:
|
||||||
|
- ".:/usr/src/app/"
|
9
docker-compose.test.yml
Normal file
9
docker-compose.test.yml
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
version: '3'
|
||||||
|
services:
|
||||||
|
test:
|
||||||
|
image: "${IMAGENAME-gsiupm/senpy}:${VERSION-dev}"
|
||||||
|
entrypoint: ["py.test"]
|
||||||
|
volumes:
|
||||||
|
- ".:/usr/src/app/"
|
||||||
|
command:
|
||||||
|
[]
|
11
docker-compose.yml
Normal file
11
docker-compose.yml
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
version: '3'
|
||||||
|
services:
|
||||||
|
senpy:
|
||||||
|
image: "${IMAGENAME-gsiupm/senpy}:${VERSION-dev}"
|
||||||
|
build:
|
||||||
|
context: .
|
||||||
|
dockerfile: Dockerfile${PYVERSION--2.7}
|
||||||
|
ports:
|
||||||
|
- 5001:5000
|
||||||
|
volumes:
|
||||||
|
- "./data:/data"
|
4428
docs/Advanced.ipynb
Normal file
4428
docs/Advanced.ipynb
Normal file
File diff suppressed because it is too large
Load Diff
592
docs/Evaluation.ipynb
Normal file
592
docs/Evaluation.ipynb
Normal file
@@ -0,0 +1,592 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Evaluating Services"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Sentiment analysis plugins can also be evaluated on a series of pre-defined datasets.\n",
|
||||||
|
"This can be done in three ways: through the Web UI (playground), through the web API and programmatically.\n",
|
||||||
|
"\n",
|
||||||
|
"Regardless of the way you perform the evaluation, you will need to specify a plugin (service) that you want to evaluate, and a series of datasets on which it should be evaluated.\n",
|
||||||
|
"\n",
|
||||||
|
"to evaluate a plugin on a dataset, senpy use the plugin to predict the sentiment in each entry in the dataset.\n",
|
||||||
|
"These predictions are compared with the expected values to produce several metrics, such as: accuracy, precision and f1-score.\n",
|
||||||
|
"\n",
|
||||||
|
"**note**: the evaluation process might take long for plugins that use external services, such as `sentiment140`.\n",
|
||||||
|
"\n",
|
||||||
|
"**note**: plugins are assumed to be pre-trained and invariant. i.e., the prediction for an entry should "
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Web UI (Playground)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"The playground should contain a tab for Evaluation, where you can select any plugin that can be evaluated, and the set of datasets that you want to test the plugin on.\n",
|
||||||
|
"\n",
|
||||||
|
"For example, the image below shows the results of the `sentiment-vader` plugin on the `vader` and `sts` datasets:\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
""
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Web API"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"The api exposes an endpoint (`/evaluate`), which accents the plugin and the set of datasets on which it should be evaluated."
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"The following code is not necessary, but it will display the results better:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Here is a simple call using the requests library:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 2,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/html": [
|
||||||
|
"<style>.output_html .hll { background-color: #ffffcc }\n",
|
||||||
|
".output_html { background: #f8f8f8; }\n",
|
||||||
|
".output_html .c { color: #408080; font-style: italic } /* Comment */\n",
|
||||||
|
".output_html .err { border: 1px solid #FF0000 } /* Error */\n",
|
||||||
|
".output_html .k { color: #008000; font-weight: bold } /* Keyword */\n",
|
||||||
|
".output_html .o { color: #666666 } /* Operator */\n",
|
||||||
|
".output_html .ch { color: #408080; font-style: italic } /* Comment.Hashbang */\n",
|
||||||
|
".output_html .cm { color: #408080; font-style: italic } /* Comment.Multiline */\n",
|
||||||
|
".output_html .cp { color: #BC7A00 } /* Comment.Preproc */\n",
|
||||||
|
".output_html .cpf { color: #408080; font-style: italic } /* Comment.PreprocFile */\n",
|
||||||
|
".output_html .c1 { color: #408080; font-style: italic } /* Comment.Single */\n",
|
||||||
|
".output_html .cs { color: #408080; font-style: italic } /* Comment.Special */\n",
|
||||||
|
".output_html .gd { color: #A00000 } /* Generic.Deleted */\n",
|
||||||
|
".output_html .ge { font-style: italic } /* Generic.Emph */\n",
|
||||||
|
".output_html .gr { color: #FF0000 } /* Generic.Error */\n",
|
||||||
|
".output_html .gh { color: #000080; font-weight: bold } /* Generic.Heading */\n",
|
||||||
|
".output_html .gi { color: #00A000 } /* Generic.Inserted */\n",
|
||||||
|
".output_html .go { color: #888888 } /* Generic.Output */\n",
|
||||||
|
".output_html .gp { color: #000080; font-weight: bold } /* Generic.Prompt */\n",
|
||||||
|
".output_html .gs { font-weight: bold } /* Generic.Strong */\n",
|
||||||
|
".output_html .gu { color: #800080; font-weight: bold } /* Generic.Subheading */\n",
|
||||||
|
".output_html .gt { color: #0044DD } /* Generic.Traceback */\n",
|
||||||
|
".output_html .kc { color: #008000; font-weight: bold } /* Keyword.Constant */\n",
|
||||||
|
".output_html .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */\n",
|
||||||
|
".output_html .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */\n",
|
||||||
|
".output_html .kp { color: #008000 } /* Keyword.Pseudo */\n",
|
||||||
|
".output_html .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */\n",
|
||||||
|
".output_html .kt { color: #B00040 } /* Keyword.Type */\n",
|
||||||
|
".output_html .m { color: #666666 } /* Literal.Number */\n",
|
||||||
|
".output_html .s { color: #BA2121 } /* Literal.String */\n",
|
||||||
|
".output_html .na { color: #7D9029 } /* Name.Attribute */\n",
|
||||||
|
".output_html .nb { color: #008000 } /* Name.Builtin */\n",
|
||||||
|
".output_html .nc { color: #0000FF; font-weight: bold } /* Name.Class */\n",
|
||||||
|
".output_html .no { color: #880000 } /* Name.Constant */\n",
|
||||||
|
".output_html .nd { color: #AA22FF } /* Name.Decorator */\n",
|
||||||
|
".output_html .ni { color: #999999; font-weight: bold } /* Name.Entity */\n",
|
||||||
|
".output_html .ne { color: #D2413A; font-weight: bold } /* Name.Exception */\n",
|
||||||
|
".output_html .nf { color: #0000FF } /* Name.Function */\n",
|
||||||
|
".output_html .nl { color: #A0A000 } /* Name.Label */\n",
|
||||||
|
".output_html .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */\n",
|
||||||
|
".output_html .nt { color: #008000; font-weight: bold } /* Name.Tag */\n",
|
||||||
|
".output_html .nv { color: #19177C } /* Name.Variable */\n",
|
||||||
|
".output_html .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */\n",
|
||||||
|
".output_html .w { color: #bbbbbb } /* Text.Whitespace */\n",
|
||||||
|
".output_html .mb { color: #666666 } /* Literal.Number.Bin */\n",
|
||||||
|
".output_html .mf { color: #666666 } /* Literal.Number.Float */\n",
|
||||||
|
".output_html .mh { color: #666666 } /* Literal.Number.Hex */\n",
|
||||||
|
".output_html .mi { color: #666666 } /* Literal.Number.Integer */\n",
|
||||||
|
".output_html .mo { color: #666666 } /* Literal.Number.Oct */\n",
|
||||||
|
".output_html .sa { color: #BA2121 } /* Literal.String.Affix */\n",
|
||||||
|
".output_html .sb { color: #BA2121 } /* Literal.String.Backtick */\n",
|
||||||
|
".output_html .sc { color: #BA2121 } /* Literal.String.Char */\n",
|
||||||
|
".output_html .dl { color: #BA2121 } /* Literal.String.Delimiter */\n",
|
||||||
|
".output_html .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */\n",
|
||||||
|
".output_html .s2 { color: #BA2121 } /* Literal.String.Double */\n",
|
||||||
|
".output_html .se { color: #BB6622; font-weight: bold } /* Literal.String.Escape */\n",
|
||||||
|
".output_html .sh { color: #BA2121 } /* Literal.String.Heredoc */\n",
|
||||||
|
".output_html .si { color: #BB6688; font-weight: bold } /* Literal.String.Interpol */\n",
|
||||||
|
".output_html .sx { color: #008000 } /* Literal.String.Other */\n",
|
||||||
|
".output_html .sr { color: #BB6688 } /* Literal.String.Regex */\n",
|
||||||
|
".output_html .s1 { color: #BA2121 } /* Literal.String.Single */\n",
|
||||||
|
".output_html .ss { color: #19177C } /* Literal.String.Symbol */\n",
|
||||||
|
".output_html .bp { color: #008000 } /* Name.Builtin.Pseudo */\n",
|
||||||
|
".output_html .fm { color: #0000FF } /* Name.Function.Magic */\n",
|
||||||
|
".output_html .vc { color: #19177C } /* Name.Variable.Class */\n",
|
||||||
|
".output_html .vg { color: #19177C } /* Name.Variable.Global */\n",
|
||||||
|
".output_html .vi { color: #19177C } /* Name.Variable.Instance */\n",
|
||||||
|
".output_html .vm { color: #19177C } /* Name.Variable.Magic */\n",
|
||||||
|
".output_html .il { color: #666666 } /* Literal.Number.Integer.Long */</style><div class=\"highlight\"><pre><span></span><span class=\"p\">{</span>\n",
|
||||||
|
" <span class=\"nt\">"@context"</span><span class=\"p\">:</span> <span class=\"s2\">"http://senpy.gsi.upm.es/api/contexts/YXBpL2V2YWx1YXRlLz9hbGdvPXNlbnRpbWVudC12YWRlciZkYXRhc2V0PXZhZGVyJTJDc3RzJm91dGZvcm1hdD1qc29uLWxkIw%3D%3D"</span><span class=\"p\">,</span>\n",
|
||||||
|
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"AggregatedEvaluation"</span><span class=\"p\">,</span>\n",
|
||||||
|
" <span class=\"nt\">"senpy:evaluations"</span><span class=\"p\">:</span> <span class=\"p\">[</span>\n",
|
||||||
|
" <span class=\"p\">{</span>\n",
|
||||||
|
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"Evaluation"</span><span class=\"p\">,</span>\n",
|
||||||
|
" <span class=\"nt\">"evaluates"</span><span class=\"p\">:</span> <span class=\"s2\">"endpoint:plugins/sentiment-vader_0.1.1__vader"</span><span class=\"p\">,</span>\n",
|
||||||
|
" <span class=\"nt\">"evaluatesOn"</span><span class=\"p\">:</span> <span class=\"s2\">"vader"</span><span class=\"p\">,</span>\n",
|
||||||
|
" <span class=\"nt\">"metrics"</span><span class=\"p\">:</span> <span class=\"p\">[</span>\n",
|
||||||
|
" <span class=\"p\">{</span>\n",
|
||||||
|
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"Accuracy"</span><span class=\"p\">,</span>\n",
|
||||||
|
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.6907142857142857</span>\n",
|
||||||
|
" <span class=\"p\">},</span>\n",
|
||||||
|
" <span class=\"p\">{</span>\n",
|
||||||
|
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"Precision_macro"</span><span class=\"p\">,</span>\n",
|
||||||
|
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.34535714285714286</span>\n",
|
||||||
|
" <span class=\"p\">},</span>\n",
|
||||||
|
" <span class=\"p\">{</span>\n",
|
||||||
|
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"Recall_macro"</span><span class=\"p\">,</span>\n",
|
||||||
|
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.5</span>\n",
|
||||||
|
" <span class=\"p\">},</span>\n",
|
||||||
|
" <span class=\"p\">{</span>\n",
|
||||||
|
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"F1_macro"</span><span class=\"p\">,</span>\n",
|
||||||
|
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.40853400929446554</span>\n",
|
||||||
|
" <span class=\"p\">},</span>\n",
|
||||||
|
" <span class=\"p\">{</span>\n",
|
||||||
|
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"F1_weighted"</span><span class=\"p\">,</span>\n",
|
||||||
|
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.5643605528396403</span>\n",
|
||||||
|
" <span class=\"p\">},</span>\n",
|
||||||
|
" <span class=\"p\">{</span>\n",
|
||||||
|
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"F1_micro"</span><span class=\"p\">,</span>\n",
|
||||||
|
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.6907142857142857</span>\n",
|
||||||
|
" <span class=\"p\">},</span>\n",
|
||||||
|
" <span class=\"p\">{</span>\n",
|
||||||
|
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"F1_macro"</span><span class=\"p\">,</span>\n",
|
||||||
|
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.40853400929446554</span>\n",
|
||||||
|
" <span class=\"p\">}</span>\n",
|
||||||
|
" <span class=\"p\">]</span>\n",
|
||||||
|
" <span class=\"p\">},</span>\n",
|
||||||
|
" <span class=\"p\">{</span>\n",
|
||||||
|
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"Evaluation"</span><span class=\"p\">,</span>\n",
|
||||||
|
" <span class=\"nt\">"evaluates"</span><span class=\"p\">:</span> <span class=\"s2\">"endpoint:plugins/sentiment-vader_0.1.1__sts"</span><span class=\"p\">,</span>\n",
|
||||||
|
" <span class=\"nt\">"evaluatesOn"</span><span class=\"p\">:</span> <span class=\"s2\">"sts"</span><span class=\"p\">,</span>\n",
|
||||||
|
" <span class=\"nt\">"metrics"</span><span class=\"p\">:</span> <span class=\"p\">[</span>\n",
|
||||||
|
" <span class=\"p\">{</span>\n",
|
||||||
|
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"Accuracy"</span><span class=\"p\">,</span>\n",
|
||||||
|
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.3107177974434612</span>\n",
|
||||||
|
" <span class=\"p\">},</span>\n",
|
||||||
|
" <span class=\"p\">{</span>\n",
|
||||||
|
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"Precision_macro"</span><span class=\"p\">,</span>\n",
|
||||||
|
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.1553588987217306</span>\n",
|
||||||
|
" <span class=\"p\">},</span>\n",
|
||||||
|
" <span class=\"p\">{</span>\n",
|
||||||
|
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"Recall_macro"</span><span class=\"p\">,</span>\n",
|
||||||
|
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.5</span>\n",
|
||||||
|
" <span class=\"p\">},</span>\n",
|
||||||
|
" <span class=\"p\">{</span>\n",
|
||||||
|
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"F1_macro"</span><span class=\"p\">,</span>\n",
|
||||||
|
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.23705926481620407</span>\n",
|
||||||
|
" <span class=\"p\">},</span>\n",
|
||||||
|
" <span class=\"p\">{</span>\n",
|
||||||
|
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"F1_weighted"</span><span class=\"p\">,</span>\n",
|
||||||
|
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.14731706525451424</span>\n",
|
||||||
|
" <span class=\"p\">},</span>\n",
|
||||||
|
" <span class=\"p\">{</span>\n",
|
||||||
|
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"F1_micro"</span><span class=\"p\">,</span>\n",
|
||||||
|
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.3107177974434612</span>\n",
|
||||||
|
" <span class=\"p\">},</span>\n",
|
||||||
|
" <span class=\"p\">{</span>\n",
|
||||||
|
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"F1_macro"</span><span class=\"p\">,</span>\n",
|
||||||
|
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.23705926481620407</span>\n",
|
||||||
|
" <span class=\"p\">}</span>\n",
|
||||||
|
" <span class=\"p\">]</span>\n",
|
||||||
|
" <span class=\"p\">}</span>\n",
|
||||||
|
" <span class=\"p\">]</span>\n",
|
||||||
|
"<span class=\"p\">}</span>\n",
|
||||||
|
"</pre></div>\n"
|
||||||
|
],
|
||||||
|
"text/latex": [
|
||||||
|
"\\begin{Verbatim}[commandchars=\\\\\\{\\}]\n",
|
||||||
|
"\\PY{p}{\\PYZob{}}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}@context\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}http://senpy.gsi.upm.es/api/contexts/YXBpL2V2YWx1YXRlLz9hbGdvPXNlbnRpbWVudC12YWRlciZkYXRhc2V0PXZhZGVyJTJDc3RzJm91dGZvcm1hdD1qc29uLWxkIw\\PYZpc{}3D\\PYZpc{}3D\\PYZdq{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}AggregatedEvaluation\\PYZdq{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}senpy:evaluations\\PYZdq{}}\\PY{p}{:} \\PY{p}{[}\n",
|
||||||
|
" \\PY{p}{\\PYZob{}}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Evaluation\\PYZdq{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}evaluates\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}endpoint:plugins/sentiment\\PYZhy{}vader\\PYZus{}0.1.1\\PYZus{}\\PYZus{}vader\\PYZdq{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}evaluatesOn\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}vader\\PYZdq{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}metrics\\PYZdq{}}\\PY{p}{:} \\PY{p}{[}\n",
|
||||||
|
" \\PY{p}{\\PYZob{}}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Accuracy\\PYZdq{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.6907142857142857}\n",
|
||||||
|
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{p}{\\PYZob{}}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Precision\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.34535714285714286}\n",
|
||||||
|
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{p}{\\PYZob{}}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Recall\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.5}\n",
|
||||||
|
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{p}{\\PYZob{}}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.40853400929446554}\n",
|
||||||
|
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{p}{\\PYZob{}}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}weighted\\PYZdq{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.5643605528396403}\n",
|
||||||
|
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{p}{\\PYZob{}}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}micro\\PYZdq{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.6907142857142857}\n",
|
||||||
|
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{p}{\\PYZob{}}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.40853400929446554}\n",
|
||||||
|
" \\PY{p}{\\PYZcb{}}\n",
|
||||||
|
" \\PY{p}{]}\n",
|
||||||
|
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{p}{\\PYZob{}}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Evaluation\\PYZdq{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}evaluates\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}endpoint:plugins/sentiment\\PYZhy{}vader\\PYZus{}0.1.1\\PYZus{}\\PYZus{}sts\\PYZdq{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}evaluatesOn\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}sts\\PYZdq{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}metrics\\PYZdq{}}\\PY{p}{:} \\PY{p}{[}\n",
|
||||||
|
" \\PY{p}{\\PYZob{}}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Accuracy\\PYZdq{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.3107177974434612}\n",
|
||||||
|
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{p}{\\PYZob{}}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Precision\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.1553588987217306}\n",
|
||||||
|
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{p}{\\PYZob{}}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Recall\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.5}\n",
|
||||||
|
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{p}{\\PYZob{}}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.23705926481620407}\n",
|
||||||
|
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{p}{\\PYZob{}}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}weighted\\PYZdq{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.14731706525451424}\n",
|
||||||
|
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{p}{\\PYZob{}}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}micro\\PYZdq{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.3107177974434612}\n",
|
||||||
|
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{p}{\\PYZob{}}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
|
||||||
|
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.23705926481620407}\n",
|
||||||
|
" \\PY{p}{\\PYZcb{}}\n",
|
||||||
|
" \\PY{p}{]}\n",
|
||||||
|
" \\PY{p}{\\PYZcb{}}\n",
|
||||||
|
" \\PY{p}{]}\n",
|
||||||
|
"\\PY{p}{\\PYZcb{}}\n",
|
||||||
|
"\\end{Verbatim}\n"
|
||||||
|
],
|
||||||
|
"text/plain": [
|
||||||
|
"{\n",
|
||||||
|
" \"@context\": \"http://senpy.gsi.upm.es/api/contexts/YXBpL2V2YWx1YXRlLz9hbGdvPXNlbnRpbWVudC12YWRlciZkYXRhc2V0PXZhZGVyJTJDc3RzJm91dGZvcm1hdD1qc29uLWxkIw%3D%3D\",\n",
|
||||||
|
" \"@type\": \"AggregatedEvaluation\",\n",
|
||||||
|
" \"senpy:evaluations\": [\n",
|
||||||
|
" {\n",
|
||||||
|
" \"@type\": \"Evaluation\",\n",
|
||||||
|
" \"evaluates\": \"endpoint:plugins/sentiment-vader_0.1.1__vader\",\n",
|
||||||
|
" \"evaluatesOn\": \"vader\",\n",
|
||||||
|
" \"metrics\": [\n",
|
||||||
|
" {\n",
|
||||||
|
" \"@type\": \"Accuracy\",\n",
|
||||||
|
" \"value\": 0.6907142857142857\n",
|
||||||
|
" },\n",
|
||||||
|
" {\n",
|
||||||
|
" \"@type\": \"Precision_macro\",\n",
|
||||||
|
" \"value\": 0.34535714285714286\n",
|
||||||
|
" },\n",
|
||||||
|
" {\n",
|
||||||
|
" \"@type\": \"Recall_macro\",\n",
|
||||||
|
" \"value\": 0.5\n",
|
||||||
|
" },\n",
|
||||||
|
" {\n",
|
||||||
|
" \"@type\": \"F1_macro\",\n",
|
||||||
|
" \"value\": 0.40853400929446554\n",
|
||||||
|
" },\n",
|
||||||
|
" {\n",
|
||||||
|
" \"@type\": \"F1_weighted\",\n",
|
||||||
|
" \"value\": 0.5643605528396403\n",
|
||||||
|
" },\n",
|
||||||
|
" {\n",
|
||||||
|
" \"@type\": \"F1_micro\",\n",
|
||||||
|
" \"value\": 0.6907142857142857\n",
|
||||||
|
" },\n",
|
||||||
|
" {\n",
|
||||||
|
" \"@type\": \"F1_macro\",\n",
|
||||||
|
" \"value\": 0.40853400929446554\n",
|
||||||
|
" }\n",
|
||||||
|
" ]\n",
|
||||||
|
" },\n",
|
||||||
|
" {\n",
|
||||||
|
" \"@type\": \"Evaluation\",\n",
|
||||||
|
" \"evaluates\": \"endpoint:plugins/sentiment-vader_0.1.1__sts\",\n",
|
||||||
|
" \"evaluatesOn\": \"sts\",\n",
|
||||||
|
" \"metrics\": [\n",
|
||||||
|
" {\n",
|
||||||
|
" \"@type\": \"Accuracy\",\n",
|
||||||
|
" \"value\": 0.3107177974434612\n",
|
||||||
|
" },\n",
|
||||||
|
" {\n",
|
||||||
|
" \"@type\": \"Precision_macro\",\n",
|
||||||
|
" \"value\": 0.1553588987217306\n",
|
||||||
|
" },\n",
|
||||||
|
" {\n",
|
||||||
|
" \"@type\": \"Recall_macro\",\n",
|
||||||
|
" \"value\": 0.5\n",
|
||||||
|
" },\n",
|
||||||
|
" {\n",
|
||||||
|
" \"@type\": \"F1_macro\",\n",
|
||||||
|
" \"value\": 0.23705926481620407\n",
|
||||||
|
" },\n",
|
||||||
|
" {\n",
|
||||||
|
" \"@type\": \"F1_weighted\",\n",
|
||||||
|
" \"value\": 0.14731706525451424\n",
|
||||||
|
" },\n",
|
||||||
|
" {\n",
|
||||||
|
" \"@type\": \"F1_micro\",\n",
|
||||||
|
" \"value\": 0.3107177974434612\n",
|
||||||
|
" },\n",
|
||||||
|
" {\n",
|
||||||
|
" \"@type\": \"F1_macro\",\n",
|
||||||
|
" \"value\": 0.23705926481620407\n",
|
||||||
|
" }\n",
|
||||||
|
" ]\n",
|
||||||
|
" }\n",
|
||||||
|
" ]\n",
|
||||||
|
"}"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 2,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"import requests\n",
|
||||||
|
"from IPython.display import Code\n",
|
||||||
|
"\n",
|
||||||
|
"endpoint = 'http://senpy.gsi.upm.es/api'\n",
|
||||||
|
"res = requests.get(f'{endpoint}/evaluate',\n",
|
||||||
|
" params={\"algo\": \"sentiment-vader\",\n",
|
||||||
|
" \"dataset\": \"vader,sts\",\n",
|
||||||
|
" 'outformat': 'json-ld'\n",
|
||||||
|
" })\n",
|
||||||
|
"Code(res.text, language='json')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Programmatically (expert)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"A third option is to evaluate plugins manually without launching the server.\n",
|
||||||
|
"\n",
|
||||||
|
"This option is particularly interesting for advanced users that want faster iterations and evaluation results, and for automation.\n",
|
||||||
|
"\n",
|
||||||
|
"We would first need an instance of a plugin.\n",
|
||||||
|
"In this example we will use the Sentiment140 plugin that is included in every senpy installation:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 22,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from senpy.plugins.sentiment import sentiment140_plugin"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 23,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"s140 = sentiment140_plugin.Sentiment140()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Then, we need to know what datasets are available.\n",
|
||||||
|
"We can list all datasets and basic stats (e.g., number of instances and labels used) like this:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 32,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"vader {'instances': 4200, 'labels': [1, -1]}\n",
|
||||||
|
"sts {'instances': 4200, 'labels': [1, -1]}\n",
|
||||||
|
"imdb_unsup {'instances': 50000, 'labels': [1, -1]}\n",
|
||||||
|
"imdb {'instances': 50000, 'labels': [1, -1]}\n",
|
||||||
|
"sst {'instances': 11855, 'labels': [1, -1]}\n",
|
||||||
|
"multidomain {'instances': 38548, 'labels': [1, -1]}\n",
|
||||||
|
"sentiment140 {'instances': 1600000, 'labels': [1, -1]}\n",
|
||||||
|
"semeval07 {'instances': 'None', 'labels': [1, -1]}\n",
|
||||||
|
"semeval14 {'instances': 7838, 'labels': [1, -1]}\n",
|
||||||
|
"pl04 {'instances': 4000, 'labels': [1, -1]}\n",
|
||||||
|
"pl05 {'instances': 10662, 'labels': [1, -1]}\n",
|
||||||
|
"semeval13 {'instances': 6259, 'labels': [1, -1]}\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"from senpy.gsitk_compat import datasets\n",
|
||||||
|
"for k, d in datasets.items():\n",
|
||||||
|
" print(k, d['stats'])"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"Now, we will evaluate our plugin in one of the smallest datasets, `sts`:"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 37,
|
||||||
|
"metadata": {
|
||||||
|
"scrolled": false
|
||||||
|
},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"[{\n",
|
||||||
|
" \"@type\": \"Evaluation\",\n",
|
||||||
|
" \"evaluates\": \"endpoint:plugins/sentiment140_0.2\",\n",
|
||||||
|
" \"evaluatesOn\": \"sts\",\n",
|
||||||
|
" \"metrics\": [\n",
|
||||||
|
" {\n",
|
||||||
|
" \"@type\": \"Accuracy\",\n",
|
||||||
|
" \"value\": 0.872173058013766\n",
|
||||||
|
" },\n",
|
||||||
|
" {\n",
|
||||||
|
" \"@type\": \"Precision_macro\",\n",
|
||||||
|
" \"value\": 0.9035254323131467\n",
|
||||||
|
" },\n",
|
||||||
|
" {\n",
|
||||||
|
" \"@type\": \"Recall_macro\",\n",
|
||||||
|
" \"value\": 0.8021249029415483\n",
|
||||||
|
" },\n",
|
||||||
|
" {\n",
|
||||||
|
" \"@type\": \"F1_macro\",\n",
|
||||||
|
" \"value\": 0.8320673712021136\n",
|
||||||
|
" },\n",
|
||||||
|
" {\n",
|
||||||
|
" \"@type\": \"F1_weighted\",\n",
|
||||||
|
" \"value\": 0.8631351567604358\n",
|
||||||
|
" },\n",
|
||||||
|
" {\n",
|
||||||
|
" \"@type\": \"F1_micro\",\n",
|
||||||
|
" \"value\": 0.872173058013766\n",
|
||||||
|
" },\n",
|
||||||
|
" {\n",
|
||||||
|
" \"@type\": \"F1_macro\",\n",
|
||||||
|
" \"value\": 0.8320673712021136\n",
|
||||||
|
" }\n",
|
||||||
|
" ]\n",
|
||||||
|
" }]"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 37,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"s140.evaluate(['sts', ])"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": []
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"anaconda-cloud": {},
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.7.3"
|
||||||
|
},
|
||||||
|
"toc": {
|
||||||
|
"colors": {
|
||||||
|
"hover_highlight": "#DAA520",
|
||||||
|
"running_highlight": "#FF0000",
|
||||||
|
"selected_highlight": "#FFD700"
|
||||||
|
},
|
||||||
|
"moveMenuLeft": true,
|
||||||
|
"nav_menu": {
|
||||||
|
"height": "68px",
|
||||||
|
"width": "252px"
|
||||||
|
},
|
||||||
|
"navigate_menu": true,
|
||||||
|
"number_sections": true,
|
||||||
|
"sideBar": true,
|
||||||
|
"threshold": 4,
|
||||||
|
"toc_cell": false,
|
||||||
|
"toc_section_display": "block",
|
||||||
|
"toc_window_display": false
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 1
|
||||||
|
}
|
@@ -24,6 +24,7 @@ I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
|
|||||||
help:
|
help:
|
||||||
@echo "Please use \`make <target>' where <target> is one of"
|
@echo "Please use \`make <target>' where <target> is one of"
|
||||||
@echo " html to make standalone HTML files"
|
@echo " html to make standalone HTML files"
|
||||||
|
@echo " entr to watch for changes and continuously make HTML files"
|
||||||
@echo " dirhtml to make HTML files named index.html in directories"
|
@echo " dirhtml to make HTML files named index.html in directories"
|
||||||
@echo " singlehtml to make a single large HTML file"
|
@echo " singlehtml to make a single large HTML file"
|
||||||
@echo " pickle to make pickle files"
|
@echo " pickle to make pickle files"
|
||||||
@@ -49,6 +50,9 @@ help:
|
|||||||
clean:
|
clean:
|
||||||
rm -rf $(BUILDDIR)/*
|
rm -rf $(BUILDDIR)/*
|
||||||
|
|
||||||
|
entr:
|
||||||
|
while true; do ag -g rst | entr -d make html; done
|
||||||
|
|
||||||
html:
|
html:
|
||||||
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
||||||
@echo
|
@echo
|
||||||
|
1717
docs/Quickstart.ipynb
Normal file
1717
docs/Quickstart.ipynb
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,317 +0,0 @@
|
|||||||
{
|
|
||||||
"cells": [
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {
|
|
||||||
"ExecuteTime": {
|
|
||||||
"end_time": "2017-04-10T17:05:31.465571Z",
|
|
||||||
"start_time": "2017-04-10T19:05:31.458282+02:00"
|
|
||||||
},
|
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
|
||||||
"source": [
|
|
||||||
"# Client"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {
|
|
||||||
"collapsed": true,
|
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
|
||||||
"source": [
|
|
||||||
"The built-in senpy client allows you to query any Senpy endpoint. We will illustrate how to use it with the public demo endpoint, and then show you how to spin up your own endpoint using docker."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {
|
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
|
||||||
"source": [
|
|
||||||
"Demo Endpoint\n",
|
|
||||||
"-------------"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {
|
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
|
||||||
"source": [
|
|
||||||
"To start using senpy, simply create a new Client and point it to your endpoint. In this case, the latest version of Senpy at GSI."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {
|
|
||||||
"ExecuteTime": {
|
|
||||||
"end_time": "2017-04-10T17:29:12.827640Z",
|
|
||||||
"start_time": "2017-04-10T19:29:12.818617+02:00"
|
|
||||||
},
|
|
||||||
"collapsed": false,
|
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"from senpy.client import Client\n",
|
|
||||||
"\n",
|
|
||||||
"c = Client('http://latest.senpy.cluster.gsi.dit.upm.es/api')\n"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {
|
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
|
||||||
"source": [
|
|
||||||
"Now, let's use that client analyse some queries:"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {
|
|
||||||
"ExecuteTime": {
|
|
||||||
"end_time": "2017-04-10T17:29:14.011657Z",
|
|
||||||
"start_time": "2017-04-10T19:29:13.701808+02:00"
|
|
||||||
},
|
|
||||||
"collapsed": false,
|
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"r = c.analyse('I like sugar!!', algorithm='sentiment140')\n",
|
|
||||||
"r"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {
|
|
||||||
"ExecuteTime": {
|
|
||||||
"end_time": "2017-04-10T17:08:19.616754Z",
|
|
||||||
"start_time": "2017-04-10T19:08:19.610767+02:00"
|
|
||||||
},
|
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
|
||||||
"source": [
|
|
||||||
"As you can see, that gave us the full JSON result. A more concise way to print it would be:"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {
|
|
||||||
"ExecuteTime": {
|
|
||||||
"end_time": "2017-04-10T17:29:14.854213Z",
|
|
||||||
"start_time": "2017-04-10T19:29:14.842068+02:00"
|
|
||||||
},
|
|
||||||
"collapsed": false,
|
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"for entry in r.entries:\n",
|
|
||||||
" print('{} -> {}'.format(entry['text'], entry['sentiments'][0]['marl:hasPolarity']))"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {
|
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
|
||||||
"source": [
|
|
||||||
"We can also obtain a list of available plugins with the client:"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {
|
|
||||||
"ExecuteTime": {
|
|
||||||
"end_time": "2017-04-10T17:29:16.245198Z",
|
|
||||||
"start_time": "2017-04-10T19:29:16.056545+02:00"
|
|
||||||
},
|
|
||||||
"collapsed": false,
|
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"c.plugins()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {
|
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
|
||||||
"source": [
|
|
||||||
"Or, more concisely:"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {
|
|
||||||
"ExecuteTime": {
|
|
||||||
"end_time": "2017-04-10T17:29:17.663275Z",
|
|
||||||
"start_time": "2017-04-10T19:29:17.484623+02:00"
|
|
||||||
},
|
|
||||||
"collapsed": false,
|
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"c.plugins().keys()"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {
|
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
|
||||||
"source": [
|
|
||||||
"Local Endpoint\n",
|
|
||||||
"--------------"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {
|
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
|
||||||
"source": [
|
|
||||||
"To run your own instance of senpy, just create a docker container with the latest Senpy image. Using `--default-plugins` you will get some extra plugins to start playing with the API."
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {
|
|
||||||
"ExecuteTime": {
|
|
||||||
"end_time": "2017-04-10T17:29:20.637539Z",
|
|
||||||
"start_time": "2017-04-10T19:29:19.938322+02:00"
|
|
||||||
},
|
|
||||||
"collapsed": false,
|
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"!docker run -ti --name 'SenpyEndpoint' -d -p 6000:5000 gsiupm/senpy:0.8.6 --host 0.0.0.0 --default-plugins"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {
|
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
|
||||||
"source": [
|
|
||||||
"To use this endpoint:"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {
|
|
||||||
"ExecuteTime": {
|
|
||||||
"end_time": "2017-04-10T17:29:21.263976Z",
|
|
||||||
"start_time": "2017-04-10T19:29:21.260595+02:00"
|
|
||||||
},
|
|
||||||
"collapsed": false,
|
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"c_local = Client('http://127.0.0.1:6000/api')"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "markdown",
|
|
||||||
"metadata": {
|
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
|
||||||
"source": [
|
|
||||||
"That's all! After you are done with your analysis, stop the docker container:"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": null,
|
|
||||||
"metadata": {
|
|
||||||
"ExecuteTime": {
|
|
||||||
"end_time": "2017-04-10T17:29:33.226686Z",
|
|
||||||
"start_time": "2017-04-10T19:29:22.392121+02:00"
|
|
||||||
},
|
|
||||||
"collapsed": false,
|
|
||||||
"deletable": true,
|
|
||||||
"editable": true
|
|
||||||
},
|
|
||||||
"outputs": [],
|
|
||||||
"source": [
|
|
||||||
"!docker stop SenpyEndpoint\n",
|
|
||||||
"!docker rm SenpyEndpoint"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"metadata": {
|
|
||||||
"anaconda-cloud": {},
|
|
||||||
"kernelspec": {
|
|
||||||
"display_name": "Python 3",
|
|
||||||
"language": "python",
|
|
||||||
"name": "python3"
|
|
||||||
},
|
|
||||||
"language_info": {
|
|
||||||
"codemirror_mode": {
|
|
||||||
"name": "ipython",
|
|
||||||
"version": 3
|
|
||||||
},
|
|
||||||
"file_extension": ".py",
|
|
||||||
"mimetype": "text/x-python",
|
|
||||||
"name": "python",
|
|
||||||
"nbconvert_exporter": "python",
|
|
||||||
"pygments_lexer": "ipython3",
|
|
||||||
"version": "3.6.0"
|
|
||||||
},
|
|
||||||
"toc": {
|
|
||||||
"colors": {
|
|
||||||
"hover_highlight": "#DAA520",
|
|
||||||
"running_highlight": "#FF0000",
|
|
||||||
"selected_highlight": "#FFD700"
|
|
||||||
},
|
|
||||||
"moveMenuLeft": true,
|
|
||||||
"nav_menu": {
|
|
||||||
"height": "68px",
|
|
||||||
"width": "252px"
|
|
||||||
},
|
|
||||||
"navigate_menu": true,
|
|
||||||
"number_sections": true,
|
|
||||||
"sideBar": true,
|
|
||||||
"threshold": 4,
|
|
||||||
"toc_cell": false,
|
|
||||||
"toc_section_display": "block",
|
|
||||||
"toc_window_display": false
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"nbformat": 4,
|
|
||||||
"nbformat_minor": 1
|
|
||||||
}
|
|
@@ -1,106 +0,0 @@
|
|||||||
|
|
||||||
Client
|
|
||||||
======
|
|
||||||
|
|
||||||
Demo Endpoint
|
|
||||||
-------------
|
|
||||||
|
|
||||||
Import Client and send a request
|
|
||||||
|
|
||||||
.. code:: python
|
|
||||||
|
|
||||||
from senpy.client import Client
|
|
||||||
|
|
||||||
c = Client('http://latest.senpy.cluster.gsi.dit.upm.es/api')
|
|
||||||
r = c.analyse('I like Pizza', algorithm='sentiment140')
|
|
||||||
|
|
||||||
Print response
|
|
||||||
|
|
||||||
.. code:: python
|
|
||||||
|
|
||||||
for entry in r.entries:
|
|
||||||
print('{} -> {}'.format(entry['text'], entry['sentiments'][0]['marl:hasPolarity']))
|
|
||||||
|
|
||||||
|
|
||||||
.. parsed-literal::
|
|
||||||
|
|
||||||
I like Pizza -> marl:Positive
|
|
||||||
|
|
||||||
|
|
||||||
Obtain a list of available plugins
|
|
||||||
|
|
||||||
.. code:: python
|
|
||||||
|
|
||||||
for plugin in c.request('/plugins')['plugins']:
|
|
||||||
print(plugin['name'])
|
|
||||||
|
|
||||||
|
|
||||||
.. parsed-literal::
|
|
||||||
|
|
||||||
emoRand
|
|
||||||
rand
|
|
||||||
sentiment140
|
|
||||||
|
|
||||||
|
|
||||||
Local Endpoint
|
|
||||||
--------------
|
|
||||||
|
|
||||||
Run a docker container with Senpy image and default plugins
|
|
||||||
|
|
||||||
.. code::
|
|
||||||
|
|
||||||
docker run -ti --name 'SenpyEndpoint' -d -p 5000:5000 gsiupm/senpy:0.8.6 --host 0.0.0.0 --default-plugins
|
|
||||||
|
|
||||||
|
|
||||||
.. parsed-literal::
|
|
||||||
|
|
||||||
a0157cd98057072388bfebeed78a830da7cf0a796f4f1a3fd9188f9f2e5fe562
|
|
||||||
|
|
||||||
|
|
||||||
Import client and send a request to localhost
|
|
||||||
|
|
||||||
.. code:: python
|
|
||||||
|
|
||||||
c_local = Client('http://127.0.0.1:5000/api')
|
|
||||||
r = c_local.analyse('Hello world', algorithm='sentiment140')
|
|
||||||
|
|
||||||
Print response
|
|
||||||
|
|
||||||
.. code:: python
|
|
||||||
|
|
||||||
for entry in r.entries:
|
|
||||||
print('{} -> {}'.format(entry['text'], entry['sentiments'][0]['marl:hasPolarity']))
|
|
||||||
|
|
||||||
|
|
||||||
.. parsed-literal::
|
|
||||||
|
|
||||||
Hello world -> marl:Neutral
|
|
||||||
|
|
||||||
|
|
||||||
Obtain a list of available plugins deployed locally
|
|
||||||
|
|
||||||
.. code:: python
|
|
||||||
|
|
||||||
c_local.plugins().keys()
|
|
||||||
|
|
||||||
|
|
||||||
.. parsed-literal::
|
|
||||||
|
|
||||||
rand
|
|
||||||
sentiment140
|
|
||||||
emoRand
|
|
||||||
|
|
||||||
|
|
||||||
Stop the docker container
|
|
||||||
|
|
||||||
.. code:: python
|
|
||||||
|
|
||||||
!docker stop SenpyEndpoint
|
|
||||||
!docker rm SenpyEndpoint
|
|
||||||
|
|
||||||
|
|
||||||
.. parsed-literal::
|
|
||||||
|
|
||||||
SenpyEndpoint
|
|
||||||
SenpyEndpoint
|
|
||||||
|
|
@@ -1,11 +0,0 @@
|
|||||||
About
|
|
||||||
--------
|
|
||||||
|
|
||||||
If you use Senpy in your research, please cite `Senpy: A Pragmatic Linked Sentiment Analysis Framework <http://gsi.dit.upm.es/index.php/es/investigacion/publicaciones?view=publication&task=show&id=417>`__ (`BibTex <http://gsi.dit.upm.es/index.php/es/investigacion/publicaciones?controller=publications&task=export&format=bibtex&id=417>`__):
|
|
||||||
|
|
||||||
.. code-block:: text
|
|
||||||
|
|
||||||
Sánchez-Rada, J. F., Iglesias, C. A., Corcuera, I., & Araque, Ó. (2016, October).
|
|
||||||
Senpy: A Pragmatic Linked Sentiment Analysis Framework.
|
|
||||||
In Data Science and Advanced Analytics (DSAA),
|
|
||||||
2016 IEEE International Conference on (pp. 735-742). IEEE.
|
|
17
docs/api.rst
17
docs/api.rst
@@ -25,7 +25,7 @@ NIF API
|
|||||||
"@context":"http://127.0.0.1/api/contexts/Results.jsonld",
|
"@context":"http://127.0.0.1/api/contexts/Results.jsonld",
|
||||||
"@id":"_:Results_11241245.22",
|
"@id":"_:Results_11241245.22",
|
||||||
"@type":"results"
|
"@type":"results"
|
||||||
"analysis": [
|
"activities": [
|
||||||
"plugins/sentiment-140_0.1"
|
"plugins/sentiment-140_0.1"
|
||||||
],
|
],
|
||||||
"entries": [
|
"entries": [
|
||||||
@@ -73,7 +73,7 @@ NIF API
|
|||||||
.. http:get:: /api/plugins
|
.. http:get:: /api/plugins
|
||||||
|
|
||||||
Returns a list of installed plugins.
|
Returns a list of installed plugins.
|
||||||
**Example request**:
|
**Example request and response**:
|
||||||
|
|
||||||
.. sourcecode:: http
|
.. sourcecode:: http
|
||||||
|
|
||||||
@@ -82,10 +82,6 @@ NIF API
|
|||||||
Accept: application/json, text/javascript
|
Accept: application/json, text/javascript
|
||||||
|
|
||||||
|
|
||||||
**Example response**:
|
|
||||||
|
|
||||||
.. sourcecode:: http
|
|
||||||
|
|
||||||
{
|
{
|
||||||
"@id": "plugins/sentiment-140_0.1",
|
"@id": "plugins/sentiment-140_0.1",
|
||||||
"@type": "sentimentPlugin",
|
"@type": "sentimentPlugin",
|
||||||
@@ -143,19 +139,14 @@ NIF API
|
|||||||
.. http:get:: /api/plugins/<pluginname>
|
.. http:get:: /api/plugins/<pluginname>
|
||||||
|
|
||||||
Returns the information of a specific plugin.
|
Returns the information of a specific plugin.
|
||||||
**Example request**:
|
**Example request and response**:
|
||||||
|
|
||||||
.. sourcecode:: http
|
.. sourcecode:: http
|
||||||
|
|
||||||
GET /api/plugins/rand/ HTTP/1.1
|
GET /api/plugins/sentiment-random/ HTTP/1.1
|
||||||
Host: localhost
|
Host: localhost
|
||||||
Accept: application/json, text/javascript
|
Accept: application/json, text/javascript
|
||||||
|
|
||||||
|
|
||||||
**Example response**:
|
|
||||||
|
|
||||||
.. sourcecode:: http
|
|
||||||
|
|
||||||
{
|
{
|
||||||
"@context": "http://127.0.0.1/api/contexts/ExamplePlugin.jsonld",
|
"@context": "http://127.0.0.1/api/contexts/ExamplePlugin.jsonld",
|
||||||
"@id": "plugins/ExamplePlugin_0.1",
|
"@id": "plugins/ExamplePlugin_0.1",
|
||||||
|
@@ -1,5 +1,6 @@
|
|||||||
API and Examples
|
API and vocabularies
|
||||||
################
|
####################
|
||||||
|
|
||||||
.. toctree::
|
.. toctree::
|
||||||
|
|
||||||
vocabularies.rst
|
vocabularies.rst
|
||||||
|
@@ -2,7 +2,7 @@
|
|||||||
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
||||||
"@id": "me:Result1",
|
"@id": "me:Result1",
|
||||||
"@type": "results",
|
"@type": "results",
|
||||||
"analysis": [
|
"activities": [
|
||||||
"me:SAnalysis1",
|
"me:SAnalysis1",
|
||||||
"me:SgAnalysis1",
|
"me:SgAnalysis1",
|
||||||
"me:EmotionAnalysis1",
|
"me:EmotionAnalysis1",
|
||||||
|
@@ -2,17 +2,13 @@
|
|||||||
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
||||||
"@id": "http://example.com#NIFExample",
|
"@id": "http://example.com#NIFExample",
|
||||||
"@type": "results",
|
"@type": "results",
|
||||||
"analysis": [
|
"activities": [
|
||||||
],
|
],
|
||||||
"entries": [
|
"entries": [
|
||||||
{
|
{
|
||||||
"@type": [
|
|
||||||
"nif:RFC5147String",
|
|
||||||
"nif:Context"
|
|
||||||
],
|
|
||||||
"nif:beginIndex": 0,
|
"nif:beginIndex": 0,
|
||||||
"nif:endIndex": 40,
|
"nif:endIndex": 40,
|
||||||
"nif:isString": "My favourite actress is Natalie Portman"
|
"text": "An entry should have a nif:isString key"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
@@ -1,9 +0,0 @@
|
|||||||
Command line
|
|
||||||
============
|
|
||||||
|
|
||||||
This video shows how to analyse text directly on the command line using the senpy tool.
|
|
||||||
|
|
||||||
.. image:: https://asciinema.org/a/9uwef1ghkjk062cw2t4mhzpyk.png
|
|
||||||
:width: 100%
|
|
||||||
:target: https://asciinema.org/a/9uwef1ghkjk062cw2t4mhzpyk
|
|
||||||
:alt: CLI demo
|
|
18
docs/conf.py
18
docs/conf.py
@@ -38,6 +38,8 @@ extensions = [
|
|||||||
'sphinxcontrib.httpdomain',
|
'sphinxcontrib.httpdomain',
|
||||||
'sphinx.ext.coverage',
|
'sphinx.ext.coverage',
|
||||||
'sphinx.ext.autosectionlabel',
|
'sphinx.ext.autosectionlabel',
|
||||||
|
'nbsphinx',
|
||||||
|
'sphinx.ext.mathjax',
|
||||||
]
|
]
|
||||||
|
|
||||||
# Add any paths that contain templates here, relative to this directory.
|
# Add any paths that contain templates here, relative to this directory.
|
||||||
@@ -54,7 +56,7 @@ master_doc = 'index'
|
|||||||
|
|
||||||
# General information about the project.
|
# General information about the project.
|
||||||
project = u'Senpy'
|
project = u'Senpy'
|
||||||
copyright = u'2016, J. Fernando Sánchez'
|
copyright = u'2019, J. Fernando Sánchez'
|
||||||
description = u'A framework for sentiment and emotion analysis services'
|
description = u'A framework for sentiment and emotion analysis services'
|
||||||
|
|
||||||
# The version info for the project you're documenting, acts as replacement for
|
# The version info for the project you're documenting, acts as replacement for
|
||||||
@@ -79,7 +81,9 @@ language = None
|
|||||||
|
|
||||||
# List of patterns, relative to source directory, that match files and
|
# List of patterns, relative to source directory, that match files and
|
||||||
# directories to ignore when looking for source files.
|
# directories to ignore when looking for source files.
|
||||||
exclude_patterns = ['_build']
|
exclude_patterns = ['_build', '**.ipynb_checkpoints']
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# The reST default role (used for this markup: `text`) to use for all
|
# The reST default role (used for this markup: `text`) to use for all
|
||||||
# documents.
|
# documents.
|
||||||
@@ -126,6 +130,7 @@ html_theme_options = {
|
|||||||
'github_user': 'gsi-upm',
|
'github_user': 'gsi-upm',
|
||||||
'github_repo': 'senpy',
|
'github_repo': 'senpy',
|
||||||
'github_banner': True,
|
'github_banner': True,
|
||||||
|
'sidebar_collapse': True,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -286,3 +291,12 @@ texinfo_documents = [
|
|||||||
|
|
||||||
# If true, do not generate a @detailmenu in the "Top" node's menu.
|
# If true, do not generate a @detailmenu in the "Top" node's menu.
|
||||||
#texinfo_no_detailmenu = False
|
#texinfo_no_detailmenu = False
|
||||||
|
|
||||||
|
nbsphinx_prolog = """
|
||||||
|
.. note:: This is an `auto-generated <https://nbsphinx.readthedocs.io>`_ static view of a Jupyter notebook.
|
||||||
|
|
||||||
|
To run the code examples in your computer, you may download the original notebook from the repository: https://github.com/gsi-upm/senpy/tree/master/docs/{{ env.doc2path(env.docname, base=None) }}
|
||||||
|
|
||||||
|
|
||||||
|
----
|
||||||
|
"""
|
||||||
|
@@ -1,93 +1,152 @@
|
|||||||
Conversion
|
Automatic Model Conversion
|
||||||
----------
|
--------------------------
|
||||||
|
|
||||||
Senpy includes experimental support for emotion/sentiment conversion plugins.
|
Senpy includes support for emotion and sentiment conversion.
|
||||||
|
When a user requests a specific model, senpy will choose a strategy to convert the model that the service usually outputs and the model requested by the user.
|
||||||
|
|
||||||
|
Out of the box, senpy can convert from the `emotionml:pad` (pleasure-arousal-dominance) dimensional model to `emoml:big6` (Ekman's big-6) categories, and vice versa.
|
||||||
|
This specific conversion uses a series of dimensional centroids (`emotionml:pad`) for each emotion category (`emotionml:big6`).
|
||||||
|
A dimensional value is converted to a category by looking for the nearest centroid.
|
||||||
|
The centroids are calculated according to this article:
|
||||||
|
|
||||||
|
.. code-block:: text
|
||||||
|
|
||||||
|
Kim, S. M., Valitutti, A., & Calvo, R. A. (2010, June).
|
||||||
|
Evaluation of unsupervised emotion models to textual affect recognition.
|
||||||
|
In Proceedings of the NAACL HLT 2010 Workshop on Computational Approaches to Analysis and Generation of Emotion in Text (pp. 62-70).
|
||||||
|
Association for Computational Linguistics.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
It is possible to add new conversion strategies by `Developing a conversion plugin`_.
|
||||||
|
|
||||||
|
|
||||||
Use
|
Use
|
||||||
===
|
===
|
||||||
|
|
||||||
Consider the original query: http://127.0.0.1:5000/api/?i=hello&algo=emoRand
|
Consider the following query to an emotion service: http://senpy.gsi.upm.es/api/emotion-anew?i=good
|
||||||
|
|
||||||
The requested plugin (emoRand) returns emotions using Ekman's model (or big6 in EmotionML):
|
The requested plugin (emotion-random) returns emotions using the VAD space (FSRE dimensions in EmotionML):
|
||||||
|
|
||||||
.. code:: json
|
.. code:: json
|
||||||
|
|
||||||
|
|
||||||
... rest of the document ...
|
[
|
||||||
{
|
{
|
||||||
"@type": "emotionSet",
|
"@type": "EmotionSet",
|
||||||
"onyx:hasEmotion": {
|
"onyx:hasEmotion": [
|
||||||
"@type": "emotion",
|
{
|
||||||
"onyx:hasEmotionCategory": "emoml:big6anger"
|
"@type": "Emotion",
|
||||||
},
|
"emoml:pad-dimensions_arousal": 5.43,
|
||||||
"prov:wasGeneratedBy": "plugins/emoRand_0.1"
|
"emoml:pad-dimensions_dominance": 6.41,
|
||||||
}
|
"emoml:pad-dimensions_pleasure": 7.47,
|
||||||
|
"prov:wasGeneratedBy": "prefix:Analysis_1562744784.8789825"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"prov:wasGeneratedBy": "prefix:Analysis_1562744784.8789825"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
To get these emotions in VAD space (FSRE dimensions in EmotionML), we'd do this:
|
To get the equivalent of these emotions in Ekman's categories (i.e., Ekman's Big 6 in EmotionML), we'd do this:
|
||||||
|
|
||||||
http://127.0.0.1:5000/api/?i=hello&algo=emoRand&emotionModel=emoml:fsre-dimensions
|
http://senpy.gsi.upm.es/api/emotion-anew?i=good&emotion-model=emoml:big6
|
||||||
|
|
||||||
This call, provided there is a valid conversion plugin from Ekman's to VAD, would return something like this:
|
This call, provided there is a valid conversion plugin from Ekman's to VAD, would return something like this:
|
||||||
|
|
||||||
.. code:: json
|
.. code:: json
|
||||||
|
|
||||||
|
[
|
||||||
... rest of the document ...
|
{
|
||||||
|
"@type": "EmotionSet",
|
||||||
|
"onyx:hasEmotion": [
|
||||||
{
|
{
|
||||||
"@type": "emotionSet",
|
"@type": "Emotion",
|
||||||
"onyx:hasEmotion": {
|
"onyx:algorithmConfidence": 4.4979,
|
||||||
"@type": "emotion",
|
"onyx:hasEmotionCategory": "emoml:big6happiness"
|
||||||
"onyx:hasEmotionCategory": "emoml:big6anger"
|
|
||||||
},
|
|
||||||
"prov:wasGeneratedBy": "plugins/emoRand_0.1"
|
|
||||||
}, {
|
|
||||||
"@type": "emotionSet",
|
|
||||||
"onyx:hasEmotion": {
|
|
||||||
"@type": "emotion",
|
|
||||||
"A": 7.22,
|
|
||||||
"D": 6.28,
|
|
||||||
"V": 8.6
|
|
||||||
},
|
|
||||||
"prov:wasGeneratedBy": "plugins/Ekman2VAD_0.1"
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
],
|
||||||
|
"prov:wasDerivedFrom": {
|
||||||
|
"@id": "Emotions0",
|
||||||
|
"@type": "EmotionSet",
|
||||||
|
"onyx:hasEmotion": [
|
||||||
|
{
|
||||||
|
"@id": "Emotion0",
|
||||||
|
"@type": "Emotion",
|
||||||
|
"emoml:pad-dimensions_arousal": 5.43,
|
||||||
|
"emoml:pad-dimensions_dominance": 6.41,
|
||||||
|
"emoml:pad-dimensions_pleasure": 7.47,
|
||||||
|
"prov:wasGeneratedBy": "prefix:Analysis_1562745220.1553965"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"prov:wasGeneratedBy": "prefix:Analysis_1562745220.1553965"
|
||||||
|
},
|
||||||
|
"prov:wasGeneratedBy": "prefix:Analysis_1562745220.1570725"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
That is called a *full* response, as it simply adds the converted emotion alongside.
|
That is called a *full* response, as it simply adds the converted emotion alongside.
|
||||||
It is also possible to get the original emotion nested within the new converted emotion, using the `conversion=nested` parameter:
|
It is also possible to get the original emotion nested within the new converted emotion, using the `conversion=nested` parameter:
|
||||||
|
|
||||||
|
http://senpy.gsi.upm.es/api/emotion-anew?i=good&emotion-model=emoml:big6&conversion=nested
|
||||||
|
|
||||||
.. code:: json
|
.. code:: json
|
||||||
|
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"@type": "EmotionSet",
|
||||||
|
"onyx:hasEmotion": [
|
||||||
|
{
|
||||||
|
"@type": "Emotion",
|
||||||
|
"onyx:algorithmConfidence": 4.4979,
|
||||||
|
"onyx:hasEmotionCategory": "emoml:big6happiness"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"prov:wasDerivedFrom": {
|
||||||
|
"@id": "Emotions0",
|
||||||
|
"@type": "EmotionSet",
|
||||||
|
"onyx:hasEmotion": [
|
||||||
|
{
|
||||||
|
"@id": "Emotion0",
|
||||||
|
"@type": "Emotion",
|
||||||
|
"emoml:pad-dimensions_arousal": 5.43,
|
||||||
|
"emoml:pad-dimensions_dominance": 6.41,
|
||||||
|
"emoml:pad-dimensions_pleasure": 7.47,
|
||||||
|
"prov:wasGeneratedBy": "prefix:Analysis_1562744962.896306"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"prov:wasGeneratedBy": "prefix:Analysis_1562744962.896306"
|
||||||
|
},
|
||||||
|
"prov:wasGeneratedBy": "prefix:Analysis_1562744962.8978968"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
... rest of the document ...
|
|
||||||
{
|
|
||||||
"@type": "emotionSet",
|
|
||||||
"onyx:hasEmotion": {
|
|
||||||
"@type": "emotion",
|
|
||||||
"onyx:hasEmotionCategory": "emoml:big6anger"
|
|
||||||
},
|
|
||||||
"prov:wasGeneratedBy": "plugins/emoRand_0.1"
|
|
||||||
"onyx:wasDerivedFrom": {
|
|
||||||
"@type": "emotionSet",
|
|
||||||
"onyx:hasEmotion": {
|
|
||||||
"@type": "emotion",
|
|
||||||
"A": 7.22,
|
|
||||||
"D": 6.28,
|
|
||||||
"V": 8.6
|
|
||||||
},
|
|
||||||
"prov:wasGeneratedBy": "plugins/Ekman2VAD_0.1"
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
Lastly, `conversion=filtered` would only return the converted emotions.
|
Lastly, `conversion=filtered` would only return the converted emotions.
|
||||||
|
|
||||||
|
|
||||||
|
.. code:: json
|
||||||
|
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"@type": "EmotionSet",
|
||||||
|
"onyx:hasEmotion": [
|
||||||
|
{
|
||||||
|
"@type": "Emotion",
|
||||||
|
"onyx:algorithmConfidence": 4.4979,
|
||||||
|
"onyx:hasEmotionCategory": "emoml:big6happiness"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"prov:wasGeneratedBy": "prefix:Analysis_1562744925.7322266"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
Developing a conversion plugin
|
Developing a conversion plugin
|
||||||
================================
|
==============================
|
||||||
|
|
||||||
Conversion plugins are discovered by the server just like any other plugin.
|
Conversion plugins are discovered by the server just like any other plugin.
|
||||||
The difference is the slightly different API, and the need to specify the `source` and `target` of the conversion.
|
The difference is the slightly different API, and the need to specify the `source` and `target` of the conversion.
|
||||||
@@ -106,7 +165,6 @@ For instance, an emotion conversion plugin needs the following:
|
|||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
.. code:: python
|
.. code:: python
|
||||||
|
|
||||||
|
|
||||||
@@ -114,3 +172,6 @@ For instance, an emotion conversion plugin needs the following:
|
|||||||
|
|
||||||
def convert(self, emotionSet, fromModel, toModel, params):
|
def convert(self, emotionSet, fromModel, toModel, params):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
More implementation details are shown in the `centroids plugin <https://github.com/gsi-upm/senpy/blob/master/senpy/plugins/postprocessing/emotion/centroids.py>`_.
|
||||||
|
@@ -1,16 +1,13 @@
|
|||||||
Demo
|
Demo
|
||||||
----
|
----
|
||||||
|
|
||||||
There is a demo available on http://senpy.cluster.gsi.dit.upm.es/, where you can test a serie of different plugins.
|
There is a demo available on http://senpy.gsi.upm.es/, where you can test a live instance of Senpy, with several open source plugins.
|
||||||
You can use the playground (a web interface) or make HTTP requests to the service API.
|
You can use the playground (a web interface) or the HTTP API.
|
||||||
|
|
||||||
.. image:: senpy-playground.png
|
.. image:: playground-0.20.png
|
||||||
:height: 400px
|
:target: http://senpy.gsi.upm.es
|
||||||
:width: 800px
|
:width: 800px
|
||||||
:scale: 100 %
|
|
||||||
:align: center
|
:align: center
|
||||||
|
|
||||||
Plugins Demo
|
|
||||||
============
|
|
||||||
|
|
||||||
The source code and description of the plugins used in the demo is available here: https://lab.cluster.gsi.dit.upm.es/senpy/senpy-plugins-community/.
|
The source code and description of the plugins used in the demo are available here: https://github.com/gsi-upm/senpy-plugins-community/.
|
||||||
|
25
docs/development.rst
Normal file
25
docs/development.rst
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
Developing new services
|
||||||
|
-----------------------
|
||||||
|
|
||||||
|
Developing web services can be hard.
|
||||||
|
A text analysis service must implement all the typical features, such as: extraction of parameters, validation, format conversion, visualization...
|
||||||
|
|
||||||
|
Senpy implements all the common blocks, so developers can focus on what really matters: great analysis algorithms that solve real problems.
|
||||||
|
Among other things, Senpy takes care of these tasks:
|
||||||
|
|
||||||
|
* Interfacing with the user: parameter validation, error handling.
|
||||||
|
* Formatting: JSON-LD, Turtle/n-triples input and output, or simple text input
|
||||||
|
* Linked Data: senpy results are semantically annotated, using a series of well established vocabularies, and sane default URIs.
|
||||||
|
* User interface: a web UI where users can explore your service and test different settings
|
||||||
|
* A client to interact with the service. Currently only available in Python.
|
||||||
|
|
||||||
|
You only need to provide the algorithm to turn a piece of text into an annotation
|
||||||
|
Sharing your sentiment analysis with the world has never been easier!
|
||||||
|
|
||||||
|
.. toctree::
|
||||||
|
:maxdepth: 1
|
||||||
|
|
||||||
|
server-cli
|
||||||
|
plugins-quickstart
|
||||||
|
plugins-faq
|
||||||
|
plugins-definition
|
BIN
docs/eval_table.png
Normal file
BIN
docs/eval_table.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 77 KiB |
BIN
docs/evaluation-results.png
Normal file
BIN
docs/evaluation-results.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 76 KiB |
@@ -1,5 +1,6 @@
|
|||||||
Examples
|
Examples
|
||||||
------
|
--------
|
||||||
|
|
||||||
All the examples in this page use the :download:`the main schema <_static/schemas/definitions.json>`.
|
All the examples in this page use the :download:`the main schema <_static/schemas/definitions.json>`.
|
||||||
|
|
||||||
Simple NIF annotation
|
Simple NIF annotation
|
||||||
@@ -17,6 +18,7 @@ Sentiment Analysis
|
|||||||
.....................
|
.....................
|
||||||
Description
|
Description
|
||||||
,,,,,,,,,,,
|
,,,,,,,,,,,
|
||||||
|
|
||||||
This annotation corresponds to the sentiment analysis of an input. The example shows the sentiment represented according to Marl format.
|
This annotation corresponds to the sentiment analysis of an input. The example shows the sentiment represented according to Marl format.
|
||||||
The sentiments detected are contained in the Sentiments array with their related part of the text.
|
The sentiments detected are contained in the Sentiments array with their related part of the text.
|
||||||
|
|
||||||
@@ -24,20 +26,7 @@ Representation
|
|||||||
,,,,,,,,,,,,,,
|
,,,,,,,,,,,,,,
|
||||||
|
|
||||||
.. literalinclude:: examples/results/example-sentiment.json
|
.. literalinclude:: examples/results/example-sentiment.json
|
||||||
:emphasize-lines: 5-10,25-33
|
:emphasize-lines: 5-11,20-30
|
||||||
:language: json-ld
|
|
||||||
|
|
||||||
Suggestion Mining
|
|
||||||
.................
|
|
||||||
Description
|
|
||||||
,,,,,,,,,,,
|
|
||||||
The suggestions schema represented below shows the suggestions detected in the text. Within it, we can find the NIF fields highlighted that corresponds to the text of the detected suggestion.
|
|
||||||
|
|
||||||
Representation
|
|
||||||
,,,,,,,,,,,,,,
|
|
||||||
|
|
||||||
.. literalinclude:: examples/results/example-suggestion.json
|
|
||||||
:emphasize-lines: 5-8,22-27
|
|
||||||
:language: json-ld
|
:language: json-ld
|
||||||
|
|
||||||
Emotion Analysis
|
Emotion Analysis
|
||||||
@@ -51,28 +40,6 @@ Representation
|
|||||||
|
|
||||||
.. literalinclude:: examples/results/example-emotion.json
|
.. literalinclude:: examples/results/example-emotion.json
|
||||||
:language: json-ld
|
:language: json-ld
|
||||||
:emphasize-lines: 5-8,25-37
|
:emphasize-lines: 5-11,22-36
|
||||||
|
|
||||||
Named Entity Recognition
|
|
||||||
........................
|
|
||||||
Description
|
|
||||||
,,,,,,,,,,,
|
|
||||||
The Named Entity Recognition is represented as follows. In this particular case, it can be seen within the entities array the entities recognised. For the example input, Microsoft and Windows Phone are the ones detected.
|
|
||||||
Representation
|
|
||||||
,,,,,,,,,,,,,,
|
|
||||||
|
|
||||||
.. literalinclude:: examples/results/example-ner.json
|
|
||||||
:emphasize-lines: 5-8,19-34
|
|
||||||
:language: json-ld
|
|
||||||
|
|
||||||
Complete example
|
|
||||||
................
|
|
||||||
Description
|
|
||||||
,,,,,,,,,,,
|
|
||||||
This example covers all of the above cases, integrating all the annotations in the same document.
|
|
||||||
|
|
||||||
Representation
|
|
||||||
,,,,,,,,,,,,,,
|
|
||||||
|
|
||||||
.. literalinclude:: examples/results/example-complete.json
|
|
||||||
:language: json-ld
|
|
||||||
|
@@ -2,11 +2,22 @@
|
|||||||
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
||||||
"@id": "me:Result1",
|
"@id": "me:Result1",
|
||||||
"@type": "results",
|
"@type": "results",
|
||||||
"analysis": [
|
"activities": [
|
||||||
"me:SAnalysis1",
|
{
|
||||||
"me:SgAnalysis1",
|
"@id": "_:SAnalysis1_Activity",
|
||||||
"me:EmotionAnalysis1",
|
"@type": "marl:SentimentAnalysis",
|
||||||
"me:NER1"
|
"prov:wasAssociatedWith": "me:SAnalysis1"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"@id": "_:EmotionAnalysis1_Activity",
|
||||||
|
"@type": "onyx:EmotionAnalysis",
|
||||||
|
"prov:wasAssociatedWith": "me:EmotionAnalysis1"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"@id": "_:NER1_Activity",
|
||||||
|
"@type": "me:NER",
|
||||||
|
"prov:wasAssociatedWith": "me:NER1"
|
||||||
|
}
|
||||||
],
|
],
|
||||||
"entries": [
|
"entries": [
|
||||||
{
|
{
|
||||||
@@ -23,7 +34,7 @@
|
|||||||
"nif:endIndex": 13,
|
"nif:endIndex": 13,
|
||||||
"nif:anchorOf": "Microsoft",
|
"nif:anchorOf": "Microsoft",
|
||||||
"me:references": "http://dbpedia.org/page/Microsoft",
|
"me:references": "http://dbpedia.org/page/Microsoft",
|
||||||
"prov:wasGeneratedBy": "me:NER1"
|
"prov:wasGeneratedBy": "_:NER1_Activity"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"@id": "http://micro.blog/status1#char=25,37",
|
"@id": "http://micro.blog/status1#char=25,37",
|
||||||
@@ -31,7 +42,7 @@
|
|||||||
"nif:endIndex": 37,
|
"nif:endIndex": 37,
|
||||||
"nif:anchorOf": "Windows Phone",
|
"nif:anchorOf": "Windows Phone",
|
||||||
"me:references": "http://dbpedia.org/page/Windows_Phone",
|
"me:references": "http://dbpedia.org/page/Windows_Phone",
|
||||||
"prov:wasGeneratedBy": "me:NER1"
|
"prov:wasGeneratedBy": "_:NER1_Activity"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"suggestions": [
|
"suggestions": [
|
||||||
@@ -40,7 +51,7 @@
|
|||||||
"nif:beginIndex": 16,
|
"nif:beginIndex": 16,
|
||||||
"nif:endIndex": 77,
|
"nif:endIndex": 77,
|
||||||
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
|
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
|
||||||
"prov:wasGeneratedBy": "me:SgAnalysis1"
|
"prov:wasGeneratedBy": "_:SgAnalysis1_Activity"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"sentiments": [
|
"sentiments": [
|
||||||
@@ -51,14 +62,14 @@
|
|||||||
"nif:anchorOf": "You'll be awesome.",
|
"nif:anchorOf": "You'll be awesome.",
|
||||||
"marl:hasPolarity": "marl:Positive",
|
"marl:hasPolarity": "marl:Positive",
|
||||||
"marl:polarityValue": 0.9,
|
"marl:polarityValue": 0.9,
|
||||||
"prov:wasGeneratedBy": "me:SAnalysis1"
|
"prov:wasGeneratedBy": "_:SgAnalysis1_Activity"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"emotions": [
|
"emotions": [
|
||||||
{
|
{
|
||||||
"@id": "http://micro.blog/status1#char=0,109",
|
"@id": "http://micro.blog/status1#char=0,109",
|
||||||
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
||||||
"prov:wasGeneratedBy": "me:EAnalysis1",
|
"prov:wasGeneratedBy": "_:EmotionAnalysis1_Activity",
|
||||||
"onyx:hasEmotion": [
|
"onyx:hasEmotion": [
|
||||||
{
|
{
|
||||||
"onyx:hasEmotionCategory": "wna:liking"
|
"onyx:hasEmotionCategory": "wna:liking"
|
||||||
|
@@ -1,78 +0,0 @@
|
|||||||
{
|
|
||||||
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
|
||||||
"@id": "me:Result1",
|
|
||||||
"@type": "results",
|
|
||||||
"analysis": [
|
|
||||||
"me:SAnalysis1",
|
|
||||||
"me:SgAnalysis1",
|
|
||||||
"me:EmotionAnalysis1",
|
|
||||||
"me:NER1",
|
|
||||||
{
|
|
||||||
"@type": "analysis",
|
|
||||||
"@id": "anonymous"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"entries": [
|
|
||||||
{
|
|
||||||
"@id": "http://micro.blog/status1",
|
|
||||||
"@type": [
|
|
||||||
"nif:RFC5147String",
|
|
||||||
"nif:Context"
|
|
||||||
],
|
|
||||||
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
|
||||||
"entities": [
|
|
||||||
{
|
|
||||||
"@id": "http://micro.blog/status1#char=5,13",
|
|
||||||
"nif:beginIndex": 5,
|
|
||||||
"nif:endIndex": 13,
|
|
||||||
"nif:anchorOf": "Microsoft",
|
|
||||||
"me:references": "http://dbpedia.org/page/Microsoft",
|
|
||||||
"prov:wasGeneratedBy": "me:NER1"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"@id": "http://micro.blog/status1#char=25,37",
|
|
||||||
"nif:beginIndex": 25,
|
|
||||||
"nif:endIndex": 37,
|
|
||||||
"nif:anchorOf": "Windows Phone",
|
|
||||||
"me:references": "http://dbpedia.org/page/Windows_Phone",
|
|
||||||
"prov:wasGeneratedBy": "me:NER1"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"suggestions": [
|
|
||||||
{
|
|
||||||
"@id": "http://micro.blog/status1#char=16,77",
|
|
||||||
"nif:beginIndex": 16,
|
|
||||||
"nif:endIndex": 77,
|
|
||||||
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
|
|
||||||
"prov:wasGeneratedBy": "me:SgAnalysis1"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"sentiments": [
|
|
||||||
{
|
|
||||||
"@id": "http://micro.blog/status1#char=80,97",
|
|
||||||
"nif:beginIndex": 80,
|
|
||||||
"nif:endIndex": 97,
|
|
||||||
"nif:anchorOf": "You'll be awesome.",
|
|
||||||
"marl:hasPolarity": "marl:Positive",
|
|
||||||
"marl:polarityValue": 0.9,
|
|
||||||
"prov:wasGeneratedBy": "me:SAnalysis1"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"emotions": [
|
|
||||||
{
|
|
||||||
"@id": "http://micro.blog/status1#char=0,109",
|
|
||||||
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
|
||||||
"prov:wasGeneratedBy": "me:EAnalysis1",
|
|
||||||
"onyx:hasEmotion": [
|
|
||||||
{
|
|
||||||
"onyx:hasEmotionCategory": "wna:liking"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"onyx:hasEmotionCategory": "wna:excitement"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
@@ -1,19 +1,18 @@
|
|||||||
{
|
{
|
||||||
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
||||||
"@id": "http://example.com#NIFExample",
|
"@id": "me:Result1",
|
||||||
"@type": "results",
|
"@type": "results",
|
||||||
"analysis": [
|
"activities": [ ],
|
||||||
],
|
"entries": [
|
||||||
"entries": [
|
{
|
||||||
{
|
"@id": "http://example.org#char=0,40",
|
||||||
"@id": "http://example.org#char=0,40",
|
"@type": [
|
||||||
"@type": [
|
"nif:RFC5147String",
|
||||||
"nif:RFC5147String",
|
"nif:Context"
|
||||||
"nif:Context"
|
],
|
||||||
],
|
"nif:beginIndex": 0,
|
||||||
"nif:beginIndex": 0,
|
"nif:endIndex": 40,
|
||||||
"nif:endIndex": 40,
|
"nif:isString": "My favourite actress is Natalie Portman"
|
||||||
"nif:isString": "My favourite actress is Natalie Portman"
|
}
|
||||||
}
|
]
|
||||||
]
|
|
||||||
}
|
}
|
||||||
|
@@ -1,88 +1,100 @@
|
|||||||
{
|
{
|
||||||
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
||||||
"@id": "me:Result1",
|
"@id": "me:Result1",
|
||||||
"@type": "results",
|
"@type": "results",
|
||||||
"analysis": [
|
"activities": [
|
||||||
{
|
|
||||||
"@id": "me:SAnalysis1",
|
|
||||||
"@type": "marl:SentimentAnalysis",
|
|
||||||
"marl:maxPolarityValue": 1,
|
|
||||||
"marl:minPolarityValue": 0
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"@id": "me:SgAnalysis1",
|
|
||||||
"@type": "me:SuggestionAnalysis"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"@id": "me:EmotionAnalysis1",
|
|
||||||
"@type": "me:EmotionAnalysis"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"@id": "me:NER1",
|
|
||||||
"@type": "me:NER"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"entries": [
|
|
||||||
{
|
|
||||||
"@id": "http://micro.blog/status1",
|
|
||||||
"@type": [
|
|
||||||
"nif:RFC5147String",
|
|
||||||
"nif:Context"
|
|
||||||
],
|
|
||||||
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
|
||||||
"entities": [
|
|
||||||
{
|
{
|
||||||
"@id": "http://micro.blog/status1#char=5,13",
|
"@id": "_:SAnalysis1_Activity",
|
||||||
"nif:beginIndex": 5,
|
"@type": "marl:SentimentAnalysis",
|
||||||
"nif:endIndex": 13,
|
"prov:wasAssociatedWith": "me:SentimentAnalysis",
|
||||||
"nif:anchorOf": "Microsoft",
|
"prov:used": [
|
||||||
"me:references": "http://dbpedia.org/page/Microsoft",
|
{
|
||||||
"prov:wasGeneratedBy": "me:NER1"
|
"name": "marl:maxPolarityValue",
|
||||||
|
"prov:value": "1"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "marl:minPolarityValue",
|
||||||
|
"prov:value": "0"
|
||||||
|
}
|
||||||
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"@id": "http://micro.blog/status1#char=25,37",
|
"@id": "_:SgAnalysis1_Activity",
|
||||||
"nif:beginIndex": 25,
|
"prov:wasAssociatedWith": "me:SgAnalysis1",
|
||||||
"nif:endIndex": 37,
|
"@type": "me:SuggestionAnalysis"
|
||||||
"nif:anchorOf": "Windows Phone",
|
},
|
||||||
"me:references": "http://dbpedia.org/page/Windows_Phone",
|
|
||||||
"prov:wasGeneratedBy": "me:NER1"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"suggestions": [
|
|
||||||
{
|
{
|
||||||
"@id": "http://micro.blog/status1#char=16,77",
|
"@id": "_:EmotionAnalysis1_Activity",
|
||||||
"nif:beginIndex": 16,
|
"@type": "me:EmotionAnalysis",
|
||||||
"nif:endIndex": 77,
|
"prov:wasAssociatedWith": "me:EmotionAnalysis1"
|
||||||
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
|
},
|
||||||
"prov:wasGeneratedBy": "me:SgAnalysis1"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"sentiments": [
|
|
||||||
{
|
{
|
||||||
"@id": "http://micro.blog/status1#char=80,97",
|
"@id": "_:NER1_Activity",
|
||||||
"nif:beginIndex": 80,
|
"@type": "me:NER",
|
||||||
"nif:endIndex": 97,
|
"prov:wasAssociatedWith": "me:EmotionNER1"
|
||||||
"nif:anchorOf": "You'll be awesome.",
|
|
||||||
"marl:hasPolarity": "marl:Positive",
|
|
||||||
"marl:polarityValue": 0.9,
|
|
||||||
"prov:wasGeneratedBy": "me:SAnalysis1"
|
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"emotions": [
|
"entries": [
|
||||||
{
|
{
|
||||||
"@id": "http://micro.blog/status1#char=0,109",
|
"@id": "http://micro.blog/status1",
|
||||||
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
"@type": [
|
||||||
"prov:wasGeneratedBy": "me:EAnalysis1",
|
"nif:RFC5147String",
|
||||||
"onyx:hasEmotion": [
|
"nif:Context"
|
||||||
{
|
],
|
||||||
"onyx:hasEmotionCategory": "wna:liking"
|
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
||||||
},
|
"entities": [
|
||||||
{
|
{
|
||||||
"onyx:hasEmotionCategory": "wna:excitement"
|
"@id": "http://micro.blog/status1#char=5,13",
|
||||||
}
|
"nif:beginIndex": 5,
|
||||||
]
|
"nif:endIndex": 13,
|
||||||
|
"nif:anchorOf": "Microsoft",
|
||||||
|
"me:references": "http://dbpedia.org/page/Microsoft",
|
||||||
|
"prov:wasGeneratedBy": "me:NER1"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"@id": "http://micro.blog/status1#char=25,37",
|
||||||
|
"nif:beginIndex": 25,
|
||||||
|
"nif:endIndex": 37,
|
||||||
|
"nif:anchorOf": "Windows Phone",
|
||||||
|
"me:references": "http://dbpedia.org/page/Windows_Phone",
|
||||||
|
"prov:wasGeneratedBy": "me:NER1"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"suggestions": [
|
||||||
|
{
|
||||||
|
"@id": "http://micro.blog/status1#char=16,77",
|
||||||
|
"nif:beginIndex": 16,
|
||||||
|
"nif:endIndex": 77,
|
||||||
|
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
|
||||||
|
"prov:wasGeneratedBy": "me:SgAnalysis1"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"sentiments": [
|
||||||
|
{
|
||||||
|
"@id": "http://micro.blog/status1#char=80,97",
|
||||||
|
"nif:beginIndex": 80,
|
||||||
|
"nif:endIndex": 97,
|
||||||
|
"nif:anchorOf": "You'll be awesome.",
|
||||||
|
"marl:hasPolarity": "marl:Positive",
|
||||||
|
"marl:polarityValue": 0.9,
|
||||||
|
"prov:wasGeneratedBy": "me:SAnalysis1"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"emotions": [
|
||||||
|
{
|
||||||
|
"@id": "http://micro.blog/status1#char=0,109",
|
||||||
|
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
||||||
|
"prov:wasGeneratedBy": "me:EAnalysis1",
|
||||||
|
"onyx:hasEmotion": [
|
||||||
|
{
|
||||||
|
"onyx:hasEmotionCategory": "wna:liking"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"onyx:hasEmotionCategory": "wna:excitement"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
|
@@ -2,10 +2,11 @@
|
|||||||
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
||||||
"@id": "me:Result1",
|
"@id": "me:Result1",
|
||||||
"@type": "results",
|
"@type": "results",
|
||||||
"analysis": [
|
"activities": [
|
||||||
{
|
{
|
||||||
"@id": "me:EmotionAnalysis1",
|
"@id": "me:EmotionAnalysis1_Activity",
|
||||||
"@type": "onyx:EmotionAnalysis"
|
"@type": "me:EmotionAnalysis1",
|
||||||
|
"prov:wasAssociatedWith": "me:EmotionAnalysis1"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"entries": [
|
"entries": [
|
||||||
@@ -16,17 +17,13 @@
|
|||||||
"nif:Context"
|
"nif:Context"
|
||||||
],
|
],
|
||||||
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
||||||
"entities": [
|
|
||||||
],
|
|
||||||
"suggestions": [
|
|
||||||
],
|
|
||||||
"sentiments": [
|
"sentiments": [
|
||||||
],
|
],
|
||||||
"emotions": [
|
"emotions": [
|
||||||
{
|
{
|
||||||
"@id": "http://micro.blog/status1#char=0,109",
|
"@id": "http://micro.blog/status1#char=0,109",
|
||||||
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
||||||
"prov:wasGeneratedBy": "me:EmotionAnalysis1",
|
"prov:wasGeneratedBy": "_:EmotionAnalysis1_Activity",
|
||||||
"onyx:hasEmotion": [
|
"onyx:hasEmotion": [
|
||||||
{
|
{
|
||||||
"onyx:hasEmotionCategory": "wna:liking"
|
"onyx:hasEmotionCategory": "wna:liking"
|
||||||
|
@@ -2,10 +2,11 @@
|
|||||||
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
||||||
"@id": "me:Result1",
|
"@id": "me:Result1",
|
||||||
"@type": "results",
|
"@type": "results",
|
||||||
"analysis": [
|
"activities": [
|
||||||
{
|
{
|
||||||
"@id": "me:NER1",
|
"@id": "_:NER1_Activity",
|
||||||
"@type": "me:NERAnalysis"
|
"@type": "me:NERAnalysis",
|
||||||
|
"prov:wasAssociatedWith": "me:NER1"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"entries": [
|
"entries": [
|
||||||
|
@@ -7,11 +7,17 @@
|
|||||||
],
|
],
|
||||||
"@id": "me:Result1",
|
"@id": "me:Result1",
|
||||||
"@type": "results",
|
"@type": "results",
|
||||||
"analysis": [
|
"activities": [
|
||||||
{
|
{
|
||||||
"@id": "me:HesamsAnalysis",
|
"@id": "me:HesamsAnalysis_Activity",
|
||||||
"@type": "onyx:EmotionAnalysis",
|
"@type": "onyx:EmotionAnalysis",
|
||||||
"onyx:usesEmotionModel": "emovoc:pad-dimensions"
|
"prov:wasAssociatedWith": "me:HesamsAnalysis",
|
||||||
|
"prov:used": [
|
||||||
|
{
|
||||||
|
"name": "emotion-model",
|
||||||
|
"prov:value": "emovoc:pad-dimensions"
|
||||||
|
}
|
||||||
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"entries": [
|
"entries": [
|
||||||
@@ -32,7 +38,7 @@
|
|||||||
{
|
{
|
||||||
"@id": "Entry1#char=0,21",
|
"@id": "Entry1#char=0,21",
|
||||||
"nif:anchorOf": "This is a test string",
|
"nif:anchorOf": "This is a test string",
|
||||||
"prov:wasGeneratedBy": "me:HesamAnalysis",
|
"prov:wasGeneratedBy": "_:HesamAnalysis_Activity",
|
||||||
"onyx:hasEmotion": [
|
"onyx:hasEmotion": [
|
||||||
{
|
{
|
||||||
"emovoc:pleasure": 0.5,
|
"emovoc:pleasure": 0.5,
|
||||||
|
@@ -2,12 +2,11 @@
|
|||||||
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
||||||
"@id": "me:Result1",
|
"@id": "me:Result1",
|
||||||
"@type": "results",
|
"@type": "results",
|
||||||
"analysis": [
|
"activities": [
|
||||||
{
|
{
|
||||||
"@id": "me:SAnalysis1",
|
"@id": "_:SAnalysis1_Activity",
|
||||||
"@type": "marl:SentimentAnalysis",
|
"@type": "marl:SentimentAnalysis",
|
||||||
"marl:maxPolarityValue": 1,
|
"prov:wasAssociatedWith": "me:SAnalysis1"
|
||||||
"marl:minPolarityValue": 0
|
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"entries": [
|
"entries": [
|
||||||
@@ -18,10 +17,6 @@
|
|||||||
"nif:Context"
|
"nif:Context"
|
||||||
],
|
],
|
||||||
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
||||||
"entities": [
|
|
||||||
],
|
|
||||||
"suggestions": [
|
|
||||||
],
|
|
||||||
"sentiments": [
|
"sentiments": [
|
||||||
{
|
{
|
||||||
"@id": "http://micro.blog/status1#char=80,97",
|
"@id": "http://micro.blog/status1#char=80,97",
|
||||||
@@ -30,10 +25,10 @@
|
|||||||
"nif:anchorOf": "You'll be awesome.",
|
"nif:anchorOf": "You'll be awesome.",
|
||||||
"marl:hasPolarity": "marl:Positive",
|
"marl:hasPolarity": "marl:Positive",
|
||||||
"marl:polarityValue": 0.9,
|
"marl:polarityValue": 0.9,
|
||||||
"prov:wasGeneratedBy": "me:SAnalysis1"
|
"prov:wasGeneratedBy": "_:SAnalysis1_Activity"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"emotionSets": [
|
"emotions": [
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
@@ -2,8 +2,12 @@
|
|||||||
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
||||||
"@id": "me:Result1",
|
"@id": "me:Result1",
|
||||||
"@type": "results",
|
"@type": "results",
|
||||||
"analysis": [
|
"activities": [
|
||||||
"me:SgAnalysis1"
|
{
|
||||||
|
"@id": "_:SgAnalysis1_Activity",
|
||||||
|
"@type": "me:SuggestionAnalysis",
|
||||||
|
"prov:wasAssociatedWith": "me:SgAnalysis1"
|
||||||
|
}
|
||||||
],
|
],
|
||||||
"entries": [
|
"entries": [
|
||||||
{
|
{
|
||||||
@@ -12,7 +16,6 @@
|
|||||||
"nif:RFC5147String",
|
"nif:RFC5147String",
|
||||||
"nif:Context"
|
"nif:Context"
|
||||||
],
|
],
|
||||||
"prov:wasGeneratedBy": "me:SAnalysis1",
|
|
||||||
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
||||||
"entities": [
|
"entities": [
|
||||||
],
|
],
|
||||||
@@ -22,7 +25,7 @@
|
|||||||
"nif:beginIndex": 16,
|
"nif:beginIndex": 16,
|
||||||
"nif:endIndex": 77,
|
"nif:endIndex": 77,
|
||||||
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
|
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
|
||||||
"prov:wasGeneratedBy": "me:SgAnalysis1"
|
"prov:wasGeneratedBy": "_:SgAnalysis1_Activity"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"sentiments": [
|
"sentiments": [
|
||||||
|
119
docs/index.rst
119
docs/index.rst
@@ -1,35 +1,108 @@
|
|||||||
Welcome to Senpy's documentation!
|
Welcome to Senpy's documentation!
|
||||||
=================================
|
=================================
|
||||||
|
|
||||||
.. image:: https://readthedocs.org/projects/senpy/badge/?version=latest
|
.. image:: https://readthedocs.org/projects/senpy/badge/?version=latest
|
||||||
:target: http://senpy.readthedocs.io/en/latest/
|
:target: http://senpy.readthedocs.io/en/latest/
|
||||||
.. image:: https://badge.fury.io/py/senpy.svg
|
.. image:: https://badge.fury.io/py/senpy.svg
|
||||||
:target: https://badge.fury.io/py/senpy
|
:target: https://badge.fury.io/py/senpy
|
||||||
.. image:: https://lab.cluster.gsi.dit.upm.es/senpy/senpy/badges/master/build.svg
|
.. image:: https://lab.gsi.upm.es/senpy/senpy/badges/master/build.svg
|
||||||
:target: https://lab.cluster.gsi.dit.upm.es/senpy/senpy/commits/master
|
:target: https://lab.gsi.upm.es/senpy/senpy/commits/master
|
||||||
.. image:: https://lab.cluster.gsi.dit.upm.es/senpy/senpy/badges/master/coverage.svg
|
.. image:: https://lab.gsi.upm.es/senpy/senpy/badges/master/coverage.svg
|
||||||
:target: https://lab.cluster.gsi.dit.upm.es/senpy/senpy/commits/master
|
:target: https://lab.gsi.upm.es/senpy/senpy/commits/master
|
||||||
.. image:: https://img.shields.io/pypi/l/requests.svg
|
.. image:: https://img.shields.io/pypi/l/requests.svg
|
||||||
:target: https://lab.cluster.gsi.dit.upm.es/senpy/senpy/
|
:target: https://lab.gsi.upm.es/senpy/senpy/
|
||||||
|
|
||||||
|
Senpy is a framework to build sentiment and emotion analysis services.
|
||||||
|
It provides functionalities for:
|
||||||
|
|
||||||
|
- developing sentiment and emotion classifier and exposing them as an HTTP service
|
||||||
|
- requesting sentiment and emotion analysis from different providers (i.e. Vader, Sentimet140, ...) using the same interface (:doc:`apischema`). In this way, applications do not depend on the API offered for these services.
|
||||||
|
- combining services that use different sentiment model (e.g. polarity between [-1, 1] or [0,1] or emotion models (e.g. Ekkman or VAD)
|
||||||
|
- evaluating sentiment algorithms with well known datasets
|
||||||
|
|
||||||
|
|
||||||
|
Using senpy services is as simple as sending an HTTP request with your favourite tool or library.
|
||||||
|
Let's analyze the sentiment of the text "Senpy is awesome".
|
||||||
|
|
||||||
|
We can call the `Sentiment140 <http://www.sentiment140.com/>`_ service with an HTTP request using curl:
|
||||||
|
|
||||||
|
|
||||||
|
.. code:: shell
|
||||||
|
:emphasize-lines: 14,18
|
||||||
|
|
||||||
|
$ curl "http://senpy.gsi.upm.es/api/sentiment140" \
|
||||||
|
--data-urlencode "input=Senpy is awesome"
|
||||||
|
|
||||||
|
{
|
||||||
|
"@context": "http://senpy.gsi.upm.es/api/contexts/YXBpL3NlbnRpbWVudDE0MD8j",
|
||||||
|
"@type": "Results",
|
||||||
|
"entries": [
|
||||||
|
{
|
||||||
|
"@id": "prefix:",
|
||||||
|
"@type": "Entry",
|
||||||
|
"marl:hasOpinion": [
|
||||||
|
{
|
||||||
|
"@type": "Sentiment",
|
||||||
|
"marl:hasPolarity": "marl:Positive",
|
||||||
|
"prov:wasGeneratedBy": "prefix:Analysis_1554389334.6431913"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"nif:isString": "Senpy is awesome",
|
||||||
|
"onyx:hasEmotionSet": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Congratulations, you’ve used your first senpy service!
|
||||||
|
You can observe the result: the polarity is positive (marl:Positive). The reason of this prefix is that Senpy follows a linked data approach.
|
||||||
|
|
||||||
|
You can analyze the same sentence using a different sentiment service (e.g. Vader) and requesting a different format (e.g. turtle):
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
Senpy is a framework for sentiment and emotion analysis services.
|
.. code:: shell
|
||||||
Services built with senpy are interchangeable and easy to use because they share a common :doc:`apischema`.
|
|
||||||
It also simplifies service development.
|
|
||||||
|
|
||||||
.. image:: senpy-architecture.png
|
$ curl "http://senpy.gsi.upm.es/api/sentiment-vader" \
|
||||||
:width: 100%
|
--data-urlencode "input=Senpy is awesome" \
|
||||||
:align: center
|
--data-urlencode "outformat=turtle"
|
||||||
|
|
||||||
|
@prefix : <http://www.gsi.upm.es/onto/senpy/ns#> .
|
||||||
|
@prefix endpoint: <http://senpy.gsi.upm.es/api/> .
|
||||||
|
@prefix marl: <http://www.gsi.dit.upm.es/ontologies/marl/ns#> .
|
||||||
|
@prefix nif: <http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core#> .
|
||||||
|
@prefix prefix: <http://senpy.invalid/> .
|
||||||
|
@prefix prov: <http://www.w3.org/ns/prov#> .
|
||||||
|
@prefix senpy: <http://www.gsi.upm.es/onto/senpy/ns#> .
|
||||||
|
|
||||||
|
prefix: a senpy:Entry ;
|
||||||
|
nif:isString "Senpy is awesome" ;
|
||||||
|
marl:hasOpinion [ a senpy:Sentiment ;
|
||||||
|
marl:hasPolarity "marl:Positive" ;
|
||||||
|
marl:polarityValue 6.72e-01 ;
|
||||||
|
prov:wasGeneratedBy prefix:Analysis_1562668175.9808676 ] .
|
||||||
|
|
||||||
|
[] a senpy:Results ;
|
||||||
|
prov:used prefix: .
|
||||||
|
|
||||||
|
As you see, Vader returns also the polarity value (0.67) in addition to the category (positive).
|
||||||
|
|
||||||
|
If you are interested in consuming Senpy services, read :doc:`Quickstart`.
|
||||||
|
To get familiar with the concepts behind Senpy, and what it can offer for service developers, check out :doc:`development`.
|
||||||
|
:doc:`apischema` contains information about the semantic models and vocabularies used by Senpy.
|
||||||
|
|
||||||
.. toctree::
|
.. toctree::
|
||||||
:caption: Learn more about senpy:
|
:caption: Learn more about senpy:
|
||||||
:maxdepth: 2
|
:maxdepth: 2
|
||||||
|
:hidden:
|
||||||
|
|
||||||
senpy
|
senpy
|
||||||
installation
|
demo
|
||||||
demo
|
Quickstart.ipynb
|
||||||
usage
|
installation
|
||||||
apischema
|
conversion
|
||||||
plugins
|
Evaluation.ipynb
|
||||||
conversion
|
apischema
|
||||||
about
|
development
|
||||||
|
publications
|
||||||
|
projects
|
||||||
|
@@ -17,42 +17,42 @@ Through PIP
|
|||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
|
pip install senpy
|
||||||
|
|
||||||
|
# Or with --user if you get permission errors:
|
||||||
|
|
||||||
pip install --user senpy
|
pip install --user senpy
|
||||||
|
|
||||||
|
|
||||||
Alternatively, you can use the development version:
|
..
|
||||||
|
Alternatively, you can use the development version:
|
||||||
.. code:: bash
|
|
||||||
|
|
||||||
git clone git@github.com:gsi-upm/senpy
|
.. code:: bash
|
||||||
cd senpy
|
|
||||||
pip install --user .
|
|
||||||
|
|
||||||
If you want to install senpy globally, use sudo instead of the ``--user`` flag.
|
git clone git@github.com:gsi-upm/senpy
|
||||||
|
cd senpy
|
||||||
|
pip install --user .
|
||||||
|
|
||||||
Docker Image
|
Docker Image
|
||||||
************
|
************
|
||||||
Build the image or use the pre-built one:
|
The base image of senpy comes with some builtin plugins that you can use:
|
||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
docker run -ti -p 5000:5000 gsiupm/senpy --host 0.0.0.0 --default-plugins
|
docker run -ti -p 5000:5000 gsiupm/senpy --host 0.0.0.0
|
||||||
|
|
||||||
To add custom plugins, use a docker volume:
|
To add your custom plugins, you can use a docker volume:
|
||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
docker run -ti -p 5000:5000 -v <PATH OF PLUGINS>:/plugins gsiupm/senpy --host 0.0.0.0 --default-plugins -f /plugins
|
docker run -ti -p 5000:5000 -v <PATH OF PLUGINS>:/plugins gsiupm/senpy --host 0.0.0.0 --plugins -f /plugins
|
||||||
|
|
||||||
|
|
||||||
Python 2
|
|
||||||
........
|
|
||||||
|
|
||||||
There is a Senpy version for python2 too:
|
There is a Senpy image for **python 2**, too:
|
||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
docker run -ti -p 5000:5000 gsiupm/senpy:python2.7 --host 0.0.0.0 --default-plugins
|
docker run -ti -p 5000:5000 gsiupm/senpy:python2.7 --host 0.0.0.0
|
||||||
|
|
||||||
|
|
||||||
Alias
|
Alias
|
||||||
@@ -62,7 +62,7 @@ If you are using the docker approach regularly, it is advisable to use a script
|
|||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
alias senpy='docker run --rm -ti -p 5000:5000 -v $PWD:/senpy-plugins gsiupm/senpy --default-plugins'
|
alias senpy='docker run --rm -ti -p 5000:5000 -v $PWD:/senpy-plugins gsiupm/senpy'
|
||||||
|
|
||||||
|
|
||||||
Now, you may run senpy from any folder in your computer like so:
|
Now, you may run senpy from any folder in your computer like so:
|
||||||
|
BIN
docs/playground-0.20.png
Normal file
BIN
docs/playground-0.20.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 68 KiB |
@@ -9,20 +9,21 @@ Lastly, it is also possible to add new plugins programmatically.
|
|||||||
|
|
||||||
.. contents:: :local:
|
.. contents:: :local:
|
||||||
|
|
||||||
What is a plugin?
|
..
|
||||||
=================
|
What is a plugin?
|
||||||
|
=================
|
||||||
|
|
||||||
A plugin is a program that, given a text, will add annotations to it.
|
A plugin is a program that, given a text, will add annotations to it.
|
||||||
In practice, a plugin consists of at least two files:
|
In practice, a plugin consists of at least two files:
|
||||||
|
|
||||||
- Definition file: a `.senpy` file that describes the plugin (e.g. what input parameters it accepts, what emotion model it uses).
|
- Definition file: a `.senpy` file that describes the plugin (e.g. what input parameters it accepts, what emotion model it uses).
|
||||||
- Python module: the actual code that will add annotations to each input.
|
- Python module: the actual code that will add annotations to each input.
|
||||||
|
|
||||||
This separation allows us to deploy plugins that use the same code but employ different parameters.
|
This separation allows us to deploy plugins that use the same code but employ different parameters.
|
||||||
For instance, one could use the same classifier and processing in several plugins, but train with different datasets.
|
For instance, one could use the same classifier and processing in several plugins, but train with different datasets.
|
||||||
This scenario is particularly useful for evaluation purposes.
|
This scenario is particularly useful for evaluation purposes.
|
||||||
|
|
||||||
The only limitation is that the name of each plugin needs to be unique.
|
The only limitation is that the name of each plugin needs to be unique.
|
||||||
|
|
||||||
Definition files
|
Definition files
|
||||||
================
|
================
|
||||||
@@ -109,5 +110,3 @@ Now, in a file named ``helloworld.py``:
|
|||||||
sentiment['marl:hasPolarity'] = 'marl:Negative'
|
sentiment['marl:hasPolarity'] = 'marl:Negative'
|
||||||
entry.sentiments.append(sentiment)
|
entry.sentiments.append(sentiment)
|
||||||
yield entry
|
yield entry
|
||||||
|
|
||||||
The complete code of the example plugin is available `here <https://lab.cluster.gsi.dit.upm.es/senpy/plugin-prueba>`__.
|
|
||||||
|
@@ -1,61 +1,18 @@
|
|||||||
Developing new plugins
|
F.A.Q.
|
||||||
----------------------
|
======
|
||||||
This document contains the minimum to get you started with developing new analysis plugin.
|
|
||||||
For an example of conversion plugins, see :doc:`conversion`.
|
|
||||||
For a description of definition files, see :doc:`plugins-definition`.
|
|
||||||
|
|
||||||
A more step-by-step tutorial with slides is available `here <https://lab.cluster.gsi.dit.upm.es/senpy/senpy-tutorial>`__
|
|
||||||
|
|
||||||
.. contents:: :local:
|
.. contents:: :local:
|
||||||
|
|
||||||
What is a plugin?
|
|
||||||
=================
|
|
||||||
|
|
||||||
A plugin is a python object that can process entries. Given an entry, it will modify it, add annotations to it, or generate new entries.
|
|
||||||
|
|
||||||
|
|
||||||
What is an entry?
|
|
||||||
=================
|
|
||||||
|
|
||||||
Entries are objects that can be annotated.
|
|
||||||
In general, they will be a piece of text.
|
|
||||||
By default, entries are `NIF contexts <http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core/nif-core.html>`_ represented in JSON-LD format.
|
|
||||||
It is a dictionary/JSON object that looks like this:
|
|
||||||
|
|
||||||
.. code:: python
|
|
||||||
|
|
||||||
{
|
|
||||||
"@id": "<unique identifier or blank node name>",
|
|
||||||
"nif:isString": "input text",
|
|
||||||
"sentiments": [ {
|
|
||||||
...
|
|
||||||
}
|
|
||||||
],
|
|
||||||
...
|
|
||||||
}
|
|
||||||
|
|
||||||
Annotations are added to the object like this:
|
|
||||||
|
|
||||||
.. code:: python
|
|
||||||
|
|
||||||
entry = Entry()
|
|
||||||
entry.vocabulary__annotationName = 'myvalue'
|
|
||||||
entry['vocabulary:annotationName'] = 'myvalue'
|
|
||||||
entry['annotationNameURI'] = 'myvalue'
|
|
||||||
|
|
||||||
Where vocabulary is one of the prefixes defined in the default senpy context, and annotationURI is a full URI.
|
|
||||||
The value may be any valid JSON-LD dictionary.
|
|
||||||
For simplicity, senpy includes a series of models by default in the ``senpy.models`` module.
|
|
||||||
|
|
||||||
|
|
||||||
What are annotations?
|
What are annotations?
|
||||||
=====================
|
#####################
|
||||||
They are objects just like entries.
|
They are objects just like entries.
|
||||||
Senpy ships with several default annotations, including: ``Sentiment``, ``Emotion``, ``EmotionSet``...jk bb
|
Senpy ships with several default annotations, including ``Sentiment`` and ``Emotion``.
|
||||||
|
|
||||||
|
|
||||||
What's a plugin made of?
|
What's a plugin made of?
|
||||||
========================
|
########################
|
||||||
|
|
||||||
When receiving a query, senpy selects what plugin or plugins should process each entry, and in what order.
|
When receiving a query, senpy selects what plugin or plugins should process each entry, and in what order.
|
||||||
It also makes sure the every entry and the parameters provided by the user meet the plugin requirements.
|
It also makes sure the every entry and the parameters provided by the user meet the plugin requirements.
|
||||||
@@ -65,45 +22,33 @@ Hence, two parts are necessary: 1) the code that will process the entry, and 2)
|
|||||||
In practice, this is what a plugin looks like, tests included:
|
In practice, this is what a plugin looks like, tests included:
|
||||||
|
|
||||||
|
|
||||||
.. literalinclude:: ../senpy/plugins/example/rand_plugin.py
|
.. literalinclude:: ../example-plugins/rand_plugin.py
|
||||||
:emphasize-lines: 5-11
|
:emphasize-lines: 21-28
|
||||||
:language: python
|
:language: python
|
||||||
|
|
||||||
|
|
||||||
The lines highlighted contain some information about the plugin.
|
The lines highlighted contain some information about the plugin.
|
||||||
In particular, the following information is mandatory:
|
In particular, the following information is mandatory:
|
||||||
|
|
||||||
* A unique name for the class. In our example, Rand.
|
* A unique name for the class. In our example, sentiment-random.
|
||||||
* The subclass/type of plugin. This is typically either `SentimentPlugin` or `EmotionPlugin`. However, new types of plugin can be created for different annotations. The only requirement is that these new types inherit from `senpy.Analysis`
|
* The subclass/type of plugin. This is typically either `SentimentPlugin` or `EmotionPlugin`. However, new types of plugin can be created for different annotations. The only requirement is that these new types inherit from `senpy.Analysis`
|
||||||
* A description of the plugin. This can be done simply by adding a doc to the class.
|
* A description of the plugin. This can be done simply by adding a doc to the class.
|
||||||
* A version, which should get updated.
|
* A version, which should get updated.
|
||||||
* An author name.
|
* An author name.
|
||||||
|
|
||||||
|
|
||||||
Plugins Code
|
|
||||||
============
|
|
||||||
|
|
||||||
The basic methods in a plugin are:
|
|
||||||
|
|
||||||
* analyse_entry: called in every user requests. It takes two parameters: ``Entry``, the entry object, and ``params``, the parameters supplied by the user. It should yield one or more ``Entry`` objects.
|
|
||||||
* activate: used to load memory-hungry resources. For instance, to train a classifier.
|
|
||||||
* deactivate: used to free up resources when the plugin is no longer needed.
|
|
||||||
|
|
||||||
Plugins are loaded asynchronously, so don't worry if the activate method takes too long. The plugin will be marked as activated once it is finished executing the method.
|
|
||||||
|
|
||||||
|
|
||||||
How does senpy find modules?
|
How does senpy find modules?
|
||||||
============================
|
############################
|
||||||
|
|
||||||
Senpy looks for files of two types:
|
Senpy looks for files of two types:
|
||||||
|
|
||||||
* Python files of the form `senpy_<NAME>.py` or `<NAME>_plugin.py`. In these files, it will look for: 1) Instances that inherit from `senpy.Plugin`, or subclasses of `senpy.Plugin` that can be initialized without a configuration file. i.e. classes that contain all the required attributes for a plugin.
|
* Python files of the form `senpy_<NAME>.py` or `<NAME>_plugin.py`. In these files, it will look for: 1) Instances that inherit from `senpy.Plugin`, or subclasses of `senpy.Plugin` that can be initialized without a configuration file. i.e. classes that contain all the required attributes for a plugin.
|
||||||
* Plugin definition files (see :doc:`advanced-plugins`)
|
* Plugin definition files (see :doc:`plugins-definition`)
|
||||||
|
|
||||||
Defining additional parameters
|
How can I define additional parameters for my plugin?
|
||||||
==============================
|
#####################################################
|
||||||
|
|
||||||
Your plugin may ask for additional parameters from the users of the service by using the attribute ``extra_params`` in your plugin definition.
|
Your plugin may ask for additional parameters from users by using the attribute ``extra_params`` in your plugin definition.
|
||||||
It takes a dictionary, where the keys are the name of the argument/parameter, and the value has the following fields:
|
It takes a dictionary, where the keys are the name of the argument/parameter, and the value has the following fields:
|
||||||
|
|
||||||
* aliases: the different names which can be used in the request to use the parameter.
|
* aliases: the different names which can be used in the request to use the parameter.
|
||||||
@@ -124,15 +69,16 @@ It takes a dictionary, where the keys are the name of the argument/parameter, an
|
|||||||
|
|
||||||
|
|
||||||
|
|
||||||
Loading data and files
|
How should I load external data and files
|
||||||
======================
|
#########################################
|
||||||
|
|
||||||
Most plugins will need access to files (dictionaries, lexicons, etc.).
|
Most plugins will need access to files (dictionaries, lexicons, etc.).
|
||||||
These files are usually heavy or under a license that does not allow redistribution.
|
These files are usually heavy or under a license that does not allow redistribution.
|
||||||
For this reason, senpy has a `data_folder` that is separated from the source files.
|
For this reason, senpy has a `data_folder` that is separated from the source files.
|
||||||
The location of this folder is controlled programmatically or by setting the `SENPY_DATA` environment variable.
|
The location of this folder is controlled programmatically or by setting the `SENPY_DATA` environment variable.
|
||||||
|
You can use the `self.path(filepath)` function to get the path of a given `filepath` within the data folder.
|
||||||
|
|
||||||
Plugins have a convenience function `self.open` which will automatically prepend the data folder to relative paths:
|
Plugins have a convenience function `self.open` which will automatically look for the file if it exists, or open a new one if it doesn't:
|
||||||
|
|
||||||
|
|
||||||
.. code:: python
|
.. code:: python
|
||||||
@@ -144,7 +90,7 @@ Plugins have a convenience function `self.open` which will automatically prepend
|
|||||||
file_in_data = <FILE PATH>
|
file_in_data = <FILE PATH>
|
||||||
file_in_sources = <FILE PATH>
|
file_in_sources = <FILE PATH>
|
||||||
|
|
||||||
def activate(self):
|
def activate(self):
|
||||||
with self.open(self.file_in_data) as f:
|
with self.open(self.file_in_data) as f:
|
||||||
self._classifier = train_from_file(f)
|
self._classifier = train_from_file(f)
|
||||||
file_in_source = os.path.join(self.get_folder(), self.file_in_sources)
|
file_in_source = os.path.join(self.get_folder(), self.file_in_sources)
|
||||||
@@ -155,8 +101,8 @@ Plugins have a convenience function `self.open` which will automatically prepend
|
|||||||
It is good practice to specify the paths of these files in the plugin configuration, so the same code can be reused with different resources.
|
It is good practice to specify the paths of these files in the plugin configuration, so the same code can be reused with different resources.
|
||||||
|
|
||||||
|
|
||||||
Docker image
|
Can I build a docker image for my plugin?
|
||||||
============
|
#########################################
|
||||||
|
|
||||||
Add the following dockerfile to your project to generate a docker image with your plugin:
|
Add the following dockerfile to your project to generate a docker image with your plugin:
|
||||||
|
|
||||||
@@ -187,7 +133,7 @@ And you can run it with:
|
|||||||
docker run -p 5000:5000 gsiupm/exampleplugin
|
docker run -p 5000:5000 gsiupm/exampleplugin
|
||||||
|
|
||||||
|
|
||||||
If the plugin uses non-source files (:ref:`loading data and files`), the recommended way is to use `SENPY_DATA` folder.
|
If the plugin uses non-source files (:ref:`How should I load external data and files`), the recommended way is to use `SENPY_DATA` folder.
|
||||||
Data can then be mounted in the container or added to the image.
|
Data can then be mounted in the container or added to the image.
|
||||||
The former is recommended for open source plugins with licensed resources, whereas the latter is the most convenient and can be used for private images.
|
The former is recommended for open source plugins with licensed resources, whereas the latter is the most convenient and can be used for private images.
|
||||||
|
|
||||||
@@ -204,17 +150,15 @@ Adding data to the image:
|
|||||||
FROM gsiupm/senpy:1.0.1
|
FROM gsiupm/senpy:1.0.1
|
||||||
COPY data /
|
COPY data /
|
||||||
|
|
||||||
F.A.Q.
|
|
||||||
======
|
|
||||||
What annotations can I use?
|
What annotations can I use?
|
||||||
???????????????????????????
|
###########################
|
||||||
|
|
||||||
You can add almost any annotation to an entry.
|
You can add almost any annotation to an entry.
|
||||||
The most common use cases are covered in the :doc:`apischema`.
|
The most common use cases are covered in the :doc:`apischema`.
|
||||||
|
|
||||||
|
|
||||||
Why does the analyse function yield instead of return?
|
Why does the analyse function yield instead of return?
|
||||||
??????????????????????????????????????????????????????
|
######################################################
|
||||||
|
|
||||||
This is so that plugins may add new entries to the response or filter some of them.
|
This is so that plugins may add new entries to the response or filter some of them.
|
||||||
For instance, a chunker may split one entry into several.
|
For instance, a chunker may split one entry into several.
|
||||||
@@ -222,7 +166,7 @@ On the other hand, a conversion plugin may leave out those entries that do not c
|
|||||||
|
|
||||||
|
|
||||||
If I'm using a classifier, where should I train it?
|
If I'm using a classifier, where should I train it?
|
||||||
???????????????????????????????????????????????????
|
###################################################
|
||||||
|
|
||||||
Training a classifier can be time consuming. To avoid running the training unnecessarily, you can use ShelfMixin to store the classifier. For instance:
|
Training a classifier can be time consuming. To avoid running the training unnecessarily, you can use ShelfMixin to store the classifier. For instance:
|
||||||
|
|
||||||
@@ -256,7 +200,7 @@ A corrupt shelf prevents the plugin from loading.
|
|||||||
If you do not care about the data in the shelf, you can force your plugin to remove the corrupted file and load anyway, set the 'force_shelf' to True in your plugin and start it again.
|
If you do not care about the data in the shelf, you can force your plugin to remove the corrupted file and load anyway, set the 'force_shelf' to True in your plugin and start it again.
|
||||||
|
|
||||||
How can I turn an external service into a plugin?
|
How can I turn an external service into a plugin?
|
||||||
?????????????????????????????????????????????????
|
#################################################
|
||||||
|
|
||||||
This example illustrates how to implement a plugin that accesses the Sentiment140 service.
|
This example illustrates how to implement a plugin that accesses the Sentiment140 service.
|
||||||
|
|
||||||
@@ -292,8 +236,8 @@ This example ilustrate how to implement a plugin that accesses the Sentiment140
|
|||||||
yield entry
|
yield entry
|
||||||
|
|
||||||
|
|
||||||
Can I activate a DEBUG mode for my plugin?
|
How can I activate a DEBUG mode for my plugin?
|
||||||
???????????????????????????????????????????
|
###############################################
|
||||||
|
|
||||||
You can activate the DEBUG mode by the command-line tool using the option -d.
|
You can activate the DEBUG mode by the command-line tool using the option -d.
|
||||||
|
|
||||||
@@ -309,6 +253,6 @@ Additionally, with the ``--pdb`` option you will be dropped into a pdb post mort
|
|||||||
python -m pdb yourplugin.py
|
python -m pdb yourplugin.py
|
||||||
|
|
||||||
Where can I find more code examples?
|
Where can I find more code examples?
|
||||||
????????????????????????????????????
|
####################################
|
||||||
|
|
||||||
See: `<http://github.com/gsi-upm/senpy-plugins-community>`_.
|
See: `<http://github.com/gsi-upm/senpy-plugins-community>`_.
|
87
docs/plugins-quickstart.rst
Normal file
87
docs/plugins-quickstart.rst
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
Quickstart for service developers
|
||||||
|
=================================
|
||||||
|
|
||||||
|
This document contains the minimum to get you started with developing new services using Senpy.
|
||||||
|
|
||||||
|
For an example of conversion plugins, see :doc:`conversion`.
|
||||||
|
For a description of definition files, see :doc:`plugins-definition`.
|
||||||
|
|
||||||
|
A more step-by-step tutorial with slides is available `here <https://lab.gsi.upm.es/senpy/senpy-tutorial>`__
|
||||||
|
|
||||||
|
.. contents:: :local:
|
||||||
|
|
||||||
|
Installation
|
||||||
|
############
|
||||||
|
|
||||||
|
First of all, you need to install the package.
|
||||||
|
See :doc:`installation` for instructions.
|
||||||
|
Once installed, the `senpy` command should be available.
|
||||||
|
|
||||||
|
Architecture
|
||||||
|
############
|
||||||
|
|
||||||
|
The main component of a sentiment analysis service is the algorithm itself. However, for the algorithm to work, it needs to get the appropriate parameters from the user, format the results according to the defined API, interact with the user when errors occur or more information is needed, etc.
|
||||||
|
|
||||||
|
Senpy proposes a modular and dynamic architecture that allows:
|
||||||
|
|
||||||
|
* Implementing different algorithms in an extensible way, yet offering a common interface.
|
||||||
|
* Offering common services that facilitate development, so developers can focus on implementing new and better algorithms.
|
||||||
|
|
||||||
|
The framework consists of two main modules: Senpy core, which is the building block of the service, and Senpy plugins, which consist of the analysis algorithm. The next figure depicts a simplified version of the processes involved in an analysis with the Senpy framework.
|
||||||
|
|
||||||
|
.. image:: senpy-architecture.png
|
||||||
|
:width: 100%
|
||||||
|
:align: center
|
||||||
|
|
||||||
|
|
||||||
|
What is a plugin?
|
||||||
|
#################
|
||||||
|
|
||||||
|
A plugin is a python object that can process entries.
|
||||||
|
Given an entry, it will modify it, add annotations to it, or generate new entries.
|
||||||
|
|
||||||
|
|
||||||
|
What is an entry?
|
||||||
|
#################
|
||||||
|
|
||||||
|
Entries are objects that can be annotated.
|
||||||
|
In general, they will be a piece of text.
|
||||||
|
By default, entries are `NIF contexts <http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core/nif-core.html>`_ represented in JSON-LD format.
|
||||||
|
It is a dictionary/JSON object that looks like this:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
{
|
||||||
|
"@id": "<unique identifier or blank node name>",
|
||||||
|
"nif:isString": "input text",
|
||||||
|
"sentiments": [ {
|
||||||
|
...
|
||||||
|
}
|
||||||
|
],
|
||||||
|
...
|
||||||
|
}
|
||||||
|
|
||||||
|
Annotations are added to the object like this:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
entry = Entry()
|
||||||
|
entry.vocabulary__annotationName = 'myvalue'
|
||||||
|
entry['vocabulary:annotationName'] = 'myvalue'
|
||||||
|
entry['annotationNameURI'] = 'myvalue'
|
||||||
|
|
||||||
|
Where vocabulary is one of the prefixes defined in the default senpy context, and annotationURI is a full URI.
|
||||||
|
The value may be any valid JSON-LD dictionary.
|
||||||
|
For simplicity, senpy includes a series of models by default in the ``senpy.models`` module.
|
||||||
|
|
||||||
|
Plugins Code
|
||||||
|
############
|
||||||
|
|
||||||
|
The basic methods in a plugin are:
|
||||||
|
|
||||||
|
* analyse_entry: called in every user requests. It takes two parameters: ``Entry``, the entry object, and ``params``, the parameters supplied by the user. It should yield one or more ``Entry`` objects.
|
||||||
|
* activate: used to load memory-hungry resources. For instance, to train a classifier.
|
||||||
|
* deactivate: used to free up resources when the plugin is no longer needed.
|
||||||
|
|
||||||
|
Plugins are loaded asynchronously, so don't worry if the activate method takes too long. The plugin will be marked as activated once it is finished executing the method.
|
||||||
|
|
49
docs/projects.rst
Normal file
49
docs/projects.rst
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
Projects using Senpy
|
||||||
|
--------------------
|
||||||
|
|
||||||
|
Are you using Senpy in your work? We would love to hear from you!
|
||||||
|
Here is a list of on-going and past projects that have benefited from senpy:
|
||||||
|
|
||||||
|
|
||||||
|
MixedEmotions
|
||||||
|
,,,,,,,,,,,,,
|
||||||
|
|
||||||
|
`MixedEmotions <https://mixedemotions-project.eu/>`_ develops innovative multilingual multi-modal Big Data analytics applications.
|
||||||
|
The analytics relies on a common toolbox for multi-modal sentiment and emotion analysis.
|
||||||
|
The NLP parts of the toolbox are based on senpy and its API.
|
||||||
|
|
||||||
|
The toolbox is featured in this publication:
|
||||||
|
|
||||||
|
.. code-block:: text
|
||||||
|
|
||||||
|
Buitelaar, P., Wood, I. D., Arcan, M., McCrae, J. P., Abele, A., Robin, C., … Tummarello, G. (2018).
|
||||||
|
MixedEmotions: An Open-Source Toolbox for Multi-Modal Emotion Analysis.
|
||||||
|
IEEE Transactions on Multimedia.
|
||||||
|
|
||||||
|
EuroSentiment
|
||||||
|
,,,,,,,,,,,,,
|
||||||
|
|
||||||
|
The aim of the EUROSENTIMENT project was to create a pool for multilingual language resources and services for Sentiment Analysis.
|
||||||
|
|
||||||
|
The EuroSentiment project was the main motivation behind the development of Senpy, and some early versions were used:
|
||||||
|
|
||||||
|
.. code-block:: text
|
||||||
|
|
||||||
|
Sánchez-Rada, J. F., Vulcu, G., Iglesias, C. A., & Buitelaar, P. (2014).
|
||||||
|
EUROSENTIMENT: Linked Data Sentiment Analysis.
|
||||||
|
Proceedings of the ISWC 2014 Posters & Demonstrations Track
|
||||||
|
13th International Semantic Web Conference (ISWC 2014) (Vol. 1272, pp. 145–148).
|
||||||
|
|
||||||
|
|
||||||
|
SoMeDi
|
||||||
|
,,,,,,
|
||||||
|
`SoMeDi <https://itea3.org/project/somedi.html>`_ is an ITEA3 project to research machine learning and artificial intelligence techniques that can be used to turn digital interaction data into Digital Interaction Intelligence and approaches that can be used to effectively enter and act in social media, and to automate this process.
|
||||||
|
SoMeDi exploits senpy's interoperability of services in their customizable data enrichment and NLP workflows.
|
||||||
|
|
||||||
|
TRIVALENT
|
||||||
|
,,,,,,,,,
|
||||||
|
|
||||||
|
`TRIVALENT <https://trivalent-project.eu/>`_ is an EU funded project which aims at a better understanding of the root causes of the phenomenon of violent radicalisation in Europe in order to develop appropriate countermeasures, ranging from early detection methodologies to techniques of counter-narrative.
|
||||||
|
|
||||||
|
In addition to sentiment and emotion analysis services, trivalent provides other types of senpy services such as radicalism and writing style analysis.
|
||||||
|
|
36
docs/publications.rst
Normal file
36
docs/publications.rst
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
Publications
|
||||||
|
============
|
||||||
|
|
||||||
|
|
||||||
|
And if you use Senpy in your research, please cite `Senpy: A Pragmatic Linked Sentiment Analysis Framework <http://gsi.dit.upm.es/index.php/es/investigacion/publicaciones?view=publication&task=show&id=417>`__ (`BibTex <http://gsi.dit.upm.es/index.php/es/investigacion/publicaciones?controller=publications&task=export&format=bibtex&id=417>`__):
|
||||||
|
|
||||||
|
.. code-block:: text
|
||||||
|
|
||||||
|
Sánchez-Rada, J. F., Iglesias, C. A., Corcuera, I., & Araque, Ó. (2016, October).
|
||||||
|
Senpy: A Pragmatic Linked Sentiment Analysis Framework.
|
||||||
|
In Data Science and Advanced Analytics (DSAA),
|
||||||
|
2016 IEEE International Conference on (pp. 735-742). IEEE.
|
||||||
|
|
||||||
|
|
||||||
|
Senpy uses Onyx for emotion representation, first introduced in:
|
||||||
|
|
||||||
|
.. code-block:: text
|
||||||
|
|
||||||
|
Sánchez-Rada, J. F., & Iglesias, C. A. (2016).
|
||||||
|
Onyx: A linked data approach to emotion representation.
|
||||||
|
Information Processing & Management, 52(1), 99-114.
|
||||||
|
|
||||||
|
Senpy uses Marl for sentiment representation, which was presented in:
|
||||||
|
|
||||||
|
.. code-block:: text
|
||||||
|
|
||||||
|
Westerski, A., Iglesias Fernandez, C. A., & Tapia Rico, F. (2011).
|
||||||
|
Linked opinions: Describing sentiments on the structured web of data.
|
||||||
|
|
||||||
|
The representation models, formats and challenges are partially covered in a chapter of the book Sentiment Analysis in Social Networks:
|
||||||
|
|
||||||
|
.. code-block:: text
|
||||||
|
|
||||||
|
Iglesias, C. A., Sánchez-Rada, J. F., Vulcu, G., & Buitelaar, P. (2017).
|
||||||
|
Linked Data Models for Sentiment and Emotion Analysis in Social Networks.
|
||||||
|
In Sentiment Analysis in Social Networks (pp. 49-69).
|
@@ -1,2 +1,3 @@
|
|||||||
sphinxcontrib-httpdomain>=1.4
|
sphinxcontrib-httpdomain>=1.4
|
||||||
|
ipykernel
|
||||||
nbsphinx
|
nbsphinx
|
||||||
|
@@ -1,54 +1,27 @@
|
|||||||
What is Senpy?
|
What is Senpy?
|
||||||
--------------
|
--------------
|
||||||
|
|
||||||
Senpy is a framework for text analysis using Linked Data. There are three main applications of Senpy so far: sentiment and emotion analysis, user profiling and entity recognition. Annotations and Services are compliant with NIF (NLP Interchange Format).
|
Senpy is a framework for sentiment and emotion analysis services.
|
||||||
|
Its goal is to produce analysis services that are interchangeable and fully interoperable.
|
||||||
Senpy aims at providing a framework where analysis modules can be integrated easily as plugins, and providing a core functionality for managing tasks such as data validation, user interaction, formatting, logging, translation to linked data, etc.
|
|
||||||
|
|
||||||
The figure below summarizes the typical features in a text analysis service.
|
|
||||||
Senpy implements all the common blocks, so developers can focus on what really matters: great analysis algorithms that solve real problems.
|
|
||||||
|
|
||||||
.. image:: senpy-framework.png
|
|
||||||
:width: 60%
|
|
||||||
:align: center
|
|
||||||
|
|
||||||
|
|
||||||
Senpy for end users
|
|
||||||
===================
|
|
||||||
|
|
||||||
All services built using senpy share a common interface.
|
|
||||||
This allows users to use them (almost) interchangeably.
|
|
||||||
Senpy comes with a :ref:`built-in client`.
|
|
||||||
|
|
||||||
|
|
||||||
Senpy for service developers
|
|
||||||
============================
|
|
||||||
|
|
||||||
Senpy is a framework that turns your sentiment or emotion analysis algorithm into a full blown semantic service.
|
|
||||||
Senpy takes care of:
|
|
||||||
|
|
||||||
* Interfacing with the user: parameter validation, error handling.
|
|
||||||
* Formatting: JSON-LD, Turtle/n-triples input and output, or simple text input
|
|
||||||
* Linked Data: senpy results are semantically annotated, using a series of well established vocabularies, and sane default URIs.
|
|
||||||
* User interface: a web UI where users can explore your service and test different settings
|
|
||||||
* A client to interact with the service. Currently only available in Python.
|
|
||||||
|
|
||||||
Sharing your sentiment analysis with the world has never been easier!
|
|
||||||
|
|
||||||
Check out the :doc:`plugins` if you have developed an analysis algorithm (e.g. sentiment analysis) and you want to publish it as a service.
|
|
||||||
|
|
||||||
Architecture
|
|
||||||
============
|
|
||||||
|
|
||||||
The main component of a sentiment analysis service is the algorithm itself. However, for the algorithm to work, it needs to get the appropriate parameters from the user, format the results according to the defined API, interact with the user when errors occur or more information is needed, etc.
|
|
||||||
|
|
||||||
Senpy proposes a modular and dynamic architecture that allows:
|
|
||||||
|
|
||||||
* Implementing different algorithms in an extensible way, yet offering a common interface.
|
|
||||||
* Offering common services that facilitate development, so developers can focus on implementing new and better algorithms.
|
|
||||||
|
|
||||||
The framework consists of two main modules: Senpy core, which is the building block of the service, and Senpy plugins, which consist of the analysis algorithm. The next figure depicts a simplified version of the processes involved in an analysis with the Senpy framework.
|
|
||||||
|
|
||||||
.. image:: senpy-architecture.png
|
.. image:: senpy-architecture.png
|
||||||
:width: 100%
|
:width: 100%
|
||||||
:align: center
|
:align: center
|
||||||
|
|
||||||
|
All services built using senpy share a common interface.
|
||||||
|
This allows users to use them (almost) interchangeably, with the same API and tools, simply by pointing to a different URL or changing a parameter.
|
||||||
|
The common schema also makes it easier to evaluate the performance of different algorithms and services.
|
||||||
|
In fact, Senpy has a built-in evaluation API you can use to compare results with different algorithms.
|
||||||
|
|
||||||
|
Services can also use the common interface to communicate with each other.
|
||||||
|
And higher level features can be built on top of these services, such as automatic fusion of results, emotion model conversion, and service discovery.
|
||||||
|
|
||||||
|
These benefits are not limited to new services.
|
||||||
|
The community has developed wrappers for some proprietary and commercial services (such as sentiment140 and Meaning Cloud), so you can use them as well.
|
||||||
|
Senpy comes with a built-in client in the client package.
|
||||||
|
|
||||||
|
|
||||||
|
To achieve this goal, Senpy uses a Linked Data principled approach, based on the NIF (NLP Interchange Format) specification, and open vocabularies such as Marl and Onyx.
|
||||||
|
You can learn more about this in :doc:`vocabularies`.
|
||||||
|
|
||||||
|
Check out :doc:`development` if you have developed an analysis algorithm (e.g. sentiment analysis) and you want to publish it as a service.
|
||||||
|
@@ -1,14 +1,18 @@
|
|||||||
Server
|
Command line tool
|
||||||
======
|
=================
|
||||||
|
|
||||||
|
Basic usage
|
||||||
|
-----------
|
||||||
|
|
||||||
The senpy server is launched via the `senpy` command:
|
The senpy server is launched via the `senpy` command:
|
||||||
|
|
||||||
.. code:: text
|
.. code:: text
|
||||||
|
|
||||||
usage: senpy [-h] [--level logging_level] [--debug] [--default-plugins]
|
usage: senpy [-h] [--level logging_level] [--log-format log_format] [--debug]
|
||||||
[--host HOST] [--port PORT] [--plugins-folder PLUGINS_FOLDER]
|
[--no-default-plugins] [--host HOST] [--port PORT]
|
||||||
[--only-install] [--only-list] [--data-folder DATA_FOLDER]
|
[--plugins-folder PLUGINS_FOLDER] [--only-install] [--only-test]
|
||||||
[--threaded] [--version]
|
[--test] [--only-list] [--data-folder DATA_FOLDER]
|
||||||
|
[--no-threaded] [--no-deps] [--version] [--allow-fail]
|
||||||
|
|
||||||
Run a Senpy server
|
Run a Senpy server
|
||||||
|
|
||||||
@@ -16,20 +20,25 @@ The senpy server is launched via the `senpy` command:
|
|||||||
-h, --help show this help message and exit
|
-h, --help show this help message and exit
|
||||||
--level logging_level, -l logging_level
|
--level logging_level, -l logging_level
|
||||||
Logging level
|
Logging level
|
||||||
|
--log-format log_format
|
||||||
|
Logging format
|
||||||
--debug, -d Run the application in debug mode
|
--debug, -d Run the application in debug mode
|
||||||
--default-plugins Load the default plugins
|
--no-default-plugins Do not load the default plugins
|
||||||
--host HOST Use 0.0.0.0 to accept requests from any host.
|
--host HOST Use 0.0.0.0 to accept requests from any host.
|
||||||
--port PORT, -p PORT Port to listen on.
|
--port PORT, -p PORT Port to listen on.
|
||||||
--plugins-folder PLUGINS_FOLDER, -f PLUGINS_FOLDER
|
--plugins-folder PLUGINS_FOLDER, -f PLUGINS_FOLDER
|
||||||
Where to look for plugins.
|
Where to look for plugins.
|
||||||
--only-install, -i Do not run a server, only install plugin dependencies
|
--only-install, -i Do not run a server, only install plugin dependencies
|
||||||
|
--only-test Do not run a server, just test all plugins
|
||||||
|
--test, -t Test all plugins before launching the server
|
||||||
--only-list, --list Do not run a server, only list plugins found
|
--only-list, --list Do not run a server, only list plugins found
|
||||||
--data-folder DATA_FOLDER, --data DATA_FOLDER
|
--data-folder DATA_FOLDER, --data DATA_FOLDER
|
||||||
Where to look for data. It can be set with the SENPY_DATA
|
Where to look for data. It can be set with the SENPY_DATA
|
||||||
environment variable as well.
|
environment variable as well.
|
||||||
--threaded Run a threaded server
|
--no-threaded Run the server without threading
|
||||||
|
--no-deps, -n Skip installing dependencies
|
||||||
--version, -v Output the senpy version and exit
|
--version, -v Output the senpy version and exit
|
||||||
|
--allow-fail, --fail Do not exit if some plugins fail to activate
|
||||||
|
|
||||||
|
|
||||||
When launched, the server will recursively look for plugins in the specified plugins folder (the current working directory by default).
|
When launched, the server will recursively look for plugins in the specified plugins folder (the current working directory by default).
|
||||||
@@ -40,9 +49,9 @@ Let's run senpy with the default plugins:
|
|||||||
|
|
||||||
.. code:: bash
|
.. code:: bash
|
||||||
|
|
||||||
senpy -f . --default-plugins
|
senpy -f .
|
||||||
|
|
||||||
Now go to `http://localhost:5000 <http://localhost:5000>`_, you should be greeted by the senpy playground:
|
Now open your browser and go to `http://localhost:5000 <http://localhost:5000>`_, where you should be greeted by the senpy playground:
|
||||||
|
|
||||||
.. image:: senpy-playground.png
|
.. image:: senpy-playground.png
|
||||||
:width: 100%
|
:width: 100%
|
||||||
@@ -51,9 +60,9 @@ Now go to `http://localhost:5000 <http://localhost:5000>`_, you should be greete
|
|||||||
The playground is a user-friendly way to test your plugins, but you can always use the service directly: `http://localhost:5000/api?input=hello <http://localhost:5000/api?input=hello>`_.
|
The playground is a user-friendly way to test your plugins, but you can always use the service directly: `http://localhost:5000/api?input=hello <http://localhost:5000/api?input=hello>`_.
|
||||||
|
|
||||||
|
|
||||||
By default, senpy will listen only on the `127.0.0.1` address.
|
By default, senpy will listen only on `127.0.0.1`.
|
||||||
That means you can only access the API from your PC (or localhost).
|
That means you can only access the API from your PC (i.e. localhost).
|
||||||
You can listen on a different address using the `--host` flag (e.g., 0.0.0.0).
|
You can listen on a different address using the `--host` flag (e.g., 0.0.0.0, to allow any computer to access it).
|
||||||
The default port is 5000.
|
The default port is 5000.
|
||||||
You can change it with the `--port` flag.
|
You can change it with the `--port` flag.
|
||||||
|
|
||||||
@@ -64,3 +73,14 @@ For instance, to accept connections on port 6000 on any interface:
|
|||||||
senpy --host 0.0.0.0 --port 6000
|
senpy --host 0.0.0.0 --port 6000
|
||||||
|
|
||||||
For more options, see the `--help` page.
|
For more options, see the `--help` page.
|
||||||
|
|
||||||
|
Sentiment analysis in the command line
|
||||||
|
--------------------------------------
|
||||||
|
|
||||||
|
Although the main use of senpy is to publish services, the tool can also be used locally to analyze text in the command line.
|
||||||
|
This is a short video demonstration:
|
||||||
|
|
||||||
|
.. image:: https://asciinema.org/a/9uwef1ghkjk062cw2t4mhzpyk.png
|
||||||
|
:width: 100%
|
||||||
|
:target: https://asciinema.org/a/9uwef1ghkjk062cw2t4mhzpyk
|
||||||
|
:alt: CLI demo
|
@@ -1,15 +0,0 @@
|
|||||||
Usage
|
|
||||||
-----
|
|
||||||
|
|
||||||
First of all, you need to install the package.
|
|
||||||
See :doc:`installation` for instructions.
|
|
||||||
Once installed, the `senpy` command should be available.
|
|
||||||
|
|
||||||
.. toctree::
|
|
||||||
:maxdepth: 1
|
|
||||||
|
|
||||||
server
|
|
||||||
SenpyClientUse
|
|
||||||
commandline
|
|
||||||
|
|
||||||
|
|
@@ -1,6 +1,6 @@
|
|||||||
This is a collection of plugins that exemplify certain aspects of plugin development with senpy.
|
This is a collection of plugins that exemplify certain aspects of plugin development with senpy.
|
||||||
|
|
||||||
The first series of plugins are the `basic` ones.
|
The first series of plugins are the `basic` ones.
|
||||||
Their starting point is a classification function defined in `basic.py`.
|
Their starting point is a classification function defined in `basic.py`.
|
||||||
They all include testing and running them as a script will run all tests.
|
They all include testing and running them as a script will run all tests.
|
||||||
In ascending order of customization, the plugins are:
|
In ascending order of customization, the plugins are:
|
||||||
@@ -19,5 +19,5 @@ In rest of the plugins show advanced topics:
|
|||||||
|
|
||||||
All of the plugins in this folder include a set of test cases and they are periodically tested with the latest version of senpy.
|
All of the plugins in this folder include a set of test cases and they are periodically tested with the latest version of senpy.
|
||||||
|
|
||||||
Additionally, for an example of a stand-alone plugin that can be tested and deployed with docker, take a look at: lab.cluster.gsi.dit.upm.es/senpy/plugin-example
|
Additionally, for an example of a stand-alone plugin that can be tested and deployed with docker, take a look at: lab.gsi.upm.es/senpy/plugin-example
|
||||||
bbm
|
bbm
|
||||||
|
@@ -1,3 +1,19 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
from senpy import AnalysisPlugin
|
from senpy import AnalysisPlugin
|
||||||
|
|
||||||
import multiprocessing
|
import multiprocessing
|
||||||
@@ -11,7 +27,7 @@ class Async(AnalysisPlugin):
|
|||||||
'''An example of an asynchronous module'''
|
'''An example of an asynchronous module'''
|
||||||
author = '@balkian'
|
author = '@balkian'
|
||||||
version = '0.2'
|
version = '0.2'
|
||||||
async = True
|
sync = False
|
||||||
|
|
||||||
def _do_async(self, num_processes):
|
def _do_async(self, num_processes):
|
||||||
pool = multiprocessing.Pool(processes=num_processes)
|
pool = multiprocessing.Pool(processes=num_processes)
|
||||||
|
@@ -1,5 +1,21 @@
|
|||||||
#!/usr/local/bin/python
|
#!/usr/local/bin/python
|
||||||
# coding: utf-8
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
|
|
||||||
emoticons = {
|
emoticons = {
|
||||||
'pos': [':)', ':]', '=)', ':D'],
|
'pos': [':)', ':]', '=)', ':D'],
|
||||||
@@ -7,17 +23,19 @@ emoticons = {
|
|||||||
}
|
}
|
||||||
|
|
||||||
emojis = {
|
emojis = {
|
||||||
'pos': ['😁', '😂', '😃', '😄', '😆', '😅', '😄' '😍'],
|
'pos': [u'😁', u'😂', u'😃', u'😄', u'😆', u'😅', u'😄', u'😍'],
|
||||||
'neg': ['😢', '😡', '😠', '😞', '😖', '😔', '😓', '😒']
|
'neg': [u'😢', u'😡', u'😠', u'😞', u'😖', u'😔', u'😓', u'😒']
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
def get_polarity(text, dictionaries=[emoticons, emojis]):
|
def get_polarity(text, dictionaries=[emoticons, emojis]):
|
||||||
polarity = 'marl:Neutral'
|
polarity = 'marl:Neutral'
|
||||||
|
print('Input for get_polarity', text)
|
||||||
for dictionary in dictionaries:
|
for dictionary in dictionaries:
|
||||||
for label, values in dictionary.items():
|
for label, values in dictionary.items():
|
||||||
for emoticon in values:
|
for emoticon in values:
|
||||||
if emoticon and emoticon in text:
|
if emoticon and emoticon in text:
|
||||||
polarity = label
|
polarity = label
|
||||||
break
|
break
|
||||||
|
print('Polarity', polarity)
|
||||||
return polarity
|
return polarity
|
||||||
|
@@ -1,5 +1,20 @@
|
|||||||
#!/usr/local/bin/python
|
#!/usr/local/bin/python
|
||||||
# coding: utf-8
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
from senpy import easy_test, models, plugins
|
from senpy import easy_test, models, plugins
|
||||||
|
|
||||||
@@ -18,13 +33,13 @@ class BasicAnalyseEntry(plugins.SentimentPlugin):
|
|||||||
'default': 'marl:Neutral'
|
'default': 'marl:Neutral'
|
||||||
}
|
}
|
||||||
|
|
||||||
def analyse_entry(self, entry, params):
|
def analyse_entry(self, entry, activity):
|
||||||
polarity = basic.get_polarity(entry.text)
|
polarity = basic.get_polarity(entry.text)
|
||||||
|
|
||||||
polarity = self.mappings.get(polarity, self.mappings['default'])
|
polarity = self.mappings.get(polarity, self.mappings['default'])
|
||||||
|
|
||||||
s = models.Sentiment(marl__hasPolarity=polarity)
|
s = models.Sentiment(marl__hasPolarity=polarity)
|
||||||
s.prov(self)
|
s.prov(activity)
|
||||||
entry.sentiments.append(s)
|
entry.sentiments.append(s)
|
||||||
yield entry
|
yield entry
|
||||||
|
|
||||||
|
@@ -1,5 +1,20 @@
|
|||||||
#!/usr/local/bin/python
|
#!/usr/local/bin/python
|
||||||
# coding: utf-8
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
from senpy import easy_test, SentimentBox
|
from senpy import easy_test, SentimentBox
|
||||||
|
|
||||||
@@ -12,15 +27,13 @@ class BasicBox(SentimentBox):
|
|||||||
author = '@balkian'
|
author = '@balkian'
|
||||||
version = '0.1'
|
version = '0.1'
|
||||||
|
|
||||||
mappings = {
|
def predict_one(self, features, **kwargs):
|
||||||
'pos': 'marl:Positive',
|
output = basic.get_polarity(features[0])
|
||||||
'neg': 'marl:Negative',
|
if output == 'pos':
|
||||||
'default': 'marl:Neutral'
|
return [1, 0, 0]
|
||||||
}
|
if output == 'neg':
|
||||||
|
return [0, 0, 1]
|
||||||
def predict_one(self, input):
|
return [0, 1, 0]
|
||||||
output = basic.get_polarity(input)
|
|
||||||
return self.mappings.get(output, self.mappings['default'])
|
|
||||||
|
|
||||||
test_cases = [{
|
test_cases = [{
|
||||||
'input': 'Hello :)',
|
'input': 'Hello :)',
|
||||||
|
@@ -1,37 +1,52 @@
|
|||||||
#!/usr/local/bin/python
|
#!/usr/local/bin/python
|
||||||
# coding: utf-8
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
from senpy import easy_test, SentimentBox, MappingMixin
|
|
||||||
|
from senpy import easy_test, SentimentBox
|
||||||
|
|
||||||
import basic
|
import basic
|
||||||
|
|
||||||
|
|
||||||
class Basic(MappingMixin, SentimentBox):
|
class Basic(SentimentBox):
|
||||||
'''Provides sentiment annotation using a lexicon'''
|
'''Provides sentiment annotation using a lexicon'''
|
||||||
|
|
||||||
author = '@balkian'
|
author = '@balkian'
|
||||||
version = '0.1'
|
version = '0.1'
|
||||||
|
|
||||||
mappings = {
|
def predict_one(self, features, **kwargs):
|
||||||
'pos': 'marl:Positive',
|
output = basic.get_polarity(features[0])
|
||||||
'neg': 'marl:Negative',
|
if output == 'pos':
|
||||||
'default': 'marl:Neutral'
|
return [1, 0, 0]
|
||||||
}
|
if output == 'neu':
|
||||||
|
return [0, 1, 0]
|
||||||
def predict_one(self, input):
|
return [0, 0, 1]
|
||||||
return basic.get_polarity(input)
|
|
||||||
|
|
||||||
test_cases = [{
|
test_cases = [{
|
||||||
'input': 'Hello :)',
|
'input': u'Hello :)',
|
||||||
'polarity': 'marl:Positive'
|
'polarity': 'marl:Positive'
|
||||||
}, {
|
}, {
|
||||||
'input': 'So sad :(',
|
'input': u'So sad :(',
|
||||||
'polarity': 'marl:Negative'
|
'polarity': 'marl:Negative'
|
||||||
}, {
|
}, {
|
||||||
'input': 'Yay! Emojis 😁',
|
'input': u'Yay! Emojis 😁',
|
||||||
'polarity': 'marl:Positive'
|
'polarity': 'marl:Positive'
|
||||||
}, {
|
}, {
|
||||||
'input': 'But no emoticons 😢',
|
'input': u'But no emoticons 😢',
|
||||||
'polarity': 'marl:Negative'
|
'polarity': 'marl:Negative'
|
||||||
}]
|
}]
|
||||||
|
|
||||||
|
@@ -1,5 +1,21 @@
|
|||||||
#!/usr/local/bin/python
|
#!/usr/local/bin/python
|
||||||
# coding: utf-8
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
|
|
||||||
from senpy import easy_test, models, plugins
|
from senpy import easy_test, models, plugins
|
||||||
|
|
||||||
@@ -16,7 +32,7 @@ class Dictionary(plugins.SentimentPlugin):
|
|||||||
|
|
||||||
mappings = {'pos': 'marl:Positive', 'neg': 'marl:Negative'}
|
mappings = {'pos': 'marl:Positive', 'neg': 'marl:Negative'}
|
||||||
|
|
||||||
def analyse_entry(self, entry, params):
|
def analyse_entry(self, entry, *args, **kwargs):
|
||||||
polarity = basic.get_polarity(entry.text, self.dictionaries)
|
polarity = basic.get_polarity(entry.text, self.dictionaries)
|
||||||
if polarity in self.mappings:
|
if polarity in self.mappings:
|
||||||
polarity = self.mappings[polarity]
|
polarity = self.mappings[polarity]
|
||||||
@@ -43,7 +59,6 @@ class Dictionary(plugins.SentimentPlugin):
|
|||||||
|
|
||||||
class EmojiOnly(Dictionary):
|
class EmojiOnly(Dictionary):
|
||||||
'''Sentiment annotation with a basic lexicon of emojis'''
|
'''Sentiment annotation with a basic lexicon of emojis'''
|
||||||
description = 'A plugin'
|
|
||||||
dictionaries = [basic.emojis]
|
dictionaries = [basic.emojis]
|
||||||
|
|
||||||
test_cases = [{
|
test_cases = [{
|
||||||
|
@@ -1,3 +1,19 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
from senpy import AnalysisPlugin, easy
|
from senpy import AnalysisPlugin, easy
|
||||||
|
|
||||||
|
|
||||||
|
@@ -1,3 +1,19 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
from senpy import AnalysisPlugin, easy
|
from senpy import AnalysisPlugin, easy
|
||||||
|
|
||||||
|
|
||||||
|
@@ -1,3 +1,19 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
import random
|
import random
|
||||||
|
|
||||||
from senpy.plugins import EmotionPlugin
|
from senpy.plugins import EmotionPlugin
|
||||||
@@ -6,12 +22,13 @@ from senpy.models import EmotionSet, Emotion, Entry
|
|||||||
|
|
||||||
class EmoRand(EmotionPlugin):
|
class EmoRand(EmotionPlugin):
|
||||||
'''A sample plugin that returns a random emotion annotation'''
|
'''A sample plugin that returns a random emotion annotation'''
|
||||||
|
name = 'emotion-random'
|
||||||
author = '@balkian'
|
author = '@balkian'
|
||||||
version = '0.1'
|
version = '0.1'
|
||||||
url = "https://github.com/gsi-upm/senpy-plugins-community"
|
url = "https://github.com/gsi-upm/senpy-plugins-community"
|
||||||
onyx__usesEmotionModel = "emoml:big6"
|
onyx__usesEmotionModel = "emoml:big6"
|
||||||
|
|
||||||
def analyse_entry(self, entry, params):
|
def analyse_entry(self, entry, activity):
|
||||||
category = "emoml:big6happiness"
|
category = "emoml:big6happiness"
|
||||||
number = max(-1, min(1, random.gauss(0, 0.5)))
|
number = max(-1, min(1, random.gauss(0, 0.5)))
|
||||||
if number > 0:
|
if number > 0:
|
||||||
@@ -19,7 +36,7 @@ class EmoRand(EmotionPlugin):
|
|||||||
emotionSet = EmotionSet()
|
emotionSet = EmotionSet()
|
||||||
emotion = Emotion({"onyx:hasEmotionCategory": category})
|
emotion = Emotion({"onyx:hasEmotionCategory": category})
|
||||||
emotionSet.onyx__hasEmotion.append(emotion)
|
emotionSet.onyx__hasEmotion.append(emotion)
|
||||||
emotionSet.prov__wasGeneratedBy = self.id
|
emotionSet.prov(activity)
|
||||||
entry.emotions.append(emotionSet)
|
entry.emotions.append(emotionSet)
|
||||||
yield entry
|
yield entry
|
||||||
|
|
||||||
@@ -27,6 +44,6 @@ class EmoRand(EmotionPlugin):
|
|||||||
params = dict()
|
params = dict()
|
||||||
results = list()
|
results = list()
|
||||||
for i in range(100):
|
for i in range(100):
|
||||||
res = next(self.analyse_entry(Entry(nif__isString="Hello"), params))
|
res = next(self.analyse_entry(Entry(nif__isString="Hello"), self.activity(params)))
|
||||||
res.validate()
|
res.validate()
|
||||||
results.append(res.emotions[0]['onyx:hasEmotion'][0]['onyx:hasEmotionCategory'])
|
results.append(res.emotions[0]['onyx:hasEmotion'][0]['onyx:hasEmotionCategory'])
|
@@ -1,3 +1,19 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
import noop
|
import noop
|
||||||
from senpy.plugins import SentimentPlugin
|
from senpy.plugins import SentimentPlugin
|
||||||
|
|
||||||
|
@@ -1,5 +1,21 @@
|
|||||||
#!/usr/local/bin/python
|
#!/usr/local/bin/python
|
||||||
# coding: utf-8
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
|
|
||||||
from senpy import easy_test, models, plugins
|
from senpy import easy_test, models, plugins
|
||||||
|
|
||||||
@@ -25,7 +41,8 @@ class ParameterizedDictionary(plugins.SentimentPlugin):
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
def analyse_entry(self, entry, params):
|
def analyse_entry(self, entry, activity):
|
||||||
|
params = activity.params
|
||||||
positive_words = params['positive-words'].split(',')
|
positive_words = params['positive-words'].split(',')
|
||||||
negative_words = params['negative-words'].split(',')
|
negative_words = params['negative-words'].split(',')
|
||||||
dictionary = {
|
dictionary = {
|
||||||
@@ -35,7 +52,7 @@ class ParameterizedDictionary(plugins.SentimentPlugin):
|
|||||||
polarity = basic.get_polarity(entry.text, [dictionary])
|
polarity = basic.get_polarity(entry.text, [dictionary])
|
||||||
|
|
||||||
s = models.Sentiment(marl__hasPolarity=polarity)
|
s = models.Sentiment(marl__hasPolarity=polarity)
|
||||||
s.prov(self)
|
s.prov(activity)
|
||||||
entry.sentiments.append(s)
|
entry.sentiments.append(s)
|
||||||
yield entry
|
yield entry
|
||||||
|
|
||||||
|
@@ -1,16 +1,33 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
import random
|
import random
|
||||||
from senpy import SentimentPlugin, Sentiment, Entry
|
from senpy import SentimentPlugin, Sentiment, Entry
|
||||||
|
|
||||||
|
|
||||||
class Rand(SentimentPlugin):
|
class RandSent(SentimentPlugin):
|
||||||
'''A sample plugin that returns a random sentiment annotation'''
|
'''A sample plugin that returns a random sentiment annotation'''
|
||||||
|
name = 'sentiment-random'
|
||||||
author = "@balkian"
|
author = "@balkian"
|
||||||
version = '0.1'
|
version = '0.1'
|
||||||
url = "https://github.com/gsi-upm/senpy-plugins-community"
|
url = "https://github.com/gsi-upm/senpy-plugins-community"
|
||||||
marl__maxPolarityValue = '1'
|
marl__maxPolarityValue = '1'
|
||||||
marl__minPolarityValue = "-1"
|
marl__minPolarityValue = "-1"
|
||||||
|
|
||||||
def analyse_entry(self, entry, params):
|
def analyse_entry(self, entry, activity):
|
||||||
polarity_value = max(-1, min(1, random.gauss(0.2, 0.2)))
|
polarity_value = max(-1, min(1, random.gauss(0.2, 0.2)))
|
||||||
polarity = "marl:Neutral"
|
polarity = "marl:Neutral"
|
||||||
if polarity_value > 0:
|
if polarity_value > 0:
|
||||||
@@ -19,7 +36,7 @@ class Rand(SentimentPlugin):
|
|||||||
polarity = "marl:Negative"
|
polarity = "marl:Negative"
|
||||||
sentiment = Sentiment(marl__hasPolarity=polarity,
|
sentiment = Sentiment(marl__hasPolarity=polarity,
|
||||||
marl__polarityValue=polarity_value)
|
marl__polarityValue=polarity_value)
|
||||||
sentiment.prov(self)
|
sentiment.prov(activity)
|
||||||
entry.sentiments.append(sentiment)
|
entry.sentiments.append(sentiment)
|
||||||
yield entry
|
yield entry
|
||||||
|
|
||||||
@@ -28,8 +45,9 @@ class Rand(SentimentPlugin):
|
|||||||
params = dict()
|
params = dict()
|
||||||
results = list()
|
results = list()
|
||||||
for i in range(50):
|
for i in range(50):
|
||||||
|
activity = self.activity(params)
|
||||||
res = next(self.analyse_entry(Entry(nif__isString="Hello"),
|
res = next(self.analyse_entry(Entry(nif__isString="Hello"),
|
||||||
params))
|
activity))
|
||||||
res.validate()
|
res.validate()
|
||||||
results.append(res.sentiments[0]['marl:hasPolarity'])
|
results.append(res.sentiments[0]['marl:hasPolarity'])
|
||||||
assert 'marl:Positive' in results
|
assert 'marl:Positive' in results
|
@@ -1,3 +1,19 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
'''
|
'''
|
||||||
Create a dummy dataset.
|
Create a dummy dataset.
|
||||||
Messages with a happy emoticon are labelled positive
|
Messages with a happy emoticon are labelled positive
|
||||||
|
@@ -1,3 +1,19 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
from sklearn.pipeline import Pipeline
|
from sklearn.pipeline import Pipeline
|
||||||
from sklearn.feature_extraction.text import CountVectorizer
|
from sklearn.feature_extraction.text import CountVectorizer
|
||||||
from sklearn.model_selection import train_test_split
|
from sklearn.model_selection import train_test_split
|
||||||
|
@@ -1,25 +1,36 @@
|
|||||||
from senpy import SentimentBox, MappingMixin, easy_test
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
|
from senpy import SentimentBox, easy_test
|
||||||
|
|
||||||
from mypipeline import pipeline
|
from mypipeline import pipeline
|
||||||
|
|
||||||
|
|
||||||
class PipelineSentiment(MappingMixin, SentimentBox):
|
class PipelineSentiment(SentimentBox):
|
||||||
'''
|
'''This is a pipeline plugin that wraps a classifier defined in another module
|
||||||
This is a pipeline plugin that wraps a classifier defined in another module
|
(mypipeline).'''
|
||||||
(mypipeline).
|
|
||||||
'''
|
|
||||||
author = '@balkian'
|
author = '@balkian'
|
||||||
version = 0.1
|
version = 0.1
|
||||||
maxPolarityValue = 1
|
maxPolarityValue = 1
|
||||||
minPolarityValue = -1
|
minPolarityValue = -1
|
||||||
|
|
||||||
mappings = {
|
def predict_one(self, features, **kwargs):
|
||||||
1: 'marl:Positive',
|
if pipeline.predict(features) > 0:
|
||||||
-1: 'marl:Negative'
|
return [1, 0, 0]
|
||||||
}
|
return [0, 0, 1]
|
||||||
|
|
||||||
def predict_one(self, input):
|
|
||||||
return pipeline.predict([input, ])[0]
|
|
||||||
|
|
||||||
test_cases = [
|
test_cases = [
|
||||||
{
|
{
|
||||||
|
@@ -1,3 +1,19 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
from senpy.plugins import AnalysisPlugin
|
from senpy.plugins import AnalysisPlugin
|
||||||
from time import sleep
|
from time import sleep
|
||||||
|
|
||||||
|
@@ -1 +1 @@
|
|||||||
gsitk
|
gsitk>0.1.9.1
|
||||||
|
@@ -15,8 +15,6 @@ spec:
|
|||||||
- name: senpy-latest
|
- name: senpy-latest
|
||||||
image: $IMAGEWTAG
|
image: $IMAGEWTAG
|
||||||
imagePullPolicy: Always
|
imagePullPolicy: Always
|
||||||
args:
|
|
||||||
- "--default-plugins"
|
|
||||||
resources:
|
resources:
|
||||||
limits:
|
limits:
|
||||||
memory: "512Mi"
|
memory: "512Mi"
|
||||||
|
@@ -12,3 +12,10 @@ spec:
|
|||||||
backend:
|
backend:
|
||||||
serviceName: senpy-latest
|
serviceName: senpy-latest
|
||||||
servicePort: 5000
|
servicePort: 5000
|
||||||
|
- host: latest.senpy.gsi.upm.es
|
||||||
|
http:
|
||||||
|
paths:
|
||||||
|
- path: /
|
||||||
|
backend:
|
||||||
|
serviceName: senpy-latest
|
||||||
|
servicePort: 5000
|
||||||
|
@@ -9,3 +9,8 @@ jsonref
|
|||||||
PyYAML
|
PyYAML
|
||||||
rdflib
|
rdflib
|
||||||
rdflib-jsonld
|
rdflib-jsonld
|
||||||
|
numpy
|
||||||
|
scipy
|
||||||
|
scikit-learn>=0.20
|
||||||
|
responses
|
||||||
|
jmespath
|
||||||
|
@@ -1,7 +1,7 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
# Copyright 2014 J. Fernando Sánchez Rada - Grupo de Sistemas Inteligentes
|
#
|
||||||
# DIT, UPM
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@@ -14,6 +14,7 @@
|
|||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
#
|
||||||
"""
|
"""
|
||||||
Sentiment analysis server in Python
|
Sentiment analysis server in Python
|
||||||
"""
|
"""
|
||||||
|
@@ -1,7 +1,6 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
# Copyright 2014 J. Fernando Sánchez Rada - Grupo de Sistemas Inteligentes
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
# DIT, UPM
|
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@@ -40,8 +39,14 @@ def main():
|
|||||||
'-l',
|
'-l',
|
||||||
metavar='logging_level',
|
metavar='logging_level',
|
||||||
type=str,
|
type=str,
|
||||||
default="WARN",
|
default="INFO",
|
||||||
help='Logging level')
|
help='Logging level')
|
||||||
|
parser.add_argument(
|
||||||
|
'--log-format',
|
||||||
|
metavar='log_format',
|
||||||
|
type=str,
|
||||||
|
default='%(asctime)s %(levelname)-10s %(name)-30s \t %(message)s',
|
||||||
|
help='Logging format')
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'--debug',
|
'--debug',
|
||||||
'-d',
|
'-d',
|
||||||
@@ -49,10 +54,10 @@ def main():
|
|||||||
default=False,
|
default=False,
|
||||||
help='Run the application in debug mode')
|
help='Run the application in debug mode')
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'--default-plugins',
|
'--no-default-plugins',
|
||||||
action='store_true',
|
action='store_true',
|
||||||
default=False,
|
default=False,
|
||||||
help='Load the default plugins')
|
help='Do not load the default plugins')
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'--host',
|
'--host',
|
||||||
type=str,
|
type=str,
|
||||||
@@ -68,7 +73,7 @@ def main():
|
|||||||
'--plugins-folder',
|
'--plugins-folder',
|
||||||
'-f',
|
'-f',
|
||||||
type=str,
|
type=str,
|
||||||
default='.',
|
action='append',
|
||||||
help='Where to look for plugins.')
|
help='Where to look for plugins.')
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'--only-install',
|
'--only-install',
|
||||||
@@ -78,10 +83,15 @@ def main():
|
|||||||
help='Do not run a server, only install plugin dependencies')
|
help='Do not run a server, only install plugin dependencies')
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'--only-test',
|
'--only-test',
|
||||||
'-t',
|
|
||||||
action='store_true',
|
action='store_true',
|
||||||
default=False,
|
default=False,
|
||||||
help='Do not run a server, just test all plugins')
|
help='Do not run a server, just test all plugins')
|
||||||
|
parser.add_argument(
|
||||||
|
'--test',
|
||||||
|
'-t',
|
||||||
|
action='store_true',
|
||||||
|
default=False,
|
||||||
|
help='Test all plugins before launching the server')
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'--only-list',
|
'--only-list',
|
||||||
'--list',
|
'--list',
|
||||||
@@ -95,50 +105,85 @@ def main():
|
|||||||
default=None,
|
default=None,
|
||||||
help='Where to look for data. It be set with the SENPY_DATA environment variable as well.')
|
help='Where to look for data. It be set with the SENPY_DATA environment variable as well.')
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'--threaded',
|
'--no-threaded',
|
||||||
action='store_false',
|
action='store_true',
|
||||||
default=True,
|
default=False,
|
||||||
help='Run a threaded server')
|
help='Run a single-threaded server')
|
||||||
|
parser.add_argument(
|
||||||
|
'--no-deps',
|
||||||
|
'-n',
|
||||||
|
action='store_true',
|
||||||
|
default=False,
|
||||||
|
help='Skip installing dependencies')
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'--version',
|
'--version',
|
||||||
'-v',
|
'-v',
|
||||||
action='store_true',
|
action='store_true',
|
||||||
default=False,
|
default=False,
|
||||||
help='Output the senpy version and exit')
|
help='Output the senpy version and exit')
|
||||||
|
parser.add_argument(
|
||||||
|
'--allow-fail',
|
||||||
|
'--fail',
|
||||||
|
action='store_true',
|
||||||
|
default=False,
|
||||||
|
help='Do not exit if some plugins fail to activate')
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
|
print('Senpy version {}'.format(senpy.__version__))
|
||||||
|
print(sys.version)
|
||||||
if args.version:
|
if args.version:
|
||||||
print('Senpy version {}'.format(senpy.__version__))
|
|
||||||
print(sys.version)
|
|
||||||
exit(1)
|
exit(1)
|
||||||
rl = logging.getLogger()
|
rl = logging.getLogger()
|
||||||
rl.setLevel(getattr(logging, args.level))
|
rl.setLevel(getattr(logging, args.level))
|
||||||
|
logger_handler = rl.handlers[0]
|
||||||
|
|
||||||
|
# First, generic formatter:
|
||||||
|
logger_handler.setFormatter(logging.Formatter(args.log_format))
|
||||||
|
|
||||||
app = Flask(__name__)
|
app = Flask(__name__)
|
||||||
app.debug = args.debug
|
app.debug = args.debug
|
||||||
sp = Senpy(app, args.plugins_folder,
|
sp = Senpy(app,
|
||||||
default_plugins=args.default_plugins,
|
plugin_folder=None,
|
||||||
|
default_plugins=not args.no_default_plugins,
|
||||||
data_folder=args.data_folder)
|
data_folder=args.data_folder)
|
||||||
|
folders = list(args.plugins_folder) if args.plugins_folder else []
|
||||||
|
if not folders:
|
||||||
|
folders.append(".")
|
||||||
|
for p in folders:
|
||||||
|
sp.add_folder(p)
|
||||||
|
|
||||||
|
plugins = sp.plugins(plugin_type=None, is_activated=False)
|
||||||
|
maxname = max(len(x.name) for x in plugins)
|
||||||
|
maxversion = max(len(str(x.version)) for x in plugins)
|
||||||
|
print('Found {} plugins:'.format(len(plugins)))
|
||||||
|
for plugin in plugins:
|
||||||
|
import inspect
|
||||||
|
fpath = inspect.getfile(plugin.__class__)
|
||||||
|
print('\t{: <{maxname}} @ {: <{maxversion}} -> {}'.format(plugin.name,
|
||||||
|
plugin.version,
|
||||||
|
fpath,
|
||||||
|
maxname=maxname,
|
||||||
|
maxversion=maxversion))
|
||||||
if args.only_list:
|
if args.only_list:
|
||||||
plugins = sp.plugins()
|
|
||||||
maxwidth = max(len(x.id) for x in plugins)
|
|
||||||
for plugin in plugins:
|
|
||||||
import inspect
|
|
||||||
fpath = inspect.getfile(plugin.__class__)
|
|
||||||
print('{: <{width}} @ {}'.format(plugin.id, fpath, width=maxwidth))
|
|
||||||
return
|
return
|
||||||
sp.install_deps()
|
if not args.no_deps:
|
||||||
|
sp.install_deps()
|
||||||
if args.only_install:
|
if args.only_install:
|
||||||
return
|
return
|
||||||
sp.activate_all()
|
sp.activate_all(allow_fail=args.allow_fail)
|
||||||
if args.only_test:
|
if args.test or args.only_test:
|
||||||
easy_test(sp.plugins())
|
easy_test(sp.plugins(), debug=args.debug)
|
||||||
return
|
if args.only_test:
|
||||||
|
return
|
||||||
print('Senpy version {}'.format(senpy.__version__))
|
print('Senpy version {}'.format(senpy.__version__))
|
||||||
print('Server running on port %s:%d. Ctrl+C to quit' % (args.host,
|
print('Server running on port %s:%d. Ctrl+C to quit' % (args.host,
|
||||||
args.port))
|
args.port))
|
||||||
app.run(args.host,
|
try:
|
||||||
args.port,
|
app.run(args.host,
|
||||||
threaded=args.threaded,
|
args.port,
|
||||||
debug=app.debug)
|
threaded=not args.no_threaded,
|
||||||
|
debug=app.debug)
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
print('Bye!')
|
||||||
sp.deactivate_all()
|
sp.deactivate_all()
|
||||||
|
|
||||||
|
|
||||||
|
295
senpy/api.py
295
senpy/api.py
@@ -1,26 +1,52 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
from future.utils import iteritems
|
from future.utils import iteritems
|
||||||
from .models import Error, Results, Entry, from_string
|
from .models import Error, Results, Entry, from_string
|
||||||
import logging
|
import logging
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
boolean = [True, False]
|
||||||
|
|
||||||
|
processors = {
|
||||||
|
'string_to_tuple': lambda p: p if isinstance(p, (tuple, list)) else tuple(p.split(','))
|
||||||
|
}
|
||||||
|
|
||||||
API_PARAMS = {
|
API_PARAMS = {
|
||||||
"algorithm": {
|
"algorithm": {
|
||||||
"aliases": ["algorithms", "a", "algo"],
|
"aliases": ["algorithms", "a", "algo"],
|
||||||
"required": False,
|
"required": True,
|
||||||
|
"default": 'default',
|
||||||
|
"processor": 'string_to_tuple',
|
||||||
"description": ("Algorithms that will be used to process the request."
|
"description": ("Algorithms that will be used to process the request."
|
||||||
"It may be a list of comma-separated names."),
|
"It may be a list of comma-separated names."),
|
||||||
},
|
},
|
||||||
"expanded-jsonld": {
|
"expanded-jsonld": {
|
||||||
"@id": "expanded-jsonld",
|
"@id": "expanded-jsonld",
|
||||||
"aliases": ["expanded"],
|
"description": "use JSON-LD expansion to get full URIs",
|
||||||
"options": "boolean",
|
"aliases": ["expanded", "expanded_jsonld"],
|
||||||
|
"options": boolean,
|
||||||
"required": True,
|
"required": True,
|
||||||
"default": False
|
"default": False
|
||||||
},
|
},
|
||||||
"with_parameters": {
|
"with-parameters": {
|
||||||
"aliases": ['withparameters',
|
"aliases": ['withparameters',
|
||||||
'with-parameters'],
|
'with_parameters'],
|
||||||
"options": "boolean",
|
"description": "include initial parameters in the response",
|
||||||
|
"options": boolean,
|
||||||
"default": False,
|
"default": False,
|
||||||
"required": True
|
"required": True
|
||||||
},
|
},
|
||||||
@@ -28,25 +54,110 @@ API_PARAMS = {
|
|||||||
"@id": "outformat",
|
"@id": "outformat",
|
||||||
"aliases": ["o"],
|
"aliases": ["o"],
|
||||||
"default": "json-ld",
|
"default": "json-ld",
|
||||||
|
"description": """The data can be semantically formatted (JSON-LD, turtle or n-triples),
|
||||||
|
given as a list of comma-separated fields (see the fields option) or constructed from a Jinja2
|
||||||
|
template (see the template option).""",
|
||||||
"required": True,
|
"required": True,
|
||||||
"options": ["json-ld", "turtle"],
|
"options": ["json-ld", "turtle", "ntriples"],
|
||||||
|
},
|
||||||
|
"template": {
|
||||||
|
"@id": "template",
|
||||||
|
"required": False,
|
||||||
|
"description": """Jinja2 template for the result. The input data for the template will
|
||||||
|
be the results as a dictionary.
|
||||||
|
For example:
|
||||||
|
|
||||||
|
Consider the results before templating:
|
||||||
|
|
||||||
|
```
|
||||||
|
[{
|
||||||
|
"@type": "entry",
|
||||||
|
"onyx:hasEmotionSet": [],
|
||||||
|
"nif:isString": "testing the template",
|
||||||
|
"marl:hasOpinion": [
|
||||||
|
{
|
||||||
|
"@type": "sentiment",
|
||||||
|
"marl:hasPolarity": "marl:Positive"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}]
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
And the template:
|
||||||
|
|
||||||
|
```
|
||||||
|
{% for entry in entries %}
|
||||||
|
{{ entry["nif:isString"] | upper }},{{entry.sentiments[0]["marl:hasPolarity"].split(":")[1]}}
|
||||||
|
{% endfor %}
|
||||||
|
```
|
||||||
|
|
||||||
|
The final result would be:
|
||||||
|
|
||||||
|
```
|
||||||
|
TESTING THE TEMPLATE,Positive
|
||||||
|
```
|
||||||
|
"""
|
||||||
|
|
||||||
|
},
|
||||||
|
"fields": {
|
||||||
|
"@id": "fields",
|
||||||
|
"required": False,
|
||||||
|
"description": """A jmespath selector, that can be used to extract a new dictionary, array or value
|
||||||
|
from the results.
|
||||||
|
jmespath is a powerful query language for json and/or dictionaries.
|
||||||
|
It allows you to change the structure (and data) of your objects through queries.
|
||||||
|
|
||||||
|
e.g., the following expression gets a list of `[emotion label, intensity]` for each entry:
|
||||||
|
`entries[]."onyx:hasEmotionSet"[]."onyx:hasEmotion"[]["onyx:hasEmotionCategory","onyx:hasEmotionIntensity"]`
|
||||||
|
|
||||||
|
For more information, see: https://jmespath.org
|
||||||
|
|
||||||
|
"""
|
||||||
},
|
},
|
||||||
"help": {
|
"help": {
|
||||||
"@id": "help",
|
"@id": "help",
|
||||||
"description": "Show additional help to know more about the possible parameters",
|
"description": "Show additional help to know more about the possible parameters",
|
||||||
"aliases": ["h"],
|
"aliases": ["h"],
|
||||||
"required": True,
|
"required": True,
|
||||||
"options": "boolean",
|
"options": boolean,
|
||||||
"default": False
|
"default": False
|
||||||
},
|
},
|
||||||
"emotionModel": {
|
"verbose": {
|
||||||
|
"@id": "verbose",
|
||||||
|
"description": "Show all properties in the result",
|
||||||
|
"aliases": ["v"],
|
||||||
|
"required": True,
|
||||||
|
"options": boolean,
|
||||||
|
"default": False
|
||||||
|
},
|
||||||
|
"aliases": {
|
||||||
|
"@id": "aliases",
|
||||||
|
"description": "Replace JSON properties with their aliases",
|
||||||
|
"aliases": [],
|
||||||
|
"required": True,
|
||||||
|
"options": boolean,
|
||||||
|
"default": False
|
||||||
|
},
|
||||||
|
"emotion-model": {
|
||||||
"@id": "emotionModel",
|
"@id": "emotionModel",
|
||||||
"aliases": ["emoModel"],
|
"description": """Emotion model to use in the response.
|
||||||
|
Senpy will try to convert the output to this model automatically.
|
||||||
|
|
||||||
|
Examples: `wna:liking` and `emoml:big6`.
|
||||||
|
""",
|
||||||
|
"aliases": ["emoModel", "emotionModel"],
|
||||||
"required": False
|
"required": False
|
||||||
},
|
},
|
||||||
"conversion": {
|
"conversion": {
|
||||||
"@id": "conversion",
|
"@id": "conversion",
|
||||||
"description": "How to show the elements that have (not) been converted",
|
"description": """How to show the elements that have (not) been converted.
|
||||||
|
|
||||||
|
* full: converted and original elements will appear side-by-side
|
||||||
|
* filtered: only converted elements will be shown
|
||||||
|
* nested: converted elements will be shown, and they will include a link to the original element
|
||||||
|
(using `prov:wasGeneratedBy`).
|
||||||
|
""",
|
||||||
"required": True,
|
"required": True,
|
||||||
"options": ["filtered", "nested", "full"],
|
"options": ["filtered", "nested", "full"],
|
||||||
"default": "full"
|
"default": "full"
|
||||||
@@ -56,9 +167,10 @@ API_PARAMS = {
|
|||||||
EVAL_PARAMS = {
|
EVAL_PARAMS = {
|
||||||
"algorithm": {
|
"algorithm": {
|
||||||
"aliases": ["plug", "p", "plugins", "algorithms", 'algo', 'a', 'plugin'],
|
"aliases": ["plug", "p", "plugins", "algorithms", 'algo', 'a', 'plugin'],
|
||||||
"description": "Plugins to be evaluated",
|
"description": "Plugins to evaluate",
|
||||||
"required": True,
|
"required": True,
|
||||||
"help": "See activated plugins in /plugins"
|
"help": "See activated plugins in /plugins",
|
||||||
|
"processor": API_PARAMS['algorithm']['processor']
|
||||||
},
|
},
|
||||||
"dataset": {
|
"dataset": {
|
||||||
"aliases": ["datasets", "data", "d"],
|
"aliases": ["datasets", "data", "d"],
|
||||||
@@ -69,27 +181,28 @@ EVAL_PARAMS = {
|
|||||||
}
|
}
|
||||||
|
|
||||||
PLUGINS_PARAMS = {
|
PLUGINS_PARAMS = {
|
||||||
"plugin_type": {
|
"plugin-type": {
|
||||||
"@id": "pluginType",
|
"@id": "pluginType",
|
||||||
"description": 'What kind of plugins to list',
|
"description": 'What kind of plugins to list',
|
||||||
"aliases": ["pluginType"],
|
"aliases": ["pluginType", "plugin_type"],
|
||||||
"required": True,
|
"required": True,
|
||||||
"default": 'analysisPlugin'
|
"default": 'analysisPlugin'
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
WEB_PARAMS = {
|
WEB_PARAMS = {
|
||||||
"inHeaders": {
|
"in-headers": {
|
||||||
"aliases": ["headers"],
|
"aliases": ["headers", "inheaders", "inHeaders", "in-headers", "in_headers"],
|
||||||
|
"description": "Only include the JSON-LD context in the headers",
|
||||||
"required": True,
|
"required": True,
|
||||||
"default": False,
|
"default": False,
|
||||||
"options": "boolean"
|
"options": boolean
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
CLI_PARAMS = {
|
CLI_PARAMS = {
|
||||||
"plugin_folder": {
|
"plugin-folder": {
|
||||||
"aliases": ["folder"],
|
"aliases": ["folder", "plugin_folder"],
|
||||||
"required": True,
|
"required": True,
|
||||||
"default": "."
|
"default": "."
|
||||||
},
|
},
|
||||||
@@ -104,6 +217,7 @@ NIF_PARAMS = {
|
|||||||
},
|
},
|
||||||
"intype": {
|
"intype": {
|
||||||
"@id": "intype",
|
"@id": "intype",
|
||||||
|
"description": "input type",
|
||||||
"aliases": ["t"],
|
"aliases": ["t"],
|
||||||
"required": False,
|
"required": False,
|
||||||
"default": "direct",
|
"default": "direct",
|
||||||
@@ -111,6 +225,7 @@ NIF_PARAMS = {
|
|||||||
},
|
},
|
||||||
"informat": {
|
"informat": {
|
||||||
"@id": "informat",
|
"@id": "informat",
|
||||||
|
"description": "input format",
|
||||||
"aliases": ["f"],
|
"aliases": ["f"],
|
||||||
"required": False,
|
"required": False,
|
||||||
"default": "text",
|
"default": "text",
|
||||||
@@ -118,24 +233,36 @@ NIF_PARAMS = {
|
|||||||
},
|
},
|
||||||
"language": {
|
"language": {
|
||||||
"@id": "language",
|
"@id": "language",
|
||||||
|
"description": "language of the input",
|
||||||
"aliases": ["l"],
|
"aliases": ["l"],
|
||||||
"required": False,
|
"required": False,
|
||||||
},
|
},
|
||||||
"prefix": {
|
"prefix": {
|
||||||
"@id": "prefix",
|
"@id": "prefix",
|
||||||
|
"description": "prefix to use for new entities",
|
||||||
"aliases": ["p"],
|
"aliases": ["p"],
|
||||||
"required": True,
|
"required": True,
|
||||||
"default": "",
|
"default": "",
|
||||||
},
|
},
|
||||||
"urischeme": {
|
"urischeme": {
|
||||||
"@id": "urischeme",
|
"@id": "urischeme",
|
||||||
|
"description": "scheme for NIF URIs",
|
||||||
"aliases": ["u"],
|
"aliases": ["u"],
|
||||||
"required": False,
|
"required": False,
|
||||||
"default": "RFC5147String",
|
"default": "RFC5147String",
|
||||||
"options": "RFC5147String"
|
"options": ["RFC5147String", ]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
BUILTIN_PARAMS = {}
|
||||||
|
|
||||||
|
for d in [
|
||||||
|
NIF_PARAMS, CLI_PARAMS, WEB_PARAMS, PLUGINS_PARAMS, EVAL_PARAMS,
|
||||||
|
API_PARAMS
|
||||||
|
]:
|
||||||
|
for k, v in d.items():
|
||||||
|
BUILTIN_PARAMS[k] = v
|
||||||
|
|
||||||
|
|
||||||
def parse_params(indict, *specs):
|
def parse_params(indict, *specs):
|
||||||
if not specs:
|
if not specs:
|
||||||
@@ -147,10 +274,10 @@ def parse_params(indict, *specs):
|
|||||||
for param, options in iteritems(spec):
|
for param, options in iteritems(spec):
|
||||||
for alias in options.get("aliases", []):
|
for alias in options.get("aliases", []):
|
||||||
# Replace each alias with the correct name of the parameter
|
# Replace each alias with the correct name of the parameter
|
||||||
if alias in indict and alias is not param:
|
if alias in indict and alias != param:
|
||||||
outdict[param] = indict[alias]
|
outdict[param] = indict[alias]
|
||||||
del outdict[alias]
|
del outdict[alias]
|
||||||
continue
|
break
|
||||||
if param not in outdict:
|
if param not in outdict:
|
||||||
if "default" in options:
|
if "default" in options:
|
||||||
# We assume the default is correct
|
# We assume the default is correct
|
||||||
@@ -158,9 +285,11 @@ def parse_params(indict, *specs):
|
|||||||
elif options.get("required", False):
|
elif options.get("required", False):
|
||||||
wrong_params[param] = spec[param]
|
wrong_params[param] = spec[param]
|
||||||
continue
|
continue
|
||||||
|
if 'processor' in options:
|
||||||
|
outdict[param] = processors[options['processor']](outdict[param])
|
||||||
if "options" in options:
|
if "options" in options:
|
||||||
if options["options"] == "boolean":
|
if options["options"] == boolean:
|
||||||
outdict[param] = outdict[param] in [None, True, 'true', '1']
|
outdict[param] = str(outdict[param]).lower() in ['true', '1', '']
|
||||||
elif outdict[param] not in options["options"]:
|
elif outdict[param] not in options["options"]:
|
||||||
wrong_params[param] = spec[param]
|
wrong_params[param] = spec[param]
|
||||||
if wrong_params:
|
if wrong_params:
|
||||||
@@ -171,30 +300,126 @@ def parse_params(indict, *specs):
|
|||||||
parameters=outdict,
|
parameters=outdict,
|
||||||
errors=wrong_params)
|
errors=wrong_params)
|
||||||
raise message
|
raise message
|
||||||
if 'algorithm' in outdict and not isinstance(outdict['algorithm'], list):
|
|
||||||
outdict['algorithm'] = outdict['algorithm'].split(',')
|
|
||||||
return outdict
|
return outdict
|
||||||
|
|
||||||
|
|
||||||
def parse_extra_params(request, plugin=None):
|
def get_all_params(plugins, *specs):
|
||||||
params = request.parameters.copy()
|
'''Return a list of parameters for a given set of specifications and plugins.'''
|
||||||
if plugin:
|
dic = {}
|
||||||
extra_params = parse_params(params, plugin.get('extra_params', {}))
|
for s in specs:
|
||||||
params.update(extra_params)
|
dic.update(s)
|
||||||
|
dic.update(get_extra_params(plugins))
|
||||||
|
return dic
|
||||||
|
|
||||||
|
|
||||||
|
def get_extra_params(plugins):
|
||||||
|
'''Get a list of possible parameters given a list of plugins'''
|
||||||
|
params = {}
|
||||||
|
extra_params = {}
|
||||||
|
for plugin in plugins:
|
||||||
|
this_params = plugin.get('extra_params', {})
|
||||||
|
for k, v in this_params.items():
|
||||||
|
if k not in extra_params:
|
||||||
|
extra_params[k] = {}
|
||||||
|
extra_params[k][plugin.name] = v
|
||||||
|
for k, v in extra_params.items(): # Resolve conflicts
|
||||||
|
if len(v) == 1: # Add the extra options that do not collide
|
||||||
|
params[k] = list(v.values())[0]
|
||||||
|
else:
|
||||||
|
required = False
|
||||||
|
aliases = None
|
||||||
|
options = None
|
||||||
|
default = None
|
||||||
|
nodefault = False # Set when defaults are not compatible
|
||||||
|
|
||||||
|
for plugin, opt in v.items():
|
||||||
|
params['{}.{}'.format(plugin, k)] = opt
|
||||||
|
required = required or opt.get('required', False)
|
||||||
|
newaliases = set(opt.get('aliases', []))
|
||||||
|
if aliases is None:
|
||||||
|
aliases = newaliases
|
||||||
|
else:
|
||||||
|
aliases = aliases & newaliases
|
||||||
|
if 'options' in opt:
|
||||||
|
newoptions = set(opt['options'])
|
||||||
|
options = newoptions if options is None else options & newoptions
|
||||||
|
if 'default' in opt:
|
||||||
|
newdefault = opt['default']
|
||||||
|
if newdefault:
|
||||||
|
if default is None and not nodefault:
|
||||||
|
default = newdefault
|
||||||
|
elif newdefault != default:
|
||||||
|
nodefault = True
|
||||||
|
default = None
|
||||||
|
# Check for incompatibilities
|
||||||
|
if options != set():
|
||||||
|
params[k] = {
|
||||||
|
'default': default,
|
||||||
|
'aliases': list(aliases),
|
||||||
|
'required': required,
|
||||||
|
'options': list(options)
|
||||||
|
}
|
||||||
return params
|
return params
|
||||||
|
|
||||||
|
|
||||||
|
def parse_analyses(params, plugins):
|
||||||
|
'''
|
||||||
|
Parse the given parameters individually for each plugin, and get a list of the parameters that
|
||||||
|
belong to each of the plugins. Each item can then be used in the plugin.analyse_entries method.
|
||||||
|
'''
|
||||||
|
analysis_list = []
|
||||||
|
for i, plugin in enumerate(plugins):
|
||||||
|
if not plugin:
|
||||||
|
continue
|
||||||
|
this_params = filter_params(params, plugin, i)
|
||||||
|
parsed = parse_params(this_params, plugin.get('extra_params', {}))
|
||||||
|
analysis = plugin.activity(parsed)
|
||||||
|
analysis_list.append(analysis)
|
||||||
|
return analysis_list
|
||||||
|
|
||||||
|
|
||||||
|
def filter_params(params, plugin, ith=-1):
|
||||||
|
'''
|
||||||
|
Get the values within params that apply to a plugin.
|
||||||
|
More specific names override more general names, in this order:
|
||||||
|
|
||||||
|
<index_order>.parameter > <plugin.name>.parameter > parameter
|
||||||
|
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
>>> filter_params({'0.hello': True, 'hello': False}, Plugin(), 0)
|
||||||
|
{ '0.hello': True, 'hello': True}
|
||||||
|
|
||||||
|
'''
|
||||||
|
thisparams = {}
|
||||||
|
if ith >= 0:
|
||||||
|
ith = '{}.'.format(ith)
|
||||||
|
else:
|
||||||
|
ith = ""
|
||||||
|
for k, v in params.items():
|
||||||
|
if ith and k.startswith(str(ith)):
|
||||||
|
thisparams[k[len(ith):]] = v
|
||||||
|
elif k.startswith(plugin.name):
|
||||||
|
thisparams[k[len(plugin.name) + 1:]] = v
|
||||||
|
elif k not in thisparams:
|
||||||
|
thisparams[k] = v
|
||||||
|
return thisparams
|
||||||
|
|
||||||
|
|
||||||
def parse_call(params):
|
def parse_call(params):
|
||||||
'''Return a results object based on the parameters used in a call/request.
|
'''
|
||||||
|
Return a results object based on the parameters used in a call/request.
|
||||||
'''
|
'''
|
||||||
params = parse_params(params, NIF_PARAMS)
|
params = parse_params(params, NIF_PARAMS)
|
||||||
if params['informat'] == 'text':
|
if params['informat'] == 'text':
|
||||||
results = Results()
|
results = Results()
|
||||||
entry = Entry(nif__isString=params['input'])
|
entry = Entry(nif__isString=params['input'], id='prefix:') # Use @base
|
||||||
results.entries.append(entry)
|
results.entries.append(entry)
|
||||||
elif params['informat'] == 'json-ld':
|
elif params['informat'] == 'json-ld':
|
||||||
results = from_string(params['input'], cls=Results)
|
results = from_string(params['input'], cls=Results)
|
||||||
else: # pragma: no cover
|
else: # pragma: no cover
|
||||||
raise NotImplementedError('Informat {} is not implemented'.format(params['informat']))
|
raise NotImplementedError('Informat {} is not implemented'.format(
|
||||||
|
params['informat']))
|
||||||
results.parameters = params
|
results.parameters = params
|
||||||
return results
|
return results
|
||||||
|
@@ -1,32 +1,35 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
# Copyright 2014 J. Fernando Sánchez Rada - Grupo de Sistemas Inteligentes
|
|
||||||
# DIT, UPM
|
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
#
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
#
|
#
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
#
|
||||||
"""
|
"""
|
||||||
Blueprints for Senpy
|
Blueprints for Senpy
|
||||||
"""
|
"""
|
||||||
from flask import (Blueprint, request, current_app, render_template, url_for,
|
from flask import (Blueprint, request, current_app, render_template, url_for,
|
||||||
jsonify)
|
jsonify, redirect)
|
||||||
from .models import Error, Response, Help, Plugins, read_schema, Datasets
|
from .models import Error, Response, Help, Plugins, read_schema, dump_schema, Datasets
|
||||||
from . import api
|
from . import api
|
||||||
from .version import __version__
|
from .version import __version__
|
||||||
from functools import wraps
|
from functools import wraps
|
||||||
|
|
||||||
|
from .gsitk_compat import GSITK_AVAILABLE, datasets
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
import traceback
|
|
||||||
import json
|
import json
|
||||||
|
import base64
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@@ -34,6 +37,24 @@ api_blueprint = Blueprint("api", __name__)
|
|||||||
demo_blueprint = Blueprint("demo", __name__, template_folder='templates')
|
demo_blueprint = Blueprint("demo", __name__, template_folder='templates')
|
||||||
ns_blueprint = Blueprint("ns", __name__)
|
ns_blueprint = Blueprint("ns", __name__)
|
||||||
|
|
||||||
|
_mimetypes_r = {'json-ld': ['application/ld+json'],
|
||||||
|
'turtle': ['text/turtle'],
|
||||||
|
'ntriples': ['application/n-triples'],
|
||||||
|
'text': ['text/plain']}
|
||||||
|
|
||||||
|
MIMETYPES = {}
|
||||||
|
|
||||||
|
for k, vs in _mimetypes_r.items():
|
||||||
|
for v in vs:
|
||||||
|
if v in MIMETYPES:
|
||||||
|
raise Exception('MIMETYPE {} specified for two formats: {} and {}'.format(v,
|
||||||
|
v,
|
||||||
|
MIMETYPES[v]))
|
||||||
|
MIMETYPES[v] = k
|
||||||
|
|
||||||
|
DEFAULT_MIMETYPE = 'application/ld+json'
|
||||||
|
DEFAULT_FORMAT = 'json-ld'
|
||||||
|
|
||||||
|
|
||||||
def get_params(req):
|
def get_params(req):
|
||||||
if req.method == 'POST':
|
if req.method == 'POST':
|
||||||
@@ -45,93 +66,179 @@ def get_params(req):
|
|||||||
return indict
|
return indict
|
||||||
|
|
||||||
|
|
||||||
|
def encode_url(url=None):
|
||||||
|
code = ''
|
||||||
|
if not url:
|
||||||
|
url = request.parameters.get('prefix', request.full_path[1:] + '#')
|
||||||
|
return code or base64.urlsafe_b64encode(url.encode()).decode()
|
||||||
|
|
||||||
|
|
||||||
|
def url_for_code(code, base=None):
|
||||||
|
# if base:
|
||||||
|
# return base + code
|
||||||
|
# return url_for('api.decode', code=code, _external=True)
|
||||||
|
# This was producing unique yet very long URIs, which wasn't ideal for visualization.
|
||||||
|
return 'http://senpy.invalid/'
|
||||||
|
|
||||||
|
|
||||||
|
def decoded_url(code, base=None):
|
||||||
|
path = base64.urlsafe_b64decode(code.encode()).decode()
|
||||||
|
if path[:4] == 'http':
|
||||||
|
return path
|
||||||
|
base = base or request.url_root
|
||||||
|
return base + path
|
||||||
|
|
||||||
|
|
||||||
@demo_blueprint.route('/')
|
@demo_blueprint.route('/')
|
||||||
def index():
|
def index():
|
||||||
return render_template("index.html", version=__version__)
|
# ev = str(get_params(request).get('evaluation', True))
|
||||||
|
# evaluation_enabled = ev.lower() not in ['false', 'no', 'none']
|
||||||
|
evaluation_enabled = GSITK_AVAILABLE
|
||||||
|
|
||||||
|
return render_template("index.html",
|
||||||
|
evaluation=evaluation_enabled,
|
||||||
|
version=__version__)
|
||||||
|
|
||||||
|
|
||||||
@api_blueprint.route('/contexts/<entity>.jsonld')
|
@api_blueprint.route('/contexts/<code>')
|
||||||
def context(entity="context"):
|
def context(code=''):
|
||||||
context = Response._context
|
context = Response._context
|
||||||
context['@vocab'] = url_for('ns.index', _external=True)
|
context['@base'] = url_for('api.decode', code=code, _external=True)
|
||||||
|
context['endpoint'] = url_for('api.api_root', _external=True)
|
||||||
return jsonify({"@context": context})
|
return jsonify({"@context": context})
|
||||||
|
|
||||||
|
|
||||||
|
@api_blueprint.route('/d/<code>')
|
||||||
|
def decode(code):
|
||||||
|
try:
|
||||||
|
return redirect(decoded_url(code))
|
||||||
|
except Exception:
|
||||||
|
return Error('invalid URL').flask()
|
||||||
|
|
||||||
|
|
||||||
@ns_blueprint.route('/') # noqa: F811
|
@ns_blueprint.route('/') # noqa: F811
|
||||||
def index():
|
def index():
|
||||||
context = Response._context
|
context = Response._context.copy()
|
||||||
context['@vocab'] = url_for('.ns', _external=True)
|
context['endpoint'] = url_for('api.api_root', _external=True)
|
||||||
return jsonify({"@context": context})
|
return jsonify({"@context": context})
|
||||||
|
|
||||||
|
|
||||||
@api_blueprint.route('/schemas/<schema>')
|
@api_blueprint.route('/schemas/<schema>')
|
||||||
def schema(schema="definitions"):
|
def schema(schema="definitions"):
|
||||||
try:
|
try:
|
||||||
return jsonify(read_schema(schema))
|
return dump_schema(read_schema(schema))
|
||||||
except Exception: # Should be FileNotFoundError, but it's missing from py2
|
except Exception as ex: # Should be FileNotFoundError, but it's missing from py2
|
||||||
return Error(message="Schema not found", status=404).flask()
|
return Error(message="Schema not found: {}".format(ex), status=404).flask()
|
||||||
|
|
||||||
|
|
||||||
def basic_api(f):
|
def basic_api(f):
|
||||||
default_params = {
|
default_params = {
|
||||||
'inHeaders': False,
|
'in-headers': False,
|
||||||
'expanded-jsonld': False,
|
'expanded-jsonld': False,
|
||||||
'outformat': 'json-ld',
|
'outformat': None,
|
||||||
'with_parameters': True,
|
'with-parameters': True,
|
||||||
}
|
}
|
||||||
|
|
||||||
@wraps(f)
|
@wraps(f)
|
||||||
def decorated_function(*args, **kwargs):
|
def decorated_function(*args, **kwargs):
|
||||||
raw_params = get_params(request)
|
raw_params = get_params(request)
|
||||||
logger.info('Getting request: {}'.format(raw_params))
|
# logger.info('Getting request: {}'.format(raw_params))
|
||||||
|
logger.debug('Getting request. Params: {}'.format(raw_params))
|
||||||
headers = {'X-ORIGINAL-PARAMS': json.dumps(raw_params)}
|
headers = {'X-ORIGINAL-PARAMS': json.dumps(raw_params)}
|
||||||
params = default_params
|
params = default_params
|
||||||
|
|
||||||
|
mime = request.accept_mimetypes\
|
||||||
|
.best_match(MIMETYPES.keys(),
|
||||||
|
DEFAULT_MIMETYPE)
|
||||||
|
|
||||||
|
mimeformat = MIMETYPES.get(mime, DEFAULT_FORMAT)
|
||||||
|
outformat = mimeformat
|
||||||
|
|
||||||
try:
|
try:
|
||||||
params = api.parse_params(raw_params, api.WEB_PARAMS, api.API_PARAMS)
|
params = api.parse_params(raw_params, api.WEB_PARAMS, api.API_PARAMS)
|
||||||
|
outformat = params.get('outformat', mimeformat)
|
||||||
if hasattr(request, 'parameters'):
|
if hasattr(request, 'parameters'):
|
||||||
request.parameters.update(params)
|
request.parameters.update(params)
|
||||||
else:
|
else:
|
||||||
request.parameters = params
|
request.parameters = params
|
||||||
response = f(*args, **kwargs)
|
response = f(*args, **kwargs)
|
||||||
|
|
||||||
|
if 'parameters' in response and not params['with-parameters']:
|
||||||
|
del response.parameters
|
||||||
|
|
||||||
|
logger.debug('Response: {}'.format(response))
|
||||||
|
|
||||||
|
prefix = params.get('prefix')
|
||||||
|
code = encode_url(prefix)
|
||||||
|
|
||||||
|
return response.flask(
|
||||||
|
in_headers=params['in-headers'],
|
||||||
|
headers=headers,
|
||||||
|
prefix=prefix or url_for_code(code),
|
||||||
|
base=prefix,
|
||||||
|
context_uri=url_for('api.context',
|
||||||
|
code=code,
|
||||||
|
_external=True),
|
||||||
|
outformat=outformat,
|
||||||
|
expanded=params['expanded-jsonld'],
|
||||||
|
template=params.get('template'),
|
||||||
|
verbose=params['verbose'],
|
||||||
|
aliases=params['aliases'],
|
||||||
|
fields=params.get('fields'))
|
||||||
|
|
||||||
except (Exception) as ex:
|
except (Exception) as ex:
|
||||||
if current_app.debug:
|
if current_app.debug or current_app.config['TESTING']:
|
||||||
raise
|
raise
|
||||||
if not isinstance(ex, Error):
|
if not isinstance(ex, Error):
|
||||||
msg = "{}:\n\t{}".format(ex,
|
msg = "{}".format(ex)
|
||||||
traceback.format_exc())
|
|
||||||
ex = Error(message=msg, status=500)
|
ex = Error(message=msg, status=500)
|
||||||
logger.exception('Error returning analysis result')
|
|
||||||
response = ex
|
response = ex
|
||||||
response.parameters = raw_params
|
response.parameters = raw_params
|
||||||
logger.error(ex)
|
logger.exception(ex)
|
||||||
|
return response.flask(
|
||||||
if 'parameters' in response and not params['with_parameters']:
|
outformat=outformat,
|
||||||
del response.parameters
|
expanded=params['expanded-jsonld'],
|
||||||
|
verbose=params.get('verbose', True),
|
||||||
logger.info('Response: {}'.format(response))
|
)
|
||||||
return response.flask(
|
|
||||||
in_headers=params['inHeaders'],
|
|
||||||
headers=headers,
|
|
||||||
prefix=url_for('.api_root', _external=True),
|
|
||||||
context_uri=url_for('api.context',
|
|
||||||
entity=type(response).__name__,
|
|
||||||
_external=True),
|
|
||||||
outformat=params['outformat'],
|
|
||||||
expanded=params['expanded-jsonld'])
|
|
||||||
|
|
||||||
return decorated_function
|
return decorated_function
|
||||||
|
|
||||||
|
|
||||||
@api_blueprint.route('/', methods=['POST', 'GET'])
|
@api_blueprint.route('/', defaults={'plugins': None}, methods=['POST', 'GET'])
|
||||||
|
@api_blueprint.route('/<path:plugins>', methods=['POST', 'GET'])
|
||||||
@basic_api
|
@basic_api
|
||||||
def api_root():
|
def api_root(plugins):
|
||||||
|
if plugins:
|
||||||
|
if request.parameters['algorithm'] != api.API_PARAMS['algorithm']['default']:
|
||||||
|
raise Error('You cannot specify the algorithm with a parameter and a URL variable.'
|
||||||
|
' Please, remove one of them')
|
||||||
|
plugins = plugins.replace('+', ',').replace('/', ',')
|
||||||
|
plugins = api.processors['string_to_tuple'](plugins)
|
||||||
|
else:
|
||||||
|
plugins = request.parameters['algorithm']
|
||||||
|
|
||||||
|
print(plugins)
|
||||||
|
|
||||||
|
sp = current_app.senpy
|
||||||
|
plugins = sp.get_plugins(plugins)
|
||||||
|
|
||||||
if request.parameters['help']:
|
if request.parameters['help']:
|
||||||
dic = dict(api.API_PARAMS, **api.NIF_PARAMS)
|
apis = [api.WEB_PARAMS, api.API_PARAMS, api.NIF_PARAMS]
|
||||||
response = Help(valid_parameters=dic)
|
# Verbose is set to False as default, but we want it to default to
|
||||||
|
# True for help. This checks the original value, to make sure it wasn't
|
||||||
|
# set by default.
|
||||||
|
if not request.parameters['verbose'] and get_params(request).get('verbose'):
|
||||||
|
apis = []
|
||||||
|
if request.parameters['algorithm'] == ['default', ]:
|
||||||
|
plugins = []
|
||||||
|
allparameters = api.get_all_params(plugins, *apis)
|
||||||
|
response = Help(valid_parameters=allparameters)
|
||||||
return response
|
return response
|
||||||
req = api.parse_call(request.parameters)
|
req = api.parse_call(request.parameters)
|
||||||
return current_app.senpy.analyse(req)
|
analyses = api.parse_analyses(req.parameters, plugins)
|
||||||
|
results = current_app.senpy.analyse(req, analyses)
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
@api_blueprint.route('/evaluate/', methods=['POST', 'GET'])
|
@api_blueprint.route('/evaluate/', methods=['POST', 'GET'])
|
||||||
@basic_api
|
@basic_api
|
||||||
@@ -145,28 +252,27 @@ def evaluate():
|
|||||||
response = current_app.senpy.evaluate(params)
|
response = current_app.senpy.evaluate(params)
|
||||||
return response
|
return response
|
||||||
|
|
||||||
|
|
||||||
@api_blueprint.route('/plugins/', methods=['POST', 'GET'])
|
@api_blueprint.route('/plugins/', methods=['POST', 'GET'])
|
||||||
@basic_api
|
@basic_api
|
||||||
def plugins():
|
def plugins():
|
||||||
sp = current_app.senpy
|
sp = current_app.senpy
|
||||||
params = api.parse_params(request.parameters, api.PLUGINS_PARAMS)
|
params = api.parse_params(request.parameters, api.PLUGINS_PARAMS)
|
||||||
ptype = params.get('plugin_type')
|
ptype = params.get('plugin-type')
|
||||||
plugins = list(sp.plugins(plugin_type=ptype))
|
plugins = list(sp.analysis_plugins(plugin_type=ptype))
|
||||||
dic = Plugins(plugins=plugins)
|
dic = Plugins(plugins=plugins)
|
||||||
return dic
|
return dic
|
||||||
|
|
||||||
|
|
||||||
@api_blueprint.route('/plugins/<plugin>/', methods=['POST', 'GET'])
|
@api_blueprint.route('/plugins/<plugin>/', methods=['POST', 'GET'])
|
||||||
@basic_api
|
@basic_api
|
||||||
def plugin(plugin=None):
|
def plugin(plugin):
|
||||||
sp = current_app.senpy
|
sp = current_app.senpy
|
||||||
return sp.get_plugin(plugin)
|
return sp.get_plugin(plugin)
|
||||||
|
|
||||||
|
|
||||||
@api_blueprint.route('/datasets/', methods=['POST','GET'])
|
@api_blueprint.route('/datasets/', methods=['POST', 'GET'])
|
||||||
@basic_api
|
@basic_api
|
||||||
def datasets():
|
def get_datasets():
|
||||||
sp = current_app.senpy
|
dic = Datasets(datasets=list(datasets.values()))
|
||||||
datasets = sp.datasets
|
return dic
|
||||||
dic = Datasets(datasets = list(datasets.values()))
|
|
||||||
return dic
|
|
||||||
|
27
senpy/cli.py
27
senpy/cli.py
@@ -1,3 +1,20 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
from __future__ import print_function
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
from .models import Error
|
from .models import Error
|
||||||
from .extensions import Senpy
|
from .extensions import Senpy
|
||||||
@@ -27,14 +44,14 @@ def main_function(argv):
|
|||||||
api.CLI_PARAMS,
|
api.CLI_PARAMS,
|
||||||
api.API_PARAMS,
|
api.API_PARAMS,
|
||||||
api.NIF_PARAMS)
|
api.NIF_PARAMS)
|
||||||
plugin_folder = params['plugin_folder']
|
plugin_folder = params['plugin-folder']
|
||||||
default_plugins = params.get('default-plugins', False)
|
default_plugins = not params.get('no-default-plugins', False)
|
||||||
sp = Senpy(default_plugins=default_plugins, plugin_folder=plugin_folder)
|
sp = Senpy(default_plugins=default_plugins, plugin_folder=plugin_folder)
|
||||||
request = api.parse_call(params)
|
request = api.parse_call(params)
|
||||||
algos = request.parameters.get('algorithm', None)
|
algos = sp.get_plugins(request.parameters.get('algorithm', None))
|
||||||
if algos:
|
if algos:
|
||||||
for algo in algos:
|
for algo in algos:
|
||||||
sp.activate_plugin(algo)
|
sp.activate_plugin(algo.name)
|
||||||
else:
|
else:
|
||||||
sp.activate_all()
|
sp.activate_all()
|
||||||
res = sp.analyse(request)
|
res = sp.analyse(request)
|
||||||
@@ -48,7 +65,7 @@ def main():
|
|||||||
res = main_function(sys.argv[1:])
|
res = main_function(sys.argv[1:])
|
||||||
print(res.serialize())
|
print(res.serialize())
|
||||||
except Error as err:
|
except Error as err:
|
||||||
print(err.serialize())
|
print(err.serialize(), file=sys.stderr)
|
||||||
sys.exit(2)
|
sys.exit(2)
|
||||||
|
|
||||||
|
|
||||||
|
@@ -1,3 +1,19 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
import requests
|
import requests
|
||||||
import logging
|
import logging
|
||||||
from . import models
|
from . import models
|
||||||
@@ -13,7 +29,7 @@ class Client(object):
|
|||||||
return self.request('/', method=method, input=input, **kwargs)
|
return self.request('/', method=method, input=input, **kwargs)
|
||||||
|
|
||||||
def evaluate(self, input, method='GET', **kwargs):
|
def evaluate(self, input, method='GET', **kwargs):
|
||||||
return self.request('/evaluate', method = method, input=input, **kwargs)
|
return self.request('/evaluate', method=method, input=input, **kwargs)
|
||||||
|
|
||||||
def plugins(self, *args, **kwargs):
|
def plugins(self, *args, **kwargs):
|
||||||
resp = self.request(path='/plugins').plugins
|
resp = self.request(path='/plugins').plugins
|
||||||
@@ -24,8 +40,12 @@ class Client(object):
|
|||||||
return {d.name: d for d in resp}
|
return {d.name: d for d in resp}
|
||||||
|
|
||||||
def request(self, path=None, method='GET', **params):
|
def request(self, path=None, method='GET', **params):
|
||||||
url = '{}{}'.format(self.endpoint, path)
|
url = '{}{}'.format(self.endpoint.rstrip('/'), path)
|
||||||
response = requests.request(method=method, url=url, params=params)
|
if method == 'POST':
|
||||||
|
response = requests.post(url=url, data=params)
|
||||||
|
else:
|
||||||
|
response = requests.request(method=method, url=url, params=params)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
resp = models.from_dict(response.json())
|
resp = models.from_dict(response.json())
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
|
@@ -1,3 +1,18 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
"""
|
"""
|
||||||
Main class for Senpy.
|
Main class for Senpy.
|
||||||
It orchestrates plugin (de)activation and analysis.
|
It orchestrates plugin (de)activation and analysis.
|
||||||
@@ -6,8 +21,8 @@ from future import standard_library
|
|||||||
standard_library.install_aliases()
|
standard_library.install_aliases()
|
||||||
|
|
||||||
from . import plugins, api
|
from . import plugins, api
|
||||||
from .plugins import Plugin, evaluate
|
|
||||||
from .models import Error, AggregatedEvaluation
|
from .models import Error, AggregatedEvaluation
|
||||||
|
from .plugins import AnalysisPlugin
|
||||||
from .blueprints import api_blueprint, demo_blueprint, ns_blueprint
|
from .blueprints import api_blueprint, demo_blueprint, ns_blueprint
|
||||||
|
|
||||||
from threading import Thread
|
from threading import Thread
|
||||||
@@ -17,19 +32,14 @@ import copy
|
|||||||
import errno
|
import errno
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
|
from . import gsitk_compat
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
try:
|
|
||||||
from gsitk.datasets.datasets import DatasetManager
|
|
||||||
GSITK_AVAILABLE = True
|
|
||||||
except ImportError:
|
|
||||||
logger.warn('GSITK is not installed. Some functions will be unavailable.')
|
|
||||||
GSITK_AVAILABLE = False
|
|
||||||
|
|
||||||
|
|
||||||
class Senpy(object):
|
class Senpy(object):
|
||||||
""" Default Senpy extension for Flask """
|
""" Default Senpy extension for Flask """
|
||||||
|
|
||||||
def __init__(self,
|
def __init__(self,
|
||||||
app=None,
|
app=None,
|
||||||
plugin_folder=".",
|
plugin_folder=".",
|
||||||
@@ -55,11 +65,12 @@ class Senpy(object):
|
|||||||
self.add_folder('plugins', from_root=True)
|
self.add_folder('plugins', from_root=True)
|
||||||
else:
|
else:
|
||||||
# Add only conversion plugins
|
# Add only conversion plugins
|
||||||
self.add_folder(os.path.join('plugins', 'conversion'),
|
self.add_folder(os.path.join('plugins', 'postprocessing'),
|
||||||
from_root=True)
|
from_root=True)
|
||||||
self.app = app
|
self.app = app
|
||||||
if app is not None:
|
if app is not None:
|
||||||
self.init_app(app)
|
self.init_app(app)
|
||||||
|
self._conversion_candidates = {}
|
||||||
|
|
||||||
def init_app(self, app):
|
def init_app(self, app):
|
||||||
""" Initialise a flask app to add plugins to its context """
|
""" Initialise a flask app to add plugins to its context """
|
||||||
@@ -80,31 +91,55 @@ class Senpy(object):
|
|||||||
|
|
||||||
def add_plugin(self, plugin):
|
def add_plugin(self, plugin):
|
||||||
self._plugins[plugin.name.lower()] = plugin
|
self._plugins[plugin.name.lower()] = plugin
|
||||||
|
self._conversion_candidates = {}
|
||||||
|
|
||||||
def delete_plugin(self, plugin):
|
def delete_plugin(self, plugin):
|
||||||
del self._plugins[plugin.name.lower()]
|
del self._plugins[plugin.name.lower()]
|
||||||
|
|
||||||
def plugins(self, **kwargs):
|
def plugins(self, plugin_type=None, is_activated=True, **kwargs):
|
||||||
""" Return the plugins registered for a given application. Filtered by criteria """
|
""" Return the plugins registered for a given application. Filtered by criteria """
|
||||||
return list(plugins.pfilter(self._plugins, **kwargs))
|
return sorted(plugins.pfilter(self._plugins,
|
||||||
|
plugin_type=plugin_type,
|
||||||
|
is_activated=is_activated,
|
||||||
|
**kwargs),
|
||||||
|
key=lambda x: x.id)
|
||||||
|
|
||||||
def get_plugin(self, name, default=None):
|
def get_plugin(self, name, default=None):
|
||||||
if name == 'default':
|
if name == 'default':
|
||||||
return self.default_plugin
|
return self.default_plugin
|
||||||
plugin = name.lower()
|
elif name == 'conversion':
|
||||||
if plugin in self._plugins:
|
return None
|
||||||
return self._plugins[plugin]
|
|
||||||
|
|
||||||
results = self.plugins(id='plugins/{}'.format(name))
|
if name.lower() in self._plugins:
|
||||||
|
return self._plugins[name.lower()]
|
||||||
|
|
||||||
if not results:
|
results = self.plugins(id='endpoint:plugins/{}'.format(name.lower()),
|
||||||
return Error(message="Plugin not found", status=404)
|
plugin_type=None)
|
||||||
return results[0]
|
if results:
|
||||||
|
return results[0]
|
||||||
|
|
||||||
@property
|
results = self.plugins(id=name,
|
||||||
def analysis_plugins(self):
|
plugin_type=None)
|
||||||
""" Return only the analysis plugins """
|
if results:
|
||||||
return self.plugins(plugin_type='analysisPlugin')
|
return results[0]
|
||||||
|
|
||||||
|
msg = ("Plugin not found: '{}'\n"
|
||||||
|
"Make sure it is ACTIVATED\n"
|
||||||
|
"Valid algorithms: {}").format(name,
|
||||||
|
self._plugins.keys())
|
||||||
|
raise Error(message=msg, status=404)
|
||||||
|
|
||||||
|
def get_plugins(self, name):
|
||||||
|
try:
|
||||||
|
name = name.split(',')
|
||||||
|
except AttributeError:
|
||||||
|
pass # Assume it is a tuple or a list
|
||||||
|
return tuple(self.get_plugin(n) for n in name)
|
||||||
|
|
||||||
|
def analysis_plugins(self, **kwargs):
|
||||||
|
""" Return only the analysis plugins that are active"""
|
||||||
|
candidates = self.plugins(**kwargs)
|
||||||
|
return list(plugins.pfilter(candidates, plugin_type=AnalysisPlugin))
|
||||||
|
|
||||||
def add_folder(self, folder, from_root=False):
|
def add_folder(self, folder, from_root=False):
|
||||||
""" Find plugins in this folder and add them to this instance """
|
""" Find plugins in this folder and add them to this instance """
|
||||||
@@ -119,135 +154,49 @@ class Senpy(object):
|
|||||||
else:
|
else:
|
||||||
raise AttributeError("Not a folder or does not exist: %s", folder)
|
raise AttributeError("Not a folder or does not exist: %s", folder)
|
||||||
|
|
||||||
def _get_plugins(self, request):
|
def _process(self, req, pending, done=None):
|
||||||
if not self.analysis_plugins:
|
|
||||||
raise Error(
|
|
||||||
status=404,
|
|
||||||
message=("No plugins found."
|
|
||||||
" Please install one."))
|
|
||||||
algos = request.parameters.get('algorithm', None)
|
|
||||||
if not algos:
|
|
||||||
if self.default_plugin:
|
|
||||||
algos = [self.default_plugin.name, ]
|
|
||||||
else:
|
|
||||||
raise Error(
|
|
||||||
status=404,
|
|
||||||
message="No default plugin found, and None provided")
|
|
||||||
|
|
||||||
plugins = list()
|
|
||||||
for algo in algos:
|
|
||||||
algo = algo.lower()
|
|
||||||
if algo not in self._plugins:
|
|
||||||
msg = ("The algorithm '{}' is not valid\n"
|
|
||||||
"Valid algorithms: {}").format(algo,
|
|
||||||
self._plugins.keys())
|
|
||||||
logger.debug(msg)
|
|
||||||
raise Error(
|
|
||||||
status=404,
|
|
||||||
message=msg)
|
|
||||||
plugins.append(self._plugins[algo])
|
|
||||||
return plugins
|
|
||||||
|
|
||||||
def _process_entries(self, entries, req, plugins):
|
|
||||||
"""
|
"""
|
||||||
Recursively process the entries with the first plugin in the list, and pass the results
|
Recursively process the entries with the first plugin in the list, and pass the results
|
||||||
to the rest of the plugins.
|
to the rest of the plugins.
|
||||||
"""
|
"""
|
||||||
if not plugins:
|
done = done or []
|
||||||
for i in entries:
|
if not pending:
|
||||||
yield i
|
return req
|
||||||
return
|
|
||||||
plugin = plugins[0]
|
analysis = pending[0]
|
||||||
self._activate(plugin) # Make sure the plugin is activated
|
results = analysis.run(req)
|
||||||
specific_params = api.parse_extra_params(req, plugin)
|
results.activities.append(analysis)
|
||||||
req.analysis.append({'plugin': plugin,
|
done += analysis
|
||||||
'parameters': specific_params})
|
return self._process(results, pending[1:], done)
|
||||||
results = plugin.analyse_entries(entries, specific_params)
|
|
||||||
for i in self._process_entries(results, req, plugins[1:]):
|
|
||||||
yield i
|
|
||||||
|
|
||||||
def install_deps(self):
|
def install_deps(self):
|
||||||
for plugin in self.plugins(is_activated=True):
|
logger.info('Installing dependencies')
|
||||||
plugins.install_deps(plugin)
|
# If a plugin is activated, its dependencies should already be installed
|
||||||
|
# Otherwise, it would've failed to activate.
|
||||||
|
plugins.install_deps(*self.plugins(is_activated=False))
|
||||||
|
|
||||||
def analyse(self, request):
|
def analyse(self, request, analyses=None):
|
||||||
"""
|
"""
|
||||||
Main method that analyses a request, either from CLI or HTTP.
|
Main method that analyses a request, either from CLI or HTTP.
|
||||||
It takes a processed request, provided by the user, as returned
|
It takes a processed request, provided by the user, as returned
|
||||||
by api.parse_call().
|
by api.parse_call().
|
||||||
"""
|
"""
|
||||||
logger.debug("analysing request: {}".format(request))
|
if not self.plugins():
|
||||||
entries = request.entries
|
|
||||||
request.entries = []
|
|
||||||
plugins = self._get_plugins(request)
|
|
||||||
results = request
|
|
||||||
for i in self._process_entries(entries, results, plugins):
|
|
||||||
results.entries.append(i)
|
|
||||||
self.convert_emotions(results)
|
|
||||||
logger.debug("Returning analysis result: {}".format(results))
|
|
||||||
results.analysis = [i['plugin'].id for i in results.analysis]
|
|
||||||
return results
|
|
||||||
|
|
||||||
def _get_datasets(self, request):
|
|
||||||
if not self.datasets:
|
|
||||||
raise Error(
|
raise Error(
|
||||||
status=404,
|
status=404,
|
||||||
message=("No datasets found."
|
message=("No plugins found."
|
||||||
" Please verify DatasetManager"))
|
" Please install one."))
|
||||||
datasets_name = request.parameters.get('dataset', None).split(',')
|
if analyses is None:
|
||||||
for dataset in datasets_name:
|
plugins = self.get_plugins(request.parameters['algorithm'])
|
||||||
if dataset not in self.datasets:
|
analyses = api.parse_analyses(request.parameters, plugins)
|
||||||
logger.debug(("The dataset '{}' is not valid\n"
|
logger.debug("analysing request: {}".format(request))
|
||||||
"Valid datasets: {}").format(dataset,
|
results = self._process(request, analyses)
|
||||||
self.datasets.keys()))
|
logger.debug("Got analysis result: {}".format(results))
|
||||||
raise Error(
|
results = self.postprocess(results, analyses)
|
||||||
status=404,
|
logger.debug("Returning post-processed result: {}".format(results))
|
||||||
message="The dataset '{}' is not valid".format(dataset))
|
|
||||||
dm = DatasetManager()
|
|
||||||
datasets = dm.prepare_datasets(datasets_name)
|
|
||||||
return datasets
|
|
||||||
|
|
||||||
@property
|
|
||||||
def datasets(self):
|
|
||||||
if not GSITK_AVAILABLE:
|
|
||||||
raise Exception('GSITK is not available. Install it to use this function.')
|
|
||||||
self._dataset_list = {}
|
|
||||||
dm = DatasetManager()
|
|
||||||
for item in dm.get_datasets():
|
|
||||||
for key in item:
|
|
||||||
if key in self._dataset_list:
|
|
||||||
continue
|
|
||||||
properties = item[key]
|
|
||||||
properties['@id'] = key
|
|
||||||
self._dataset_list[key] = properties
|
|
||||||
return self._dataset_list
|
|
||||||
|
|
||||||
def evaluate(self, params):
|
|
||||||
if not GSITK_AVAILABLE:
|
|
||||||
raise Exception('GSITK is not available. Install it to use this function.')
|
|
||||||
logger.debug("evaluating request: {}".format(params))
|
|
||||||
results = AggregatedEvaluation()
|
|
||||||
results.parameters = params
|
|
||||||
datasets = self._get_datasets(results)
|
|
||||||
plugins = self._get_plugins(results)
|
|
||||||
for eval in evaluate(plugins, datasets):
|
|
||||||
results.evaluations.append(eval)
|
|
||||||
if 'with_parameters' not in results.parameters:
|
|
||||||
del results.parameters
|
|
||||||
logger.debug("Returning evaluation result: {}".format(results))
|
|
||||||
return results
|
return results
|
||||||
|
|
||||||
def _conversion_candidates(self, fromModel, toModel):
|
def convert_emotions(self, resp, analyses):
|
||||||
candidates = self.plugins(plugin_type='emotionConversionPlugin')
|
|
||||||
for candidate in candidates:
|
|
||||||
for pair in candidate.onyx__doesConversion:
|
|
||||||
logging.debug(pair)
|
|
||||||
|
|
||||||
if pair['onyx:conversionFrom'] == fromModel \
|
|
||||||
and pair['onyx:conversionTo'] == toModel:
|
|
||||||
yield candidate
|
|
||||||
|
|
||||||
def convert_emotions(self, resp):
|
|
||||||
"""
|
"""
|
||||||
Conversion of all emotions in a response **in place**.
|
Conversion of all emotions in a response **in place**.
|
||||||
In addition to converting from one model to another, it has
|
In addition to converting from one model to another, it has
|
||||||
@@ -255,52 +204,125 @@ class Senpy(object):
|
|||||||
Needless to say, this is far from an elegant solution, but it works.
|
Needless to say, this is far from an elegant solution, but it works.
|
||||||
@todo refactor and clean up
|
@todo refactor and clean up
|
||||||
"""
|
"""
|
||||||
plugins = [i['plugin'] for i in resp.analysis]
|
|
||||||
params = resp.parameters
|
logger.debug("Converting emotions")
|
||||||
toModel = params.get('emotionModel', None)
|
if 'parameters' not in resp:
|
||||||
|
logger.debug("NO PARAMETERS")
|
||||||
|
return resp
|
||||||
|
|
||||||
|
params = resp['parameters']
|
||||||
|
toModel = params.get('emotion-model', None)
|
||||||
if not toModel:
|
if not toModel:
|
||||||
return
|
logger.debug("NO tomodel PARAMETER")
|
||||||
|
return resp
|
||||||
|
|
||||||
logger.debug('Asked for model: {}'.format(toModel))
|
logger.debug('Asked for model: {}'.format(toModel))
|
||||||
output = params.get('conversion', None)
|
output = params.get('conversion', None)
|
||||||
candidates = {}
|
|
||||||
for plugin in plugins:
|
|
||||||
try:
|
|
||||||
fromModel = plugin.get('onyx:usesEmotionModel', None)
|
|
||||||
candidates[plugin.id] = next(self._conversion_candidates(fromModel, toModel))
|
|
||||||
logger.debug('Analysis plugin {} uses model: {}'.format(plugin.id, fromModel))
|
|
||||||
except StopIteration:
|
|
||||||
e = Error(('No conversion plugin found for: '
|
|
||||||
'{} -> {}'.format(fromModel, toModel)),
|
|
||||||
status=404)
|
|
||||||
e.original_response = resp
|
|
||||||
e.parameters = params
|
|
||||||
raise e
|
|
||||||
newentries = []
|
newentries = []
|
||||||
|
done = []
|
||||||
for i in resp.entries:
|
for i in resp.entries:
|
||||||
|
|
||||||
if output == "full":
|
if output == "full":
|
||||||
newemotions = copy.deepcopy(i.emotions)
|
newemotions = copy.deepcopy(i.emotions)
|
||||||
else:
|
else:
|
||||||
newemotions = []
|
newemotions = []
|
||||||
for j in i.emotions:
|
for j in i.emotions:
|
||||||
plugname = j['prov:wasGeneratedBy']
|
activity = j['prov:wasGeneratedBy']
|
||||||
candidate = candidates[plugname]
|
act = resp.activity(activity)
|
||||||
resp.analysis.append({'plugin': candidate,
|
if not act:
|
||||||
'parameters': params})
|
raise Error('Could not find the emotion model for {}'.format(activity))
|
||||||
|
fromModel = act.plugin['onyx:usesEmotionModel']
|
||||||
|
if toModel == fromModel:
|
||||||
|
continue
|
||||||
|
candidate = self._conversion_candidate(fromModel, toModel)
|
||||||
|
if not candidate:
|
||||||
|
e = Error(('No conversion plugin found for: '
|
||||||
|
'{} -> {}'.format(fromModel, toModel)),
|
||||||
|
status=404)
|
||||||
|
e.original_response = resp
|
||||||
|
e.parameters = params
|
||||||
|
raise e
|
||||||
|
|
||||||
|
analysis = candidate.activity(params)
|
||||||
|
done.append(analysis)
|
||||||
for k in candidate.convert(j, fromModel, toModel, params):
|
for k in candidate.convert(j, fromModel, toModel, params):
|
||||||
k.prov__wasGeneratedBy = candidate.id
|
k.prov__wasGeneratedBy = analysis.id
|
||||||
if output == 'nested':
|
if output == 'nested':
|
||||||
k.prov__wasDerivedFrom = j
|
k.prov__wasDerivedFrom = j
|
||||||
newemotions.append(k)
|
newemotions.append(k)
|
||||||
i.emotions = newemotions
|
i.emotions = newemotions
|
||||||
newentries.append(i)
|
newentries.append(i)
|
||||||
resp.entries = newentries
|
resp.entries = newentries
|
||||||
|
return resp
|
||||||
|
|
||||||
|
def _conversion_candidate(self, fromModel, toModel):
|
||||||
|
if not self._conversion_candidates:
|
||||||
|
candidates = {}
|
||||||
|
for conv in self.plugins(plugin_type=plugins.EmotionConversion):
|
||||||
|
for pair in conv.onyx__doesConversion:
|
||||||
|
logging.debug(pair)
|
||||||
|
key = (pair['onyx:conversionFrom'], pair['onyx:conversionTo'])
|
||||||
|
if key not in candidates:
|
||||||
|
candidates[key] = []
|
||||||
|
candidates[key].append(conv)
|
||||||
|
self._conversion_candidates = candidates
|
||||||
|
|
||||||
|
key = (fromModel, toModel)
|
||||||
|
if key not in self._conversion_candidates:
|
||||||
|
return None
|
||||||
|
return self._conversion_candidates[key][0]
|
||||||
|
|
||||||
|
def postprocess(self, response, analyses):
|
||||||
|
'''
|
||||||
|
Transform the results from the analysis plugins.
|
||||||
|
It has some pre-defined post-processing like emotion conversion,
|
||||||
|
and it also allows plugins to auto-select themselves.
|
||||||
|
'''
|
||||||
|
|
||||||
|
response = self.convert_emotions(response, analyses)
|
||||||
|
|
||||||
|
for plug in self.plugins(plugin_type=plugins.PostProcessing):
|
||||||
|
if plug.check(response, response.activities):
|
||||||
|
activity = plug.activity(response.parameters)
|
||||||
|
response = plug.process(response, activity)
|
||||||
|
return response
|
||||||
|
|
||||||
|
def _get_datasets(self, request):
|
||||||
|
datasets_name = request.parameters.get('dataset', None).split(',')
|
||||||
|
for dataset in datasets_name:
|
||||||
|
if dataset not in gsitk_compat.datasets:
|
||||||
|
logger.debug(("The dataset '{}' is not valid\n"
|
||||||
|
"Valid datasets: {}").format(
|
||||||
|
dataset, gsitk_compat.datasets.keys()))
|
||||||
|
raise Error(
|
||||||
|
status=404,
|
||||||
|
message="The dataset '{}' is not valid".format(dataset))
|
||||||
|
return datasets_name
|
||||||
|
|
||||||
|
def evaluate(self, params):
|
||||||
|
logger.debug("evaluating request: {}".format(params))
|
||||||
|
results = AggregatedEvaluation()
|
||||||
|
results.parameters = params
|
||||||
|
datasets = self._get_datasets(results)
|
||||||
|
plugs = []
|
||||||
|
for plugname in params['algorithm']:
|
||||||
|
plugs = self.get_plugins(plugname)
|
||||||
|
for plug in plugs:
|
||||||
|
if not isinstance(plug, plugins.Evaluable):
|
||||||
|
raise Exception('Plugin {} can not be evaluated', plug.id)
|
||||||
|
|
||||||
|
for eval in plugins.evaluate(plugs, datasets):
|
||||||
|
results.evaluations.append(eval)
|
||||||
|
if 'with-parameters' not in results.parameters:
|
||||||
|
del results.parameters
|
||||||
|
logger.debug("Returning evaluation result: {}".format(results))
|
||||||
|
return results
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def default_plugin(self):
|
def default_plugin(self):
|
||||||
if not self._default or not self._default.is_activated:
|
if not self._default or not self._default.is_activated:
|
||||||
candidates = self.plugins(plugin_type='analysisPlugin',
|
candidates = self.analysis_plugins()
|
||||||
is_activated=True)
|
|
||||||
if len(candidates) > 0:
|
if len(candidates) > 0:
|
||||||
self._default = candidates[0]
|
self._default = candidates[0]
|
||||||
else:
|
else:
|
||||||
@@ -310,7 +332,7 @@ class Senpy(object):
|
|||||||
|
|
||||||
@default_plugin.setter
|
@default_plugin.setter
|
||||||
def default_plugin(self, value):
|
def default_plugin(self, value):
|
||||||
if isinstance(value, Plugin):
|
if isinstance(value, plugins.Plugin):
|
||||||
if not value.is_activated:
|
if not value.is_activated:
|
||||||
raise AttributeError('The default plugin has to be activated.')
|
raise AttributeError('The default plugin has to be activated.')
|
||||||
self._default = value
|
self._default = value
|
||||||
@@ -318,10 +340,15 @@ class Senpy(object):
|
|||||||
else:
|
else:
|
||||||
self._default = self._plugins[value.lower()]
|
self._default = self._plugins[value.lower()]
|
||||||
|
|
||||||
def activate_all(self, sync=True):
|
def activate_all(self, sync=True, allow_fail=False):
|
||||||
ps = []
|
ps = []
|
||||||
for plug in self._plugins.keys():
|
for plug in self._plugins.keys():
|
||||||
ps.append(self.activate_plugin(plug, sync=sync))
|
try:
|
||||||
|
self.activate_plugin(plug, sync=sync)
|
||||||
|
except Exception as ex:
|
||||||
|
if not allow_fail:
|
||||||
|
raise
|
||||||
|
logger.error('Could not activate {}: {}'.format(plug, ex))
|
||||||
return ps
|
return ps
|
||||||
|
|
||||||
def deactivate_all(self, sync=True):
|
def deactivate_all(self, sync=True):
|
||||||
@@ -330,22 +357,16 @@ class Senpy(object):
|
|||||||
ps.append(self.deactivate_plugin(plug, sync=sync))
|
ps.append(self.deactivate_plugin(plug, sync=sync))
|
||||||
return ps
|
return ps
|
||||||
|
|
||||||
def _set_active(self, plugin, active=True, *args, **kwargs):
|
|
||||||
''' We're using a variable in the plugin itself to activate/deactivate plugins.\
|
|
||||||
Note that plugins may activate themselves by setting this variable.
|
|
||||||
'''
|
|
||||||
plugin.is_activated = active
|
|
||||||
|
|
||||||
def _activate(self, plugin):
|
def _activate(self, plugin):
|
||||||
success = False
|
success = False
|
||||||
with plugin._lock:
|
with plugin._lock:
|
||||||
if plugin.is_activated:
|
if plugin.is_activated:
|
||||||
return
|
return
|
||||||
plugin.activate()
|
plugin._activate()
|
||||||
msg = "Plugin activated: {}".format(plugin.name)
|
msg = "Plugin activated: {}".format(plugin.name)
|
||||||
logger.info(msg)
|
logger.info(msg)
|
||||||
success = True
|
success = plugin.is_activated
|
||||||
self._set_active(plugin, success)
|
return success
|
||||||
|
|
||||||
def activate_plugin(self, plugin_name, sync=True):
|
def activate_plugin(self, plugin_name, sync=True):
|
||||||
plugin_name = plugin_name.lower()
|
plugin_name = plugin_name.lower()
|
||||||
@@ -356,8 +377,9 @@ class Senpy(object):
|
|||||||
|
|
||||||
logger.info("Activating plugin: {}".format(plugin.name))
|
logger.info("Activating plugin: {}".format(plugin.name))
|
||||||
|
|
||||||
if sync or 'async' in plugin and not plugin.async:
|
if sync or not getattr(plugin, 'async', True) or getattr(
|
||||||
self._activate(plugin)
|
plugin, 'sync', False):
|
||||||
|
return self._activate(plugin)
|
||||||
else:
|
else:
|
||||||
th = Thread(target=partial(self._activate, plugin))
|
th = Thread(target=partial(self._activate, plugin))
|
||||||
th.start()
|
th.start()
|
||||||
@@ -367,7 +389,7 @@ class Senpy(object):
|
|||||||
with plugin._lock:
|
with plugin._lock:
|
||||||
if not plugin.is_activated:
|
if not plugin.is_activated:
|
||||||
return
|
return
|
||||||
plugin.deactivate()
|
plugin._deactivate()
|
||||||
logger.info("Plugin deactivated: {}".format(plugin.name))
|
logger.info("Plugin deactivated: {}".format(plugin.name))
|
||||||
|
|
||||||
def deactivate_plugin(self, plugin_name, sync=True):
|
def deactivate_plugin(self, plugin_name, sync=True):
|
||||||
@@ -377,12 +399,11 @@ class Senpy(object):
|
|||||||
message="Plugin not found: {}".format(plugin_name), status=404)
|
message="Plugin not found: {}".format(plugin_name), status=404)
|
||||||
plugin = self._plugins[plugin_name]
|
plugin = self._plugins[plugin_name]
|
||||||
|
|
||||||
self._set_active(plugin, False)
|
if sync or not getattr(plugin, 'async', True) or not getattr(
|
||||||
|
plugin, 'sync', False):
|
||||||
if sync or 'async' in plugin and not plugin.async:
|
plugin._deactivate()
|
||||||
self._deactivate(plugin)
|
|
||||||
else:
|
else:
|
||||||
th = Thread(target=partial(self._deactivate, plugin))
|
th = Thread(target=plugin.deactivate)
|
||||||
th.start()
|
th.start()
|
||||||
return th
|
return th
|
||||||
|
|
||||||
|
67
senpy/gsitk_compat.py
Normal file
67
senpy/gsitk_compat.py
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
|
||||||
|
from pkg_resources import parse_version, get_distribution, DistributionNotFound
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
MSG = 'GSITK is not (properly) installed.'
|
||||||
|
IMPORTMSG = '{} Some functions will be unavailable.'.format(MSG)
|
||||||
|
RUNMSG = '{} Install it to use this function.'.format(MSG)
|
||||||
|
|
||||||
|
|
||||||
|
def raise_exception(*args, **kwargs):
|
||||||
|
raise Exception(RUNMSG)
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
gsitk_distro = get_distribution("gsitk")
|
||||||
|
GSITK_VERSION = parse_version(gsitk_distro.version)
|
||||||
|
|
||||||
|
if not os.environ.get('DATA_PATH'):
|
||||||
|
os.environ['DATA_PATH'] = os.environ.get('SENPY_DATA', 'senpy_data')
|
||||||
|
|
||||||
|
from gsitk.datasets.datasets import DatasetManager
|
||||||
|
from gsitk.evaluation.evaluation import Evaluation as Eval # noqa: F401
|
||||||
|
from gsitk.evaluation.evaluation import EvalPipeline # noqa: F401
|
||||||
|
from sklearn.pipeline import Pipeline
|
||||||
|
modules = locals()
|
||||||
|
GSITK_AVAILABLE = True
|
||||||
|
datasets = {}
|
||||||
|
manager = DatasetManager()
|
||||||
|
|
||||||
|
for item in manager.get_datasets():
|
||||||
|
for key in item:
|
||||||
|
if key in datasets:
|
||||||
|
continue
|
||||||
|
properties = item[key]
|
||||||
|
properties['@id'] = key
|
||||||
|
datasets[key] = properties
|
||||||
|
|
||||||
|
def prepare(ds, *args, **kwargs):
|
||||||
|
return manager.prepare_datasets(ds, *args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
except (DistributionNotFound, ImportError) as err:
|
||||||
|
logger.debug('Error importing GSITK: {}'.format(err))
|
||||||
|
logger.warning(IMPORTMSG)
|
||||||
|
GSITK_AVAILABLE = False
|
||||||
|
GSITK_VERSION = ()
|
||||||
|
DatasetManager = Eval = Pipeline = prepare = raise_exception
|
||||||
|
datasets = {}
|
@@ -1,3 +1,18 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
'''
|
'''
|
||||||
Meta-programming for the models.
|
Meta-programming for the models.
|
||||||
'''
|
'''
|
||||||
@@ -34,6 +49,7 @@ class BaseMeta(ABCMeta):
|
|||||||
def __new__(mcs, name, bases, attrs, **kwargs):
|
def __new__(mcs, name, bases, attrs, **kwargs):
|
||||||
register_afterwards = False
|
register_afterwards = False
|
||||||
defaults = {}
|
defaults = {}
|
||||||
|
aliases = {}
|
||||||
|
|
||||||
attrs = mcs.expand_with_schema(name, attrs)
|
attrs = mcs.expand_with_schema(name, attrs)
|
||||||
if 'schema' in attrs:
|
if 'schema' in attrs:
|
||||||
@@ -41,17 +57,21 @@ class BaseMeta(ABCMeta):
|
|||||||
for base in bases:
|
for base in bases:
|
||||||
if hasattr(base, '_defaults'):
|
if hasattr(base, '_defaults'):
|
||||||
defaults.update(getattr(base, '_defaults'))
|
defaults.update(getattr(base, '_defaults'))
|
||||||
|
if hasattr(base, '_aliases'):
|
||||||
|
aliases.update(getattr(base, '_aliases'))
|
||||||
|
|
||||||
info, rest = mcs.split_attrs(attrs)
|
info, rest = mcs.split_attrs(attrs)
|
||||||
|
|
||||||
for i in list(info.keys()):
|
for i in list(info.keys()):
|
||||||
if isinstance(info[i], _Alias):
|
if isinstance(info[i], _Alias):
|
||||||
fget, fset, fdel = make_property(info[i].indict)
|
aliases[i] = info[i].indict
|
||||||
rest[i] = property(fget=fget, fset=fset, fdel=fdel)
|
if info[i].default is not None:
|
||||||
|
defaults[i] = info[i].default
|
||||||
else:
|
else:
|
||||||
defaults[i] = info[i]
|
defaults[i] = info[i]
|
||||||
|
|
||||||
rest['_defaults'] = defaults
|
rest['_defaults'] = defaults
|
||||||
|
rest['_aliases'] = aliases
|
||||||
|
|
||||||
cls = super(BaseMeta, mcs).__new__(mcs, name, tuple(bases), rest)
|
cls = super(BaseMeta, mcs).__new__(mcs, name, tuple(bases), rest)
|
||||||
|
|
||||||
@@ -85,7 +105,8 @@ class BaseMeta(ABCMeta):
|
|||||||
schema = json.load(f)
|
schema = json.load(f)
|
||||||
|
|
||||||
resolver = jsonschema.RefResolver(schema_path, schema)
|
resolver = jsonschema.RefResolver(schema_path, schema)
|
||||||
attrs['@type'] = "".join((name[0].lower(), name[1:]))
|
if '@type' not in attrs:
|
||||||
|
attrs['@type'] = name
|
||||||
attrs['_schema_file'] = schema_file
|
attrs['_schema_file'] = schema_file
|
||||||
attrs['schema'] = schema
|
attrs['schema'] = schema
|
||||||
attrs['_validator'] = jsonschema.Draft4Validator(schema, resolver=resolver)
|
attrs['_validator'] = jsonschema.Draft4Validator(schema, resolver=resolver)
|
||||||
@@ -139,9 +160,11 @@ class BaseMeta(ABCMeta):
|
|||||||
return temp
|
return temp
|
||||||
|
|
||||||
|
|
||||||
def make_property(key):
|
def make_property(key, default=None):
|
||||||
|
|
||||||
def fget(self):
|
def fget(self):
|
||||||
|
if default:
|
||||||
|
return self.get(key, copy.copy(default))
|
||||||
return self[key]
|
return self[key]
|
||||||
|
|
||||||
def fdel(self):
|
def fdel(self):
|
||||||
@@ -167,7 +190,7 @@ class CustomDict(MutableMapping, object):
|
|||||||
'''
|
'''
|
||||||
|
|
||||||
_defaults = {}
|
_defaults = {}
|
||||||
_map_attr_key = {'id': '@id'}
|
_aliases = {'id': '@id'}
|
||||||
|
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(self, *args, **kwargs):
|
||||||
super(CustomDict, self).__init__()
|
super(CustomDict, self).__init__()
|
||||||
@@ -176,13 +199,13 @@ class CustomDict(MutableMapping, object):
|
|||||||
for arg in args:
|
for arg in args:
|
||||||
self.update(arg)
|
self.update(arg)
|
||||||
for k, v in kwargs.items():
|
for k, v in kwargs.items():
|
||||||
self[self._attr_to_key(k)] = v
|
self[k] = v
|
||||||
return self
|
return self
|
||||||
|
|
||||||
def serializable(self):
|
def serializable(self, **kwargs):
|
||||||
def ser_or_down(item):
|
def ser_or_down(item):
|
||||||
if hasattr(item, 'serializable'):
|
if hasattr(item, 'serializable'):
|
||||||
return item.serializable()
|
return item.serializable(**kwargs)
|
||||||
elif isinstance(item, dict):
|
elif isinstance(item, dict):
|
||||||
temp = dict()
|
temp = dict()
|
||||||
for kp in item:
|
for kp in item:
|
||||||
@@ -194,10 +217,9 @@ class CustomDict(MutableMapping, object):
|
|||||||
else:
|
else:
|
||||||
return item
|
return item
|
||||||
|
|
||||||
return ser_or_down(self.as_dict())
|
return ser_or_down(self.as_dict(**kwargs))
|
||||||
|
|
||||||
def __getitem__(self, key):
|
def __getitem__(self, key):
|
||||||
key = self._key_to_attr(key)
|
|
||||||
return self.__dict__[key]
|
return self.__dict__[key]
|
||||||
|
|
||||||
def __setitem__(self, key, value):
|
def __setitem__(self, key, value):
|
||||||
@@ -205,9 +227,23 @@ class CustomDict(MutableMapping, object):
|
|||||||
key = self._key_to_attr(key)
|
key = self._key_to_attr(key)
|
||||||
return setattr(self, key, value)
|
return setattr(self, key, value)
|
||||||
|
|
||||||
def as_dict(self):
|
def __delitem__(self, key):
|
||||||
return {self._attr_to_key(k): v for k, v in self.__dict__.items()
|
key = self._key_to_attr(key)
|
||||||
if not self._internal_key(k)}
|
del self.__dict__[key]
|
||||||
|
|
||||||
|
def as_dict(self, verbose=True, aliases=False):
|
||||||
|
attrs = self.__dict__.keys()
|
||||||
|
if not verbose and hasattr(self, '_terse_keys'):
|
||||||
|
attrs = self._terse_keys + ['@type', '@id']
|
||||||
|
res = {k: getattr(self, k) for k in attrs
|
||||||
|
if not self._internal_key(k) and hasattr(self, k)}
|
||||||
|
if not aliases:
|
||||||
|
return res
|
||||||
|
for k, ok in self._aliases.items():
|
||||||
|
if ok in res:
|
||||||
|
res[k] = getattr(res, ok)
|
||||||
|
del res[ok]
|
||||||
|
return res
|
||||||
|
|
||||||
def __iter__(self):
|
def __iter__(self):
|
||||||
return (k for k in self.__dict__ if not self._internal_key(k))
|
return (k for k in self.__dict__ if not self._internal_key(k))
|
||||||
@@ -215,43 +251,52 @@ class CustomDict(MutableMapping, object):
|
|||||||
def __len__(self):
|
def __len__(self):
|
||||||
return len(self.__dict__)
|
return len(self.__dict__)
|
||||||
|
|
||||||
def __delitem__(self, key):
|
|
||||||
del self.__dict__[key]
|
|
||||||
|
|
||||||
def update(self, other):
|
def update(self, other):
|
||||||
for k, v in other.items():
|
for k, v in other.items():
|
||||||
self[k] = v
|
self[k] = v
|
||||||
|
|
||||||
def _attr_to_key(self, key):
|
def _attr_to_key(self, key):
|
||||||
key = key.replace("__", ":", 1)
|
key = key.replace("__", ":", 1)
|
||||||
key = self._map_attr_key.get(key, key)
|
key = self._aliases.get(key, key)
|
||||||
return key
|
return key
|
||||||
|
|
||||||
def _key_to_attr(self, key):
|
def _key_to_attr(self, key):
|
||||||
if self._internal_key(key):
|
if self._internal_key(key):
|
||||||
return key
|
return key
|
||||||
key = key.replace(":", "__", 1)
|
|
||||||
|
if key in self._aliases:
|
||||||
|
key = self._aliases[key]
|
||||||
|
else:
|
||||||
|
key = key.replace(":", "__", 1)
|
||||||
return key
|
return key
|
||||||
|
|
||||||
def __getattr__(self, key):
|
def __getattr__(self, key):
|
||||||
try:
|
nkey = self._attr_to_key(key)
|
||||||
return self.__dict__[self._attr_to_key(key)]
|
if nkey in self.__dict__:
|
||||||
except KeyError:
|
return self.__dict__[nkey]
|
||||||
raise AttributeError
|
elif nkey == key:
|
||||||
|
raise AttributeError("Key not found: {}".format(key))
|
||||||
|
return getattr(self, nkey)
|
||||||
|
|
||||||
|
def __setattr__(self, key, value):
|
||||||
|
super(CustomDict, self).__setattr__(self._attr_to_key(key), value)
|
||||||
|
|
||||||
|
def __delattr__(self, key):
|
||||||
|
super(CustomDict, self).__delattr__(self._attr_to_key(key))
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _internal_key(key):
|
def _internal_key(key):
|
||||||
return key[0] == '_'
|
return key[0] == '_'
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return str(self.serializable())
|
return json.dumps(self.serializable(), sort_keys=True, indent=4)
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
return str(self.serializable())
|
return json.dumps(self.serializable(), sort_keys=True, indent=4)
|
||||||
|
|
||||||
|
|
||||||
_Alias = namedtuple('Alias', 'indict')
|
_Alias = namedtuple('Alias', ['indict', 'default'])
|
||||||
|
|
||||||
|
|
||||||
def alias(key):
|
def alias(key, default=None):
|
||||||
return _Alias(key)
|
return _Alias(key, default)
|
||||||
|
314
senpy/models.py
314
senpy/models.py
@@ -1,3 +1,18 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
'''
|
'''
|
||||||
Senpy Models.
|
Senpy Models.
|
||||||
|
|
||||||
@@ -12,6 +27,8 @@ standard_library.install_aliases()
|
|||||||
from future.utils import with_metaclass
|
from future.utils import with_metaclass
|
||||||
from past.builtins import basestring
|
from past.builtins import basestring
|
||||||
|
|
||||||
|
from jinja2 import Environment, BaseLoader
|
||||||
|
|
||||||
import time
|
import time
|
||||||
import copy
|
import copy
|
||||||
import json
|
import json
|
||||||
@@ -21,6 +38,7 @@ from flask import Response as FlaskResponse
|
|||||||
from pyld import jsonld
|
from pyld import jsonld
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
|
import jmespath
|
||||||
|
|
||||||
logging.getLogger('rdflib').setLevel(logging.WARN)
|
logging.getLogger('rdflib').setLevel(logging.WARN)
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
@@ -31,8 +49,9 @@ from rdflib import Graph
|
|||||||
from .meta import BaseMeta, CustomDict, alias
|
from .meta import BaseMeta, CustomDict, alias
|
||||||
|
|
||||||
DEFINITIONS_FILE = 'definitions.json'
|
DEFINITIONS_FILE = 'definitions.json'
|
||||||
CONTEXT_PATH = os.path.join(
|
CONTEXT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)),
|
||||||
os.path.dirname(os.path.realpath(__file__)), 'schemas', 'context.jsonld')
|
'schemas',
|
||||||
|
'context.jsonld')
|
||||||
|
|
||||||
|
|
||||||
def get_schema_path(schema_file, absolute=False):
|
def get_schema_path(schema_file, absolute=False):
|
||||||
@@ -51,6 +70,10 @@ def read_schema(schema_file, absolute=False):
|
|||||||
return jsonref.load(f, base_uri=schema_uri)
|
return jsonref.load(f, base_uri=schema_uri)
|
||||||
|
|
||||||
|
|
||||||
|
def dump_schema(schema):
|
||||||
|
return jsonref.dumps(schema)
|
||||||
|
|
||||||
|
|
||||||
def load_context(context):
|
def load_context(context):
|
||||||
logging.debug('Loading context: {}'.format(context))
|
logging.debug('Loading context: {}'.format(context))
|
||||||
if not context:
|
if not context:
|
||||||
@@ -117,24 +140,21 @@ class BaseModel(with_metaclass(BaseMeta, CustomDict)):
|
|||||||
|
|
||||||
'''
|
'''
|
||||||
|
|
||||||
schema_file = DEFINITIONS_FILE
|
# schema_file = DEFINITIONS_FILE
|
||||||
_context = base_context["@context"]
|
_context = base_context["@context"]
|
||||||
|
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(self, *args, **kwargs):
|
||||||
auto_id = kwargs.pop('_auto_id', True)
|
auto_id = kwargs.pop('_auto_id', False)
|
||||||
|
|
||||||
super(BaseModel, self).__init__(*args, **kwargs)
|
super(BaseModel, self).__init__(*args, **kwargs)
|
||||||
|
|
||||||
if auto_id:
|
if auto_id:
|
||||||
self.id
|
self.id
|
||||||
|
|
||||||
if '@type' not in self:
|
|
||||||
logger.warn('Created an instance of an unknown model')
|
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def id(self):
|
def id(self):
|
||||||
if '@id' not in self:
|
if '@id' not in self:
|
||||||
self['@id'] = ':{}_{}'.format(type(self).__name__, time.time())
|
self['@id'] = 'prefix:{}_{}'.format(type(self).__name__, time.time())
|
||||||
return self['@id']
|
return self['@id']
|
||||||
|
|
||||||
@id.setter
|
@id.setter
|
||||||
@@ -142,7 +162,7 @@ class BaseModel(with_metaclass(BaseMeta, CustomDict)):
|
|||||||
self['@id'] = value
|
self['@id'] = value
|
||||||
|
|
||||||
def flask(self,
|
def flask(self,
|
||||||
in_headers=True,
|
in_headers=False,
|
||||||
headers=None,
|
headers=None,
|
||||||
outformat='json-ld',
|
outformat='json-ld',
|
||||||
**kwargs):
|
**kwargs):
|
||||||
@@ -170,22 +190,33 @@ class BaseModel(with_metaclass(BaseMeta, CustomDict)):
|
|||||||
headers=headers,
|
headers=headers,
|
||||||
mimetype=mimetype)
|
mimetype=mimetype)
|
||||||
|
|
||||||
def serialize(self, format='json-ld', with_mime=False, **kwargs):
|
def serialize(self, format='json-ld', with_mime=False,
|
||||||
js = self.jsonld(**kwargs)
|
template=None, prefix=None, fields=None, **kwargs):
|
||||||
if format == 'json-ld':
|
js = self.jsonld(prefix=prefix, **kwargs)
|
||||||
|
if template is not None:
|
||||||
|
rtemplate = Environment(loader=BaseLoader).from_string(template)
|
||||||
|
content = rtemplate.render(**self)
|
||||||
|
mimetype = 'text'
|
||||||
|
elif fields is not None:
|
||||||
|
# Emulate field selection by constructing a template
|
||||||
|
content = json.dumps(jmespath.search(fields, js))
|
||||||
|
mimetype = 'text'
|
||||||
|
elif format == 'json-ld':
|
||||||
content = json.dumps(js, indent=2, sort_keys=True)
|
content = json.dumps(js, indent=2, sort_keys=True)
|
||||||
mimetype = "application/json"
|
mimetype = "application/json"
|
||||||
elif format in ['turtle', ]:
|
elif format in ['turtle', 'ntriples']:
|
||||||
logger.debug(js)
|
|
||||||
content = json.dumps(js, indent=2, sort_keys=True)
|
content = json.dumps(js, indent=2, sort_keys=True)
|
||||||
|
logger.debug(js)
|
||||||
|
context = [self._context, {'prefix': prefix, '@base': prefix}]
|
||||||
g = Graph().parse(
|
g = Graph().parse(
|
||||||
data=content,
|
data=content,
|
||||||
format='json-ld',
|
format='json-ld',
|
||||||
base=kwargs.get('prefix'),
|
prefix=prefix,
|
||||||
context=self._context)
|
context=context)
|
||||||
logger.debug(
|
logger.debug(
|
||||||
'Parsing with prefix: {}'.format(kwargs.get('prefix')))
|
'Parsing with prefix: {}'.format(kwargs.get('prefix')))
|
||||||
content = g.serialize(format='turtle').decode('utf-8')
|
content = g.serialize(format=format,
|
||||||
|
prefix=prefix).decode('utf-8')
|
||||||
mimetype = 'text/{}'.format(format)
|
mimetype = 'text/{}'.format(format)
|
||||||
else:
|
else:
|
||||||
raise Error('Unknown outformat: {}'.format(format))
|
raise Error('Unknown outformat: {}'.format(format))
|
||||||
@@ -198,25 +229,35 @@ class BaseModel(with_metaclass(BaseMeta, CustomDict)):
|
|||||||
with_context=False,
|
with_context=False,
|
||||||
context_uri=None,
|
context_uri=None,
|
||||||
prefix=None,
|
prefix=None,
|
||||||
expanded=False):
|
base=None,
|
||||||
ser = self.serializable()
|
expanded=False,
|
||||||
|
**kwargs):
|
||||||
|
|
||||||
|
result = self.serializable(**kwargs)
|
||||||
|
|
||||||
result = jsonld.compact(
|
|
||||||
ser,
|
|
||||||
self._context,
|
|
||||||
options={
|
|
||||||
'base': prefix,
|
|
||||||
'expandContext': self._context,
|
|
||||||
'senpy': prefix
|
|
||||||
})
|
|
||||||
if context_uri:
|
|
||||||
result['@context'] = context_uri
|
|
||||||
if expanded:
|
if expanded:
|
||||||
result = jsonld.expand(
|
result = jsonld.expand(
|
||||||
result, options={'base': prefix,
|
result,
|
||||||
'expandContext': self._context})
|
options={
|
||||||
|
'expandContext': [
|
||||||
|
self._context,
|
||||||
|
{
|
||||||
|
'prefix': prefix,
|
||||||
|
'endpoint': prefix
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
)[0]
|
||||||
if not with_context:
|
if not with_context:
|
||||||
del result['@context']
|
try:
|
||||||
|
del result['@context']
|
||||||
|
except KeyError:
|
||||||
|
pass
|
||||||
|
elif context_uri:
|
||||||
|
result['@context'] = context_uri
|
||||||
|
else:
|
||||||
|
result['@context'] = self._context
|
||||||
|
|
||||||
return result
|
return result
|
||||||
|
|
||||||
def validate(self, obj=None):
|
def validate(self, obj=None):
|
||||||
@@ -234,7 +275,7 @@ def subtypes():
|
|||||||
return BaseMeta._subtypes
|
return BaseMeta._subtypes
|
||||||
|
|
||||||
|
|
||||||
def from_dict(indict, cls=None):
|
def from_dict(indict, cls=None, warn=True):
|
||||||
if not cls:
|
if not cls:
|
||||||
target = indict.get('@type', None)
|
target = indict.get('@type', None)
|
||||||
cls = BaseModel
|
cls = BaseModel
|
||||||
@@ -242,6 +283,10 @@ def from_dict(indict, cls=None):
|
|||||||
cls = subtypes()[target]
|
cls = subtypes()[target]
|
||||||
except KeyError:
|
except KeyError:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
if cls == BaseModel and warn:
|
||||||
|
logger.warning('Created an instance of an unknown model')
|
||||||
|
|
||||||
outdict = dict()
|
outdict = dict()
|
||||||
for k, v in indict.items():
|
for k, v in indict.items():
|
||||||
if k == '@context':
|
if k == '@context':
|
||||||
@@ -261,22 +306,24 @@ def from_string(string, **kwargs):
|
|||||||
return from_dict(json.loads(string), **kwargs)
|
return from_dict(json.loads(string), **kwargs)
|
||||||
|
|
||||||
|
|
||||||
def from_json(injson):
|
def from_json(injson, **kwargs):
|
||||||
indict = json.loads(injson)
|
indict = json.loads(injson)
|
||||||
return from_dict(indict)
|
return from_dict(indict, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
class Entry(BaseModel):
|
class Entry(BaseModel):
|
||||||
schema = 'entry'
|
schema = 'entry'
|
||||||
|
|
||||||
text = alias('nif:isString')
|
text = alias('nif:isString')
|
||||||
|
sentiments = alias('marl:hasOpinion', [])
|
||||||
|
emotions = alias('onyx:hasEmotionSet', [])
|
||||||
|
|
||||||
|
|
||||||
class Sentiment(BaseModel):
|
class Sentiment(BaseModel):
|
||||||
schema = 'sentiment'
|
schema = 'sentiment'
|
||||||
|
|
||||||
polarity = alias('marl:hasPolarity')
|
polarity = alias('marl:hasPolarity')
|
||||||
polarityValue = alias('marl:hasPolarityValue')
|
polarityValue = alias('marl:polarityValue')
|
||||||
|
|
||||||
|
|
||||||
class Error(BaseModel, Exception):
|
class Error(BaseModel, Exception):
|
||||||
@@ -296,7 +343,173 @@ class Error(BaseModel, Exception):
|
|||||||
return Exception.__hash__(self)
|
return Exception.__hash__(self)
|
||||||
|
|
||||||
|
|
||||||
# Add the remaining schemas programmatically
|
class AggregatedEvaluation(BaseModel):
|
||||||
|
schema = 'aggregatedEvaluation'
|
||||||
|
|
||||||
|
evaluations = alias('senpy:evaluations', [])
|
||||||
|
|
||||||
|
|
||||||
|
class Dataset(BaseModel):
|
||||||
|
schema = 'dataset'
|
||||||
|
|
||||||
|
|
||||||
|
class Datasets(BaseModel):
|
||||||
|
schema = 'datasets'
|
||||||
|
|
||||||
|
datasets = []
|
||||||
|
|
||||||
|
|
||||||
|
class Emotion(BaseModel):
|
||||||
|
schema = 'emotion'
|
||||||
|
|
||||||
|
|
||||||
|
class EmotionConversion(BaseModel):
|
||||||
|
schema = 'emotionConversion'
|
||||||
|
|
||||||
|
|
||||||
|
class EmotionConversionPlugin(BaseModel):
|
||||||
|
schema = 'emotionConversionPlugin'
|
||||||
|
|
||||||
|
|
||||||
|
class EmotionAnalysis(BaseModel):
|
||||||
|
schema = 'emotionAnalysis'
|
||||||
|
|
||||||
|
|
||||||
|
class EmotionModel(BaseModel):
|
||||||
|
schema = 'emotionModel'
|
||||||
|
onyx__hasEmotionCategory = []
|
||||||
|
|
||||||
|
|
||||||
|
class EmotionPlugin(BaseModel):
|
||||||
|
schema = 'emotionPlugin'
|
||||||
|
|
||||||
|
|
||||||
|
class EmotionSet(BaseModel):
|
||||||
|
schema = 'emotionSet'
|
||||||
|
|
||||||
|
onyx__hasEmotion = []
|
||||||
|
|
||||||
|
|
||||||
|
class Evaluation(BaseModel):
|
||||||
|
schema = 'evaluation'
|
||||||
|
|
||||||
|
metrics = alias('senpy:metrics', [])
|
||||||
|
|
||||||
|
|
||||||
|
class Entity(BaseModel):
|
||||||
|
schema = 'entity'
|
||||||
|
|
||||||
|
|
||||||
|
class Help(BaseModel):
|
||||||
|
schema = 'help'
|
||||||
|
|
||||||
|
|
||||||
|
class Metric(BaseModel):
|
||||||
|
schema = 'metric'
|
||||||
|
|
||||||
|
|
||||||
|
class Parameter(BaseModel):
|
||||||
|
schema = 'parameter'
|
||||||
|
|
||||||
|
|
||||||
|
class Plugins(BaseModel):
|
||||||
|
schema = 'plugins'
|
||||||
|
|
||||||
|
plugins = []
|
||||||
|
|
||||||
|
|
||||||
|
class Response(BaseModel):
|
||||||
|
schema = 'response'
|
||||||
|
|
||||||
|
|
||||||
|
class Results(BaseModel):
|
||||||
|
schema = 'results'
|
||||||
|
|
||||||
|
_terse_keys = ['entries', ]
|
||||||
|
|
||||||
|
activities = []
|
||||||
|
entries = []
|
||||||
|
|
||||||
|
def activity(self, id):
|
||||||
|
for i in self.activities:
|
||||||
|
if i.id == id:
|
||||||
|
return i
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
class SentimentPlugin(BaseModel):
|
||||||
|
schema = 'sentimentPlugin'
|
||||||
|
|
||||||
|
|
||||||
|
class Suggestion(BaseModel):
|
||||||
|
schema = 'suggestion'
|
||||||
|
|
||||||
|
|
||||||
|
class Topic(BaseModel):
|
||||||
|
schema = 'topic'
|
||||||
|
|
||||||
|
|
||||||
|
class Analysis(BaseModel):
|
||||||
|
'''
|
||||||
|
A prov:Activity that results of executing a Plugin on an entry with a set of
|
||||||
|
parameters.
|
||||||
|
'''
|
||||||
|
schema = 'analysis'
|
||||||
|
|
||||||
|
parameters = alias('prov:used', [])
|
||||||
|
algorithm = alias('prov:wasAssociatedWith', [])
|
||||||
|
|
||||||
|
@property
|
||||||
|
def params(self):
|
||||||
|
outdict = {}
|
||||||
|
outdict['algorithm'] = self.algorithm
|
||||||
|
for param in self.parameters:
|
||||||
|
outdict[param['name']] = param['value']
|
||||||
|
return outdict
|
||||||
|
|
||||||
|
@params.setter
|
||||||
|
def params(self, value):
|
||||||
|
for k, v in value.items():
|
||||||
|
for param in self.parameters:
|
||||||
|
if param.name == k:
|
||||||
|
param.value = v
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
self.parameters.append(Parameter(name=k, value=v)) # noqa: F821
|
||||||
|
|
||||||
|
def param(self, key, default=None):
|
||||||
|
for param in self.parameters:
|
||||||
|
if param['name'] == key:
|
||||||
|
return param['value']
|
||||||
|
return default
|
||||||
|
|
||||||
|
@property
|
||||||
|
def plugin(self):
|
||||||
|
return self._plugin
|
||||||
|
|
||||||
|
@plugin.setter
|
||||||
|
def plugin(self, value):
|
||||||
|
self._plugin = value
|
||||||
|
self['prov:wasAssociatedWith'] = value.id
|
||||||
|
|
||||||
|
def run(self, request):
|
||||||
|
return self.plugin.process(request, self)
|
||||||
|
|
||||||
|
|
||||||
|
class Plugin(BaseModel):
|
||||||
|
schema = 'plugin'
|
||||||
|
extra_params = {}
|
||||||
|
|
||||||
|
def activity(self, parameters=None):
|
||||||
|
'''Generate an Analysis (prov:Activity) from this plugin and the given parameters'''
|
||||||
|
a = Analysis()
|
||||||
|
a.plugin = self
|
||||||
|
if parameters:
|
||||||
|
a.params = parameters
|
||||||
|
return a
|
||||||
|
|
||||||
|
|
||||||
|
# More classes could be added programmatically
|
||||||
|
|
||||||
def _class_from_schema(name, schema=None, schema_file=None, base_classes=None):
|
def _class_from_schema(name, schema=None, schema_file=None, base_classes=None):
|
||||||
base_classes = base_classes or []
|
base_classes = base_classes or []
|
||||||
@@ -316,30 +529,3 @@ def _add_class_from_schema(*args, **kwargs):
|
|||||||
generatedClass = _class_from_schema(*args, **kwargs)
|
generatedClass = _class_from_schema(*args, **kwargs)
|
||||||
globals()[generatedClass.__name__] = generatedClass
|
globals()[generatedClass.__name__] = generatedClass
|
||||||
del generatedClass
|
del generatedClass
|
||||||
|
|
||||||
|
|
||||||
for i in [
|
|
||||||
'analysis',
|
|
||||||
'emotion',
|
|
||||||
'emotionConversion',
|
|
||||||
'emotionConversionPlugin',
|
|
||||||
'emotionAnalysis',
|
|
||||||
'emotionModel',
|
|
||||||
'emotionPlugin',
|
|
||||||
'emotionSet',
|
|
||||||
'entity',
|
|
||||||
'help',
|
|
||||||
'plugin',
|
|
||||||
'plugins',
|
|
||||||
'response',
|
|
||||||
'results',
|
|
||||||
'sentimentPlugin',
|
|
||||||
'suggestion',
|
|
||||||
'aggregatedEvaluation',
|
|
||||||
'evaluation',
|
|
||||||
'metric',
|
|
||||||
'dataset',
|
|
||||||
'datasets',
|
|
||||||
|
|
||||||
]:
|
|
||||||
_add_class_from_schema(i)
|
|
||||||
|
File diff suppressed because it is too large
Load Diff
@@ -1,34 +0,0 @@
|
|||||||
import random
|
|
||||||
|
|
||||||
from senpy.plugins import EmotionPlugin
|
|
||||||
from senpy.models import EmotionSet, Emotion, Entry
|
|
||||||
|
|
||||||
|
|
||||||
class EmoRand(EmotionPlugin):
|
|
||||||
name = "emoRand"
|
|
||||||
description = 'A sample plugin that returns a random emotion annotation'
|
|
||||||
author = '@balkian'
|
|
||||||
version = '0.1'
|
|
||||||
url = "https://github.com/gsi-upm/senpy-plugins-community"
|
|
||||||
requirements = {}
|
|
||||||
onyx__usesEmotionModel = "emoml:big6"
|
|
||||||
|
|
||||||
def analyse_entry(self, entry, params):
|
|
||||||
category = "emoml:big6happiness"
|
|
||||||
number = max(-1, min(1, random.gauss(0, 0.5)))
|
|
||||||
if number > 0:
|
|
||||||
category = "emoml:big6anger"
|
|
||||||
emotionSet = EmotionSet()
|
|
||||||
emotion = Emotion({"onyx:hasEmotionCategory": category})
|
|
||||||
emotionSet.onyx__hasEmotion.append(emotion)
|
|
||||||
emotionSet.prov__wasGeneratedBy = self.id
|
|
||||||
entry.emotions.append(emotionSet)
|
|
||||||
yield entry
|
|
||||||
|
|
||||||
def test(self):
|
|
||||||
params = dict()
|
|
||||||
results = list()
|
|
||||||
for i in range(100):
|
|
||||||
res = next(self.analyse_entry(Entry(nif__isString="Hello"), params))
|
|
||||||
res.validate()
|
|
||||||
results.append(res.emotions[0]['onyx:hasEmotion'][0]['onyx:hasEmotionCategory'])
|
|
@@ -1,66 +0,0 @@
|
|||||||
from senpy.plugins import AnalysisPlugin
|
|
||||||
from senpy.models import Entry
|
|
||||||
from nltk.tokenize.punkt import PunktSentenceTokenizer
|
|
||||||
from nltk.tokenize.simple import LineTokenizer
|
|
||||||
import nltk
|
|
||||||
|
|
||||||
|
|
||||||
class SplitPlugin(AnalysisPlugin):
|
|
||||||
'''description: A sample plugin that chunks input text'''
|
|
||||||
|
|
||||||
def activate(self):
|
|
||||||
nltk.download('punkt')
|
|
||||||
|
|
||||||
def analyse_entry(self, entry, params):
|
|
||||||
chunker_type = params["delimiter"]
|
|
||||||
original_text = entry['nif:isString']
|
|
||||||
if chunker_type == "sentence":
|
|
||||||
tokenizer = PunktSentenceTokenizer()
|
|
||||||
if chunker_type == "paragraph":
|
|
||||||
tokenizer = LineTokenizer()
|
|
||||||
chars = list(tokenizer.span_tokenize(original_text))
|
|
||||||
for i, chunk in enumerate(tokenizer.tokenize(original_text)):
|
|
||||||
print(chunk)
|
|
||||||
e = Entry()
|
|
||||||
e['nif:isString'] = chunk
|
|
||||||
if entry.id:
|
|
||||||
e.id = entry.id + "#char={},{}".format(chars[i][0], chars[i][1])
|
|
||||||
yield e
|
|
||||||
|
|
||||||
test_cases = [
|
|
||||||
{
|
|
||||||
'entry': {
|
|
||||||
'nif:isString': 'Hello. World.'
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
'delimiter': 'sentence',
|
|
||||||
},
|
|
||||||
'expected': [
|
|
||||||
{
|
|
||||||
'nif:isString': 'Hello.'
|
|
||||||
},
|
|
||||||
{
|
|
||||||
'nif:isString': 'World.'
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
'entry': {
|
|
||||||
"@id": ":test",
|
|
||||||
'nif:isString': 'Hello\nWorld'
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
'delimiter': 'paragraph',
|
|
||||||
},
|
|
||||||
'expected': [
|
|
||||||
{
|
|
||||||
"@id": ":test#char=0,5",
|
|
||||||
'nif:isString': 'Hello'
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"@id": ":test#char=6,11",
|
|
||||||
'nif:isString': 'World'
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
]
|
|
@@ -1,19 +0,0 @@
|
|||||||
---
|
|
||||||
name: split
|
|
||||||
module: senpy.plugins.misc.split
|
|
||||||
description: A sample plugin that chunks input text
|
|
||||||
author: "@militarpancho"
|
|
||||||
version: '0.2'
|
|
||||||
url: "https://github.com/gsi-upm/senpy"
|
|
||||||
requirements:
|
|
||||||
- nltk
|
|
||||||
extra_params:
|
|
||||||
delimiter:
|
|
||||||
aliases:
|
|
||||||
- type
|
|
||||||
- t
|
|
||||||
required: false
|
|
||||||
default: sentence
|
|
||||||
options:
|
|
||||||
- sentence
|
|
||||||
- paragraph
|
|
103
senpy/plugins/misc/split_plugin.py
Normal file
103
senpy/plugins/misc/split_plugin.py
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
|
from senpy.plugins import Transformation
|
||||||
|
from senpy.models import Entry
|
||||||
|
from nltk.tokenize.punkt import PunktSentenceTokenizer
|
||||||
|
from nltk.tokenize.simple import LineTokenizer
|
||||||
|
|
||||||
|
|
||||||
|
class Split(Transformation):
|
||||||
|
'''
|
||||||
|
A plugin that chunks input text, into paragraphs or sentences.
|
||||||
|
|
||||||
|
It does not provide any sort of annotation, and it is meant to precede
|
||||||
|
other annotation plugins, when the annotation of individual sentences
|
||||||
|
(or paragraphs) is required.
|
||||||
|
'''
|
||||||
|
|
||||||
|
author = ["@militarpancho", '@balkian']
|
||||||
|
version = '0.3'
|
||||||
|
url = "https://github.com/gsi-upm/senpy"
|
||||||
|
nltk_resources = ['punkt']
|
||||||
|
|
||||||
|
extra_params = {
|
||||||
|
'delimiter': {
|
||||||
|
'description': 'Split text into paragraphs or sentences.',
|
||||||
|
'aliases': ['type', 't'],
|
||||||
|
'required': False,
|
||||||
|
'default': 'sentence',
|
||||||
|
'options': ['sentence', 'paragraph']
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
def analyse_entry(self, entry, activity):
|
||||||
|
yield entry
|
||||||
|
chunker_type = activity.params["delimiter"]
|
||||||
|
original_text = entry['nif:isString']
|
||||||
|
if chunker_type == "sentence":
|
||||||
|
tokenizer = PunktSentenceTokenizer()
|
||||||
|
if chunker_type == "paragraph":
|
||||||
|
tokenizer = LineTokenizer()
|
||||||
|
chars = list(tokenizer.span_tokenize(original_text))
|
||||||
|
if len(chars) == 1:
|
||||||
|
# This sentence was already split
|
||||||
|
return
|
||||||
|
for i, chunk in enumerate(chars):
|
||||||
|
start, end = chunk
|
||||||
|
e = Entry()
|
||||||
|
e['nif:isString'] = original_text[start:end]
|
||||||
|
if entry.id:
|
||||||
|
e.id = entry.id + "#char={},{}".format(start, end)
|
||||||
|
yield e
|
||||||
|
|
||||||
|
test_cases = [
|
||||||
|
{
|
||||||
|
'entry': {
|
||||||
|
'nif:isString': 'Hello. World.'
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'delimiter': 'sentence',
|
||||||
|
},
|
||||||
|
'expected': [
|
||||||
|
{
|
||||||
|
'nif:isString': 'Hello.'
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'nif:isString': 'World.'
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'entry': {
|
||||||
|
"@id": ":test",
|
||||||
|
'nif:isString': 'Hello\nWorld'
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'delimiter': 'paragraph',
|
||||||
|
},
|
||||||
|
'expected': [
|
||||||
|
{
|
||||||
|
"@id": ":test#char=0,5",
|
||||||
|
'nif:isString': 'Hello'
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"@id": ":test#char=6,11",
|
||||||
|
'nif:isString': 'World'
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
@@ -1,3 +1,19 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
from senpy.plugins import EmotionConversionPlugin
|
from senpy.plugins import EmotionConversionPlugin
|
||||||
from senpy.models import EmotionSet, Emotion, Error
|
from senpy.models import EmotionSet, Emotion, Error
|
||||||
|
|
||||||
@@ -85,7 +101,13 @@ class CentroidConversion(EmotionConversionPlugin):
|
|||||||
def distance(centroid):
|
def distance(centroid):
|
||||||
return sum(distance_k(centroid, original, k) for k in dimensions)
|
return sum(distance_k(centroid, original, k) for k in dimensions)
|
||||||
|
|
||||||
emotion = min(centroids, key=lambda x: distance(centroids[x]))
|
distances = {k: distance(centroids[k]) for k in centroids}
|
||||||
|
|
||||||
|
logger.debug('Converting %s', original)
|
||||||
|
logger.debug('Centroids: %s', centroids)
|
||||||
|
logger.debug('Distances: %s', distances)
|
||||||
|
|
||||||
|
emotion = min(distances, key=lambda x: distances[x])
|
||||||
|
|
||||||
result = Emotion(onyx__hasEmotionCategory=emotion)
|
result = Emotion(onyx__hasEmotionCategory=emotion)
|
||||||
result.onyx__algorithmConfidence = distance(centroids[emotion])
|
result.onyx__algorithmConfidence = distance(centroids[emotion])
|
||||||
@@ -103,7 +125,9 @@ class CentroidConversion(EmotionConversionPlugin):
|
|||||||
for i in emotionSet.onyx__hasEmotion:
|
for i in emotionSet.onyx__hasEmotion:
|
||||||
e.onyx__hasEmotion.append(self._backwards_conversion(i))
|
e.onyx__hasEmotion.append(self._backwards_conversion(i))
|
||||||
else:
|
else:
|
||||||
raise Error('EMOTION MODEL NOT KNOWN')
|
raise Error('EMOTION MODEL NOT KNOWN. '
|
||||||
|
'Cannot convert from {} to {}'.format(fromModel,
|
||||||
|
toModel))
|
||||||
yield e
|
yield e
|
||||||
|
|
||||||
def test(self, info=None):
|
def test(self, info=None):
|
@@ -1,6 +1,6 @@
|
|||||||
---
|
---
|
||||||
name: Ekman2FSRE
|
name: Ekman2FSRE
|
||||||
module: senpy.plugins.conversion.emotion.centroids
|
module: senpy.plugins.postprocessing.emotion.centroids
|
||||||
description: Plugin to convert emotion sets from Ekman to VAD
|
description: Plugin to convert emotion sets from Ekman to VAD
|
||||||
version: 0.2
|
version: 0.2
|
||||||
# No need to specify onyx:doesConversion because centroids.py adds it automatically from centroids_direction
|
# No need to specify onyx:doesConversion because centroids.py adds it automatically from centroids_direction
|
@@ -1,6 +1,6 @@
|
|||||||
---
|
---
|
||||||
name: Ekman2PAD
|
name: Ekman2PAD
|
||||||
module: senpy.plugins.conversion.emotion.centroids
|
module: senpy.plugins.postprocessing.emotion.centroids
|
||||||
description: Plugin to convert emotion sets from Ekman to VAD
|
description: Plugin to convert emotion sets from Ekman to VAD
|
||||||
version: 0.2
|
version: 0.2
|
||||||
# No need to specify onyx:doesConversion because centroids.py adds it automatically from centroids_direction
|
# No need to specify onyx:doesConversion because centroids.py adds it automatically from centroids_direction
|
||||||
@@ -9,30 +9,30 @@ centroids:
|
|||||||
anger:
|
anger:
|
||||||
A: 6.95
|
A: 6.95
|
||||||
D: 5.1
|
D: 5.1
|
||||||
V: 2.7
|
P: 2.7
|
||||||
disgust:
|
disgust:
|
||||||
A: 5.3
|
A: 5.3
|
||||||
D: 8.05
|
D: 8.05
|
||||||
V: 2.7
|
P: 2.7
|
||||||
fear:
|
fear:
|
||||||
A: 6.5
|
A: 6.5
|
||||||
D: 3.6
|
D: 3.6
|
||||||
V: 3.2
|
P: 3.2
|
||||||
happiness:
|
happiness:
|
||||||
A: 7.22
|
A: 7.22
|
||||||
D: 6.28
|
D: 6.28
|
||||||
V: 8.6
|
P: 8.6
|
||||||
sadness:
|
sadness:
|
||||||
A: 5.21
|
A: 5.21
|
||||||
D: 2.82
|
D: 2.82
|
||||||
V: 2.21
|
P: 2.21
|
||||||
centroids_direction:
|
centroids_direction:
|
||||||
- emoml:big6
|
- emoml:big6
|
||||||
- emoml:pad
|
- emoml:pad-dimensions
|
||||||
aliases: # These are aliases for any key in the centroid, to avoid repeating a long name several times
|
aliases: # These are aliases for any key in the centroid, to avoid repeating a long name several times
|
||||||
A: emoml:pad-dimensions:arousal
|
P: emoml:pad-dimensions_pleasure
|
||||||
V: emoml:pad-dimensions:pleasure
|
A: emoml:pad-dimensions_arousal
|
||||||
D: emoml:pad-dimensions:dominance
|
D: emoml:pad-dimensions_dominance
|
||||||
anger: emoml:big6anger
|
anger: emoml:big6anger
|
||||||
disgust: emoml:big6disgust
|
disgust: emoml:big6disgust
|
||||||
fear: emoml:big6fear
|
fear: emoml:big6fear
|
207
senpy/plugins/postprocessing/emotion/maxEmotion_plugin.py
Normal file
207
senpy/plugins/postprocessing/emotion/maxEmotion_plugin.py
Normal file
@@ -0,0 +1,207 @@
|
|||||||
|
#
|
||||||
|
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
|
from senpy import PostProcessing, easy_test
|
||||||
|
|
||||||
|
|
||||||
|
class MaxEmotion(PostProcessing):
|
||||||
|
'''Plugin to extract the emotion with highest value from an EmotionSet'''
|
||||||
|
author = '@dsuarezsouto'
|
||||||
|
version = '0.1'
|
||||||
|
|
||||||
|
def process_entry(self, entry, activity):
|
||||||
|
if len(entry.emotions) < 1:
|
||||||
|
yield entry
|
||||||
|
return
|
||||||
|
|
||||||
|
set_emotions = entry.emotions[0]['onyx:hasEmotion']
|
||||||
|
|
||||||
|
# If there is only one emotion, do not modify it
|
||||||
|
if len(set_emotions) < 2:
|
||||||
|
yield entry
|
||||||
|
return
|
||||||
|
|
||||||
|
max_emotion = set_emotions[0]
|
||||||
|
|
||||||
|
# Extract max emotion from the set emotions (emotion with highest intensity)
|
||||||
|
for tmp_emotion in set_emotions:
|
||||||
|
if tmp_emotion['onyx:hasEmotionIntensity'] > max_emotion[
|
||||||
|
'onyx:hasEmotionIntensity']:
|
||||||
|
max_emotion = tmp_emotion
|
||||||
|
|
||||||
|
if max_emotion['onyx:hasEmotionIntensity'] == 0:
|
||||||
|
max_emotion['onyx:hasEmotionCategory'] = "neutral"
|
||||||
|
max_emotion['onyx:hasEmotionIntensity'] = 1.0
|
||||||
|
|
||||||
|
entry.emotions[0]['onyx:hasEmotion'] = [max_emotion]
|
||||||
|
|
||||||
|
entry.emotions[0]['prov:wasGeneratedBy'] = activity.id
|
||||||
|
yield entry
|
||||||
|
|
||||||
|
def check(self, request, plugins):
|
||||||
|
return 'maxemotion' in request.parameters and self not in plugins
|
||||||
|
|
||||||
|
# Test Cases:
|
||||||
|
# 1 Normal Situation.
|
||||||
|
# 2 Case to return a Neutral Emotion.
|
||||||
|
test_cases = [
|
||||||
|
{
|
||||||
|
"name": "If there are several emotions within an emotion set, reduce it to one.",
|
||||||
|
"entry": {
|
||||||
|
"@type":
|
||||||
|
"entry",
|
||||||
|
"onyx:hasEmotionSet": [
|
||||||
|
{
|
||||||
|
"@id":
|
||||||
|
"Emotions0",
|
||||||
|
"@type":
|
||||||
|
"emotionSet",
|
||||||
|
"onyx:hasEmotion": [
|
||||||
|
{
|
||||||
|
"@id": "_:Emotion_1538121033.74",
|
||||||
|
"@type": "emotion",
|
||||||
|
"onyx:hasEmotionCategory": "anger",
|
||||||
|
"onyx:hasEmotionIntensity": 0
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"@id": "_:Emotion_1538121033.74",
|
||||||
|
"@type": "emotion",
|
||||||
|
"onyx:hasEmotionCategory": "joy",
|
||||||
|
"onyx:hasEmotionIntensity": 0.3333333333333333
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"@id": "_:Emotion_1538121033.74",
|
||||||
|
"@type": "emotion",
|
||||||
|
"onyx:hasEmotionCategory": "negative-fear",
|
||||||
|
"onyx:hasEmotionIntensity": 0
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"@id": "_:Emotion_1538121033.74",
|
||||||
|
"@type": "emotion",
|
||||||
|
"onyx:hasEmotionCategory": "sadness",
|
||||||
|
"onyx:hasEmotionIntensity": 0
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"@id": "_:Emotion_1538121033.74",
|
||||||
|
"@type": "emotion",
|
||||||
|
"onyx:hasEmotionCategory": "disgust",
|
||||||
|
"onyx:hasEmotionIntensity": 0
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"nif:isString":
|
||||||
|
"Test"
|
||||||
|
},
|
||||||
|
'expected': {
|
||||||
|
"@type":
|
||||||
|
"entry",
|
||||||
|
"onyx:hasEmotionSet": [
|
||||||
|
{
|
||||||
|
"@id":
|
||||||
|
"Emotions0",
|
||||||
|
"@type":
|
||||||
|
"emotionSet",
|
||||||
|
"onyx:hasEmotion": [
|
||||||
|
{
|
||||||
|
"@id": "_:Emotion_1538121033.74",
|
||||||
|
"@type": "emotion",
|
||||||
|
"onyx:hasEmotionCategory": "joy",
|
||||||
|
"onyx:hasEmotionIntensity": 0.3333333333333333
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"nif:isString":
|
||||||
|
"Test"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name":
|
||||||
|
"If the maximum emotion has an intensity of 0, return a neutral emotion.",
|
||||||
|
"entry": {
|
||||||
|
"@type":
|
||||||
|
"entry",
|
||||||
|
"onyx:hasEmotionSet": [{
|
||||||
|
"@id":
|
||||||
|
"Emotions0",
|
||||||
|
"@type":
|
||||||
|
"emotionSet",
|
||||||
|
"onyx:hasEmotion": [
|
||||||
|
{
|
||||||
|
"@id": "_:Emotion_1538121033.74",
|
||||||
|
"@type": "emotion",
|
||||||
|
"onyx:hasEmotionCategory": "anger",
|
||||||
|
"onyx:hasEmotionIntensity": 0
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"@id": "_:Emotion_1538121033.74",
|
||||||
|
"@type": "emotion",
|
||||||
|
"onyx:hasEmotionCategory": "joy",
|
||||||
|
"onyx:hasEmotionIntensity": 0
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"@id":
|
||||||
|
"_:Emotion_1538121033.74",
|
||||||
|
"@type":
|
||||||
|
"emotion",
|
||||||
|
"onyx:hasEmotionCategory":
|
||||||
|
"negative-fear",
|
||||||
|
"onyx:hasEmotionIntensity":
|
||||||
|
0
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"@id": "_:Emotion_1538121033.74",
|
||||||
|
"@type": "emotion",
|
||||||
|
"onyx:hasEmotionCategory":
|
||||||
|
"sadness",
|
||||||
|
"onyx:hasEmotionIntensity": 0
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"@id": "_:Emotion_1538121033.74",
|
||||||
|
"@type": "emotion",
|
||||||
|
"onyx:hasEmotionCategory":
|
||||||
|
"disgust",
|
||||||
|
"onyx:hasEmotionIntensity": 0
|
||||||
|
}]
|
||||||
|
}],
|
||||||
|
"nif:isString":
|
||||||
|
"Test"
|
||||||
|
},
|
||||||
|
'expected': {
|
||||||
|
"@type":
|
||||||
|
"entry",
|
||||||
|
"onyx:hasEmotionSet": [{
|
||||||
|
"@id":
|
||||||
|
"Emotions0",
|
||||||
|
"@type":
|
||||||
|
"emotionSet",
|
||||||
|
"onyx:hasEmotion": [{
|
||||||
|
"@id": "_:Emotion_1538121033.74",
|
||||||
|
"@type": "emotion",
|
||||||
|
"onyx:hasEmotionCategory": "neutral",
|
||||||
|
"onyx:hasEmotionIntensity": 1
|
||||||
|
}]
|
||||||
|
}],
|
||||||
|
"nif:isString":
|
||||||
|
"Test"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
easy_test()
|
@@ -1,66 +0,0 @@
|
|||||||
import requests
|
|
||||||
import json
|
|
||||||
|
|
||||||
from senpy.plugins import SentimentPlugin
|
|
||||||
from senpy.models import Sentiment
|
|
||||||
|
|
||||||
|
|
||||||
class Sentiment140Plugin(SentimentPlugin):
|
|
||||||
'''Connects to the sentiment140 free API: http://sentiment140.com'''
|
|
||||||
def analyse_entry(self, entry, params):
|
|
||||||
lang = params["language"]
|
|
||||||
res = requests.post("http://www.sentiment140.com/api/bulkClassifyJson",
|
|
||||||
json.dumps({
|
|
||||||
"language": lang,
|
|
||||||
"data": [{
|
|
||||||
"text": entry['nif:isString']
|
|
||||||
}]
|
|
||||||
}))
|
|
||||||
p = params.get("prefix", None)
|
|
||||||
polarity_value = self.maxPolarityValue * int(
|
|
||||||
res.json()["data"][0]["polarity"]) * 0.25
|
|
||||||
polarity = "marl:Neutral"
|
|
||||||
neutral_value = self.maxPolarityValue / 2.0
|
|
||||||
if polarity_value > neutral_value:
|
|
||||||
polarity = "marl:Positive"
|
|
||||||
elif polarity_value < neutral_value:
|
|
||||||
polarity = "marl:Negative"
|
|
||||||
|
|
||||||
sentiment = Sentiment(
|
|
||||||
prefix=p,
|
|
||||||
marl__hasPolarity=polarity,
|
|
||||||
marl__polarityValue=polarity_value)
|
|
||||||
sentiment.prov__wasGeneratedBy = self.id
|
|
||||||
entry.sentiments = []
|
|
||||||
entry.sentiments.append(sentiment)
|
|
||||||
entry.language = lang
|
|
||||||
yield entry
|
|
||||||
|
|
||||||
def test(self, *args, **kwargs):
|
|
||||||
'''
|
|
||||||
To avoid calling the sentiment140 API, we will mock the results
|
|
||||||
from requests.
|
|
||||||
'''
|
|
||||||
from senpy.test import patch_requests
|
|
||||||
expected = {"data": [{"polarity": 4}]}
|
|
||||||
with patch_requests(expected) as (request, response):
|
|
||||||
super(Sentiment140Plugin, self).test(*args, **kwargs)
|
|
||||||
assert request.called
|
|
||||||
assert response.json.called
|
|
||||||
|
|
||||||
test_cases = [
|
|
||||||
{
|
|
||||||
'entry': {
|
|
||||||
'nif:isString': 'I love Titanic'
|
|
||||||
},
|
|
||||||
'params': {},
|
|
||||||
'expected': {
|
|
||||||
"nif:isString": "I love Titanic",
|
|
||||||
'sentiments': [
|
|
||||||
{
|
|
||||||
'marl:hasPolarity': 'marl:Positive',
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
]
|
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user