mirror of https://github.com/gsi-upm/senpy synced 2025-10-19 09:48:26 +00:00

Compare commits


1 commit

Author SHA1 Message Date
J. Fernando Sánchez
5493070d40 Filter conversion plugins
Closes #12

* Shows only analysis plugins by default on /api/plugins
* Adds a plugin_type parameter to get other types of plugins
* default_plugin chosen from analysis plugins
2017-03-06 11:27:49 +01:00
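
Based on the commit message above, the new filtering could be exercised like this (a sketch; the host and the `conversionPlugin` value are assumptions, not taken from the commit):

```bash
# List analysis plugins (the new default behaviour of /api/plugins)
curl "http://localhost:5000/api/plugins"
# Ask for other plugin types through the new plugin_type parameter
curl "http://localhost:5000/api/plugins?plugin_type=conversionPlugin"
```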
131 changed files with 1776 additions and 6410 deletions

.gitlab-ci.yml

@@ -1,25 +1,22 @@
-# Uncomment if you want to use docker-in-docker
-# image: gsiupm/dockermake:latest
-# services:
-# - docker:dind
+image: gsiupm/dockermake:latest
 # When using dind, it's wise to use the overlayfs driver for
 # improved performance.
+variables:
+  DOCKER_DRIVER: overlay
+  DOCKERFILE: Dockerfile
+  IMAGENAME: $CI_REGISTRY_IMAGE
 stages:
   - test
   - push
-  - deploy
   - clean
-before_script:
-  - make -e login
 .test: &test_definition
   stage: test
   script:
     - make -e test-$PYTHON_VERSION
-  except:
-    - tags # Avoid unnecessary double testing
 test-3.5:
   <<: *test_definition
@@ -31,14 +28,16 @@ test-2.7:
   variables:
     PYTHON_VERSION: "2.7"
 .image: &image_definition
   stage: push
+  before_script:
+    - docker login -u gitlab-ci-token -p $CI_BUILD_TOKEN $CI_REGISTRY
   script:
     - make -e push-$PYTHON_VERSION
   only:
     - tags
     - triggers
+    - fix-makefiles
 push-3.5:
   <<: *image_definition
@@ -57,57 +56,10 @@ push-latest:
   only:
     - master
     - triggers
+    - fix-makefiles
-push-github:
-  stage: deploy
-  script:
-    - make -e push-github
-  only:
-    - master
-    - triggers
-    - fix-makefiles
-deploy_pypi:
-  stage: deploy
-  script: # Configure the PyPI credentials, then push the package, and cleanup the creds.
-    - echo "[server-login]" >> ~/.pypirc
-    - echo "repository=https://upload.pypi.org/legacy/" >> ~/.pypirc
-    - echo "username=" ${PYPI_USER} >> ~/.pypirc
-    - echo "password=" ${PYPI_PASSWORD} >> ~/.pypirc
-    - make pip_upload
-    - echo "" > ~/.pypirc && rm ~/.pypirc # If the above fails, this won't run.
-  only:
-    - /^v?\d+\.\d+\.\d+([abc]\d*)?$/ # PEP-440 compliant version (tags)
-  except:
-    - branches
-deploy:
-  stage: deploy
-  environment: test
-  script:
-    - make -e deploy
-  only:
-    - master
-    - fix-makefiles
-push-github:
-  stage: deploy
-  script:
-    - make -e push-github
-  only:
-    - master
-    - triggers
 clean :
   stage: clean
   script:
     - make -e clean
-  when: manual
+  only:
+    - master
-cleanup_py:
-  stage: clean
-  when: always # this is important; run even if preceding stages failed.
-  script:
-    - rm -vf ~/.pypirc # we don't want to leave these around, but GitLab may clean up anyway.
-    - docker logout

.makefiles/README.md

@@ -1,27 +0,0 @@
These makefiles are recipes for several common tasks in different types of projects.
To add them to your project, simply do:
```
git remote add makefiles ssh://git@lab.cluster.gsi.dit.upm.es:2200/docs/templates/makefiles.git
git subtree add --prefix=.makefiles/ makefiles master
touch Makefile
echo "include .makefiles/base.mk" >> Makefile
```
Now you can take advantage of the recipes.
For instance, to add useful targets for a python project, just add this to your Makefile:
```
include .makefiles/python.mk
```
You may need to set special variables like the name of your project or the python versions you're targeting.
Take a look at each specific `.mk` file for more information, and the `Makefile` in the [senpy](https://lab.cluster.gsi.dit.upm.es/senpy/senpy) project for a real use case.
If you update the makefiles from your repository, make sure to push the changes for review in upstream (this repository):
```
make makefiles-push
```
It will automatically commit all unstaged changes in the .makefiles folder.
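
For instance, a minimal Makefile for a python project could look like this (a sketch; `NAME` and `PYVERSIONS` are the kind of special variables mentioned above, and the exact set your project needs may differ):
```bash
cat > Makefile <<'EOF'
NAME=myproject
PYVERSIONS=3.5 2.7
include .makefiles/base.mk
include .makefiles/python.mk
EOF
make help   # list the targets the recipes provide
```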

.makefiles/base.mk

@@ -1,36 +0,0 @@
export
NAME ?= $(shell basename $(CURDIR))
VERSION ?= $(shell git describe --tags --dirty 2>/dev/null)
ifeq ($(VERSION),)
VERSION:=unknown
endif
# Get the location of this makefile.
MK_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
-include .env
-include ../.env
help: ## Show this help.
@fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/\(.*:\)[^#]*##\s*\(.*\)/\1\t\2/' | column -t -s " "
config: ## Load config from the environment. You should run it once in every session before other tasks. Run: eval $(make config)
@awk '{ print "export " $$0}' ../.env
@awk '{ print "export " $$0}' .env
@echo "# Please, run: "
@echo "# eval \$$(make config)"
# If you need to run a command on the key/value pairs, use this:
# @awk '{ split($$0, a, "="); "echo " a[2] " | base64 -w 0" |& getline b64; print "export " a[1] "=" a[2]; print "export " a[1] "_BASE64=" b64}' .env
ci: ## Run a task using gitlab-runner. Only use to debug problems in the CI pipeline
gitlab-runner exec shell --builds-dir '.builds' --env CI_PROJECT_NAME=$(NAME) ${action}
include $(MK_DIR)/makefiles.mk
include $(MK_DIR)/docker.mk
include $(MK_DIR)/git.mk
info:: ## List all variables
env
.PHONY:: config help ci
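
A typical session with these base recipes might look as follows (a sketch; the `.env` file and the 3.5 test job are assumptions):

```bash
eval $(make config)      # export the key=value pairs from .env into this shell
make help                # list every target documented with a '##' comment
make ci action=test-3.5  # replay a CI job locally through gitlab-runner
```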

.makefiles/docker.mk

@@ -1,51 +0,0 @@
ifndef IMAGENAME
ifdef CI_REGISTRY_IMAGE
IMAGENAME=$(CI_REGISTRY_IMAGE)
else
IMAGENAME=$(NAME)
endif
endif
IMAGEWTAG?=$(IMAGENAME):$(VERSION)
DOCKER_FLAGS?=-ti
DOCKER_CMD?=
docker-login: ## Log in to the registry. It will only be used in the server, or when running a CI task locally (if CI_BUILD_TOKEN is set).
ifeq ($(CI_BUILD_TOKEN),)
@echo "Not logging in to the docker registry" "$(CI_REGISTRY)"
else
@docker login -u gitlab-ci-token -p $(CI_BUILD_TOKEN) $(CI_REGISTRY)
endif
ifeq ($(HUB_USER),)
@echo "Not logging in to global the docker registry"
else
@docker login -u $(HUB_USER) -p $(HUB_PASSWORD)
endif
docker-clean: ## Remove docker credentials
ifeq ($(HUB_USER),)
else
@docker logout
endif
docker-run: ## Build a generic docker image
docker run $(DOCKER_FLAGS) $(IMAGEWTAG) $(DOCKER_CMD)
docker-build: ## Build a generic docker image
docker build . -t $(IMAGEWTAG)
docker-push: docker-login ## Push a generic docker image
docker push $(IMAGEWTAG)
docker-latest-push: docker-login ## Push the latest image
docker tag $(IMAGEWTAG) $(IMAGENAME)
docker push $(IMAGENAME)
login:: docker-login
clean:: docker-clean
docker-info:
@echo IMAGEWTAG=${IMAGEWTAG}
.PHONY:: docker-login docker-clean login clean

.makefiles/git.mk

@@ -1,28 +0,0 @@
commit:
git commit -a
tag:
git tag ${VERSION}
git-push::
git push --tags -u origin HEAD
git-pull:
git pull --all
push-github: ## Push the code to github. You need to set up GITHUB_DEPLOY_KEY
ifeq ($(GITHUB_DEPLOY_KEY),)
else
$(eval KEY_FILE := "$(shell mktemp)")
@printf '%b' '$(GITHUB_DEPLOY_KEY)' > $(KEY_FILE)
@git remote rm github-deploy || true
git remote add github-deploy $(GITHUB_REPO)
-@GIT_SSH_COMMAND="ssh -i $(KEY_FILE)" git fetch github-deploy $(CI_COMMIT_REF_NAME)
@GIT_SSH_COMMAND="ssh -i $(KEY_FILE)" git push github-deploy HEAD:$(CI_COMMIT_REF_NAME)
rm $(KEY_FILE)
endif
push:: git-push
pull:: git-pull
.PHONY:: commit tag push git-push git-pull push-github
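
Hypothetical usage of the deploy-key target (the key path is an assumption; GITHUB_REPO and CI_COMMIT_REF_NAME are normally provided by the CI environment):

```bash
export GITHUB_DEPLOY_KEY="$(cat ~/.ssh/github_deploy_key)"  # key contents, not a path
export GITHUB_REPO=git@github.com:gsi-upm/senpy.git
export CI_COMMIT_REF_NAME=master
make push-github
```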

.makefiles/k8s.mk

@@ -1,51 +0,0 @@
# Deployment with Kubernetes
# KUBE_CA_PEM_FILE is the path of a certificate file. It is automatically set by GitLab
# if you enable Kubernetes integration in a project.
#
# As of this writing, Kubernetes integration can not be set on a group level, so it has to
# be manually set in every project.
# Alternatively, we use a custom KUBE_CA_BUNDLE environment variable, which can be set at
# the group level. In this case, the variable contains the whole content of the certificate,
# which we dump to a temporary file
#
# Check if the KUBE_CA_PEM_FILE exists. Otherwise, create it from KUBE_CA_BUNDLE
KUBE_CA_TEMP=false
ifndef KUBE_CA_PEM_FILE
KUBE_CA_PEM_FILE:=$$PWD/.ca.crt
CREATED:=$(shell printf '%b\n' '$(KUBE_CA_BUNDLE)' > $(KUBE_CA_PEM_FILE))
endif
KUBE_TOKEN?=""
KUBE_NAMESPACE?=$(NAME)
KUBECTL=docker run --rm -v $(KUBE_CA_PEM_FILE):/tmp/ca.pem -i lachlanevenson/k8s-kubectl --server="$(KUBE_URL)" --token="$(KUBE_TOKEN)" --certificate-authority="/tmp/ca.pem" -n $(KUBE_NAMESPACE)
CI_COMMIT_REF_NAME?=master
info:: ## Print variables. Useful for debugging.
@echo "#KUBERNETES"
@echo KUBE_URL=$(KUBE_URL)
@echo KUBE_CA_PEM_FILE=$(KUBE_CA_PEM_FILE)
@echo KUBE_CA_BUNDLE=$$KUBE_CA_BUNDLE
@echo KUBE_TOKEN=$(KUBE_TOKEN)
@echo KUBE_NAMESPACE=$(KUBE_NAMESPACE)
@echo KUBECTL=$(KUBECTL)
@echo "#CI"
@echo CI_PROJECT_NAME=$(CI_PROJECT_NAME)
@echo CI_REGISTRY=$(CI_REGISTRY)
@echo CI_REGISTRY_USER=$(CI_REGISTRY_USER)
@echo CI_COMMIT_REF_NAME=$(CI_COMMIT_REF_NAME)
@echo "CREATED=$(CREATED)"
#
# Deployment and advanced features
#
deploy: ## Deploy to kubernetes using the credentials in KUBE_CA_PEM_FILE (or KUBE_CA_BUNDLE ) and TOKEN
@ls k8s/*.yaml k8s/*.yml k8s/*.tmpl 2>/dev/null || true
@cat k8s/*.yaml k8s/*.yml k8s/*.tmpl 2>/dev/null | envsubst | $(KUBECTL) apply -f -
deploy-check: ## Get the deployed configuration.
@$(KUBECTL) get deploy,pods,svc,ingress
.PHONY:: info deploy deploy-check
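
The deploy target pipes every template in `k8s/` through `envsubst` before applying it, so a manual, equivalent invocation might look like this (a sketch; the template name and the exported values are assumptions):

```bash
export NAME=senpy KUBE_NAMESPACE=senpy CI_COMMIT_REF_NAME=master
envsubst < k8s/deployment.yaml.tmpl | kubectl apply -n "$KUBE_NAMESPACE" -f -
```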

.makefiles/makefiles.mk

@@ -1,17 +0,0 @@
makefiles-remote:
@git remote add makefiles ssh://git@lab.cluster.gsi.dit.upm.es:2200/docs/templates/makefiles.git 2>/dev/null || true
makefiles-commit: makefiles-remote
git add -f .makefiles
git commit -em "Updated makefiles from ${NAME}"
makefiles-push:
git subtree push --prefix=.makefiles/ makefiles $(NAME)
makefiles-pull: makefiles-remote
git subtree pull --prefix=.makefiles/ makefiles master --squash
pull:: makefiles-pull
push:: makefiles-push
.PHONY:: makefiles-remote makefiles-commit makefiles-push makefiles-pull pull push


@@ -1,5 +0,0 @@
init: ## Init pre-commit hooks (i.e. enforcing format checking before allowing a commit)
pip install --user pre-commit
pre-commit install
.PHONY:: init
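
After a one-time `make init`, the hook runs on every commit:

```bash
make init                  # install pre-commit and its git hook
git commit -m "my change"  # format checks now run before the commit is accepted
```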

.makefiles/python.mk

@@ -1,101 +0,0 @@
PYVERSIONS ?= 3.5
PYMAIN ?= $(firstword $(PYVERSIONS))
TARNAME ?= $(NAME)-$(VERSION).tar.gz
VERSIONFILE ?= $(NAME)/VERSION
DEVPORT ?= 6000
.FORCE:
version: .FORCE
@echo $(VERSION) > $(VERSIONFILE)
@echo $(VERSION)
yapf: ## Format python code
yapf -i -r $(NAME)
yapf -i -r tests
dockerfiles: $(addprefix Dockerfile-,$(PYVERSIONS)) ## Generate dockerfiles for each python version
@unlink Dockerfile >/dev/null
ln -s Dockerfile-$(PYMAIN) Dockerfile
Dockerfile-%: Dockerfile.template ## Generate a specific dockerfile (e.g. Dockerfile-2.7)
sed "s/{{PYVERSION}}/$*/" Dockerfile.template > Dockerfile-$*
quick_build: $(addprefix build-, $(PYMAIN))
build: $(addprefix build-, $(PYVERSIONS)) ## Build all images / python versions
docker tag $(IMAGEWTAG)-python$(PYMAIN) $(IMAGEWTAG)
build-%: version Dockerfile-% ## Build a specific version (e.g. build-2.7)
docker build -t '$(IMAGEWTAG)-python$*' -f Dockerfile-$* .;
dev-%: ## Launch a specific development environment using docker (e.g. dev-2.7)
@docker start $(NAME)-dev$* || (\
$(MAKE) build-$*; \
docker run -d -w /usr/src/app/ -p $(DEVPORT):5000 -v $$PWD:/usr/src/app --entrypoint=/bin/bash -ti --name $(NAME)-dev$* '$(IMAGEWTAG)-python$*'; \
)\
docker exec -ti $(NAME)-dev$* bash
dev: dev-$(PYMAIN) ## Launch a development environment using docker, using the default python version
quick_test: test-$(PYMAIN)
test-%: build-% ## Run setup.py from in an isolated container, built from the base image. (e.g. test-2.7)
# This speeds tests up because the image has most (if not all) of the dependencies already.
docker rm $(NAME)-test-$* || true
docker create -ti --name $(NAME)-test-$* --entrypoint="" -w /usr/src/app/ $(IMAGEWTAG)-python$* python setup.py test
docker cp . $(NAME)-test-$*:/usr/src/app
docker start -a $(NAME)-test-$*
test: $(addprefix test-,$(PYVERSIONS)) ## Run the tests with the main python version
run-%: build-%
docker run --rm -p $(DEVPORT):5000 -ti '$(IMAGEWTAG)-python$(PYMAIN)' --default-plugins
run: run-$(PYMAIN)
# PyPI - Upload a package
dist/$(TARNAME): version
python setup.py sdist;
sdist: dist/$(TARNAME) ## Generate the distribution file (sdist tarball)
pip_test-%: sdist ## Test the distribution file using pip install and a specific python version (e.g. pip_test-2.7)
docker run --rm -v $$PWD/dist:/dist/ python:$* pip install /dist/$(TARNAME);
pip_test: $(addprefix pip_test-,$(PYVERSIONS)) ## Test pip installation with the main python version
pip_upload: pip_test ## Upload package to pip
python setup.py sdist upload ;
# Pushing to docker
push-latest: $(addprefix push-latest-,$(PYVERSIONS)) ## Push the "latest" tag to dockerhub
docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGEWTAG)'
docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGENAME):latest'
docker push '$(IMAGENAME):latest'
docker push '$(IMAGEWTAG)'
push-latest-%: build-% ## Push the latest image for a specific python version
docker tag $(IMAGENAME):$(VERSION)-python$* $(IMAGENAME):python$*
docker push $(IMAGENAME):$(VERSION)-python$*
docker push $(IMAGENAME):python$*
push-%: build-% ## Push the image of the current version (tagged). e.g. push-2.7
docker push $(IMAGENAME):$(VERSION)-python$*
push:: $(addprefix push-,$(PYVERSIONS)) ## Push an image with the current version for every python version
docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGEWTAG)'
docker push $(IMAGENAME):$(VERSION)
clean:: ## Clean older docker images and containers related to this project and dev environments
@docker stop $(addprefix $(NAME)-dev,$(PYVERSIONS)) 2>/dev/null || true
@docker rm $(addprefix $(NAME)-dev,$(PYVERSIONS)) 2>/dev/null || true
@docker ps -a | grep $(IMAGENAME) | awk '{ split($$2, vers, "-"); if(vers[0] != "${VERSION}"){ print $$1;}}' | xargs docker rm -v 2>/dev/null|| true
@docker images | grep $(IMAGENAME) | awk '{ split($$2, vers, "-"); if(vers[0] != "${VERSION}"){ print $$1":"$$2;}}' | xargs docker rmi 2>/dev/null|| true
.PHONY:: yapf dockerfiles Dockerfile-% quick_build build build-% dev-% quick-dev test quick_test push-latest push-latest-% push-% push version .FORCE
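
A plausible workflow with these targets (a sketch; the version suffixes depend on your PYVERSIONS):

```bash
make version       # write the git-describe version to $(NAME)/VERSION
make build-3.5     # build the docker image for python 3.5
make test          # run setup.py test once per version in PYVERSIONS
make pip_test-3.5  # install the sdist into a clean python:3.5 container
```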

.travis.yml

@@ -7,6 +7,7 @@ language: python
 env:
   - PYV=2.7
+  - PYV=3.4
   - PYV=3.5
 # run nosetests - Tests
 script: make test-$PYV

Dockerfile.template

@@ -2,10 +2,6 @@ from python:{{PYVERSION}}
 MAINTAINER J. Fernando Sánchez <jf.sanchez@upm.es>
-RUN apt-get update && apt-get install -y \
-  libblas-dev liblapack-dev liblapacke-dev gfortran \
-  && rm -rf /var/lib/apt/lists/*
 RUN mkdir /cache/ /senpy-plugins /data/
 VOLUME /data/
@@ -18,9 +14,9 @@ ONBUILD WORKDIR /senpy-plugins/
 WORKDIR /usr/src/app
-COPY test-requirements.txt requirements.txt extra-requirements.txt /usr/src/app/
-RUN pip install --no-cache-dir -r test-requirements.txt -r requirements.txt -r extra-requirements.txt
+COPY test-requirements.txt requirements.txt /usr/src/app/
+RUN pip install --use-wheel -r test-requirements.txt -r requirements.txt
 COPY . /usr/src/app/
-RUN pip install --no-cache-dir --no-index --no-deps --editable .
+RUN pip install --no-deps --no-index .
 ENTRYPOINT ["python", "-m", "senpy", "-f", "/senpy-plugins/", "--host", "0.0.0.0"]

MANIFEST.in

@@ -1,6 +1,5 @@
 include requirements.txt
 include test-requirements.txt
-include extra-requirements.txt
 include README.rst
 include senpy/VERSION
 graft senpy/plugins

Makefile

@@ -1,17 +1,109 @@
-NAME=senpy
-GITHUB_REPO=git@github.com:gsi-upm/senpy.git
-IMAGENAME=gsiupm/senpy
-# The first version is the main one (used for quick builds)
-# See .makefiles/python.mk for more info
 PYVERSIONS=3.5 2.7
+PYMAIN=$(firstword $(PYVERSIONS))
-DEVPORT=5000
+NAME=senpy
+REPO=gsiupm
+VERSION=$(shell git describe --tags --dirty 2>/dev/null)
+TARNAME=$(NAME)-$(VERSION).tar.gz
+IMAGENAME=$(REPO)/$(NAME)
+IMAGEWTAG=$(IMAGENAME):$(VERSION)
 action="test-${PYMAIN}"
+GITHUB_REPO=git@github.com:gsi-upm/senpy.git
-include .makefiles/base.mk
-include .makefiles/k8s.mk
-include .makefiles/python.mk
+all: build run
+.FORCE:
+version: .FORCE
+    @echo $(VERSION) > $(NAME)/VERSION
+    @echo $(VERSION)
+yapf:
+    yapf -i -r senpy
+    yapf -i -r tests
+init:
+    pip install --user pre-commit
+    pre-commit install
+dockerfiles: $(addprefix Dockerfile-,$(PYVERSIONS))
+    @unlink Dockerfile >/dev/null
+    ln -s Dockerfile-$(PYMAIN) Dockerfile
+Dockerfile-%: Dockerfile.template
+    sed "s/{{PYVERSION}}/$*/" Dockerfile.template > Dockerfile-$*
+quick_build: $(addprefix build-, $(PYMAIN))
+build: $(addprefix build-, $(PYVERSIONS))
+build-%: version Dockerfile-%
+    docker build -t '$(IMAGEWTAG)-python$*' -f Dockerfile-$* .;
+quick_test: $(addprefix test-,$(PYMAIN))
+dev-%:
+    @docker start $(NAME)-dev$* || (\
+        $(MAKE) build-$*; \
+        docker run -d -w /usr/src/app/ -v $$PWD:/usr/src/app --entrypoint=/bin/bash -ti --name $(NAME)-dev$* '$(IMAGEWTAG)-python$*'; \
+    )\
+    docker exec -ti $(NAME)-dev$* bash
+dev: dev-$(PYMAIN)
+test-all: $(addprefix test-,$(PYVERSIONS))
+test-%: build-%
+    docker run --rm --entrypoint /usr/local/bin/python -w /usr/src/app $(IMAGEWTAG)-python$* setup.py test
+test: test-$(PYMAIN)
+dist/$(TARNAME):
+    docker run --rm -ti -v $$PWD:/usr/src/app/ -w /usr/src/app/ python:$(PYMAIN) python setup.py sdist;
+sdist: dist/$(TARNAME)
+pip_test-%: sdist
+    docker run --rm -v $$PWD/dist:/dist/ -ti python:$* pip install /dist/$(TARNAME);
+pip_test: $(addprefix pip_test-,$(PYVERSIONS))
+clean:
+    @docker ps -a | awk '/$(REPO)\/$(NAME)/{ split($$2, vers, "-"); if(vers[0] != "${VERSION}"){ print $$1;}}' | xargs docker rm -v 2>/dev/null|| true
+    @docker images | awk '/$(REPO)\/$(NAME)/{ split($$2, vers, "-"); if(vers[0] != "${VERSION}"){ print $$1":"$$2;}}' | xargs docker rmi 2>/dev/null|| true
+    @docker rmi $(NAME)-dev 2>/dev/null || true
+git_commit:
+    git commit -a
+git_tag:
+    git tag ${VERSION}
+git_push:
+    git push --tags origin master
+pip_upload:
+    python setup.py sdist upload ;
+run-%: build-%
+    docker run --rm -p 5000:5000 -ti '$(IMAGEWTAG)-python$(PYMAIN)' --default-plugins
+run: run-$(PYMAIN)
+push-latest: build-$(PYMAIN)
+    docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGEWTAG)'
+    docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGENAME)'
+    docker push '$(IMAGENAME):latest'
+    docker push '$(IMAGEWTAG)'
+push-%: build-%
+    docker push $(IMAGENAME):$(VERSION)-python$*
+push: $(addprefix push-,$(PYVERSIONS))
+    docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGEWTAG)'
+    docker push $(IMAGENAME):$(VERSION)
+ci:
+    gitlab-runner exec docker --docker-volumes /var/run/docker.sock:/var/run/docker.sock --env CI_PROJECT_NAME=$(NAME) ${action}
+.PHONY: test test-% test-all build-% build test pip_test run yapf push-main push-% dev ci version .FORCE

README.rst

@@ -1,5 +1,5 @@
 .. image:: img/header.png
-  :width: 100%
+  :height: 6em
   :target: http://demos.gsi.dit.upm.es/senpy
 .. image:: https://travis-ci.org/gsi-upm/senpy.svg?branch=master
@@ -23,7 +23,7 @@ Through PIP
 .. code:: bash
-   pip install -U --user senpy
+   pip install --user senpy
 Alternatively, you can use the development version:
@@ -42,53 +42,6 @@ Build the image or use the pre-built one: ``docker run -ti -p 5000:5000 gsiupm/s
 To add custom plugins, add a volume and tell senpy where to find the plugins: ``docker run -ti -p 5000:5000 -v <PATH OF PLUGINS>:/plugins gsiupm/senpy --default-plugins -f /plugins``
-Developing
-----------
-Developing/debugging
-********************
-This command will run the senpy container using the latest image available, mounting your current folder so you get your latest code:
-.. code:: bash
-   # Python 3.5
-   make dev
-   # Python 2.7
-   make dev-2.7
-Building a docker image
-***********************
-.. code:: bash
-   # Python 3.5
-   make build-3.5
-   # Python 2.7
-   make build-2.7
-Testing
-*******
-.. code:: bash
-   make test
-Running
-*******
-This command will run the senpy server listening on localhost:5000
-.. code:: bash
-   # Python 3.5
-   make run-3.5
-   # Python 2.7
-   make run-2.7
 Usage
 -----
@@ -96,14 +49,12 @@ However, the easiest and recommended way is to just use the command-line tool to
 .. code:: bash
   senpy
 or, alternatively:
 .. code:: bash
   python -m senpy


@@ -1,10 +0,0 @@
version: '3'
services:
senpy:
image: "${IMAGENAME-gsiupm/senpy}:${VERSION-latest}"
entrypoint: ["/bin/bash"]
working_dir: "/senpy-plugins"
ports:
- 5000:5000
volumes:
- ".:/usr/src/app/"


@@ -1,9 +0,0 @@
version: '3'
services:
test:
image: "${IMAGENAME-gsiupm/senpy}:${VERSION-dev}"
entrypoint: ["py.test"]
volumes:
- ".:/usr/src/app/"
command:
[]


@@ -1,11 +0,0 @@
version: '3'
services:
senpy:
image: "${IMAGENAME-gsiupm/senpy}:${VERSION-dev}"
build:
context: .
dockerfile: Dockerfile${PYVERSION--2.7}
ports:
- 5001:5000
volumes:
- "./data:/data"


@@ -1,317 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:05:31.465571Z",
"start_time": "2017-04-10T19:05:31.458282+02:00"
},
"deletable": true,
"editable": true
},
"source": [
"# Client"
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": true,
"deletable": true,
"editable": true
},
"source": [
"The built-in senpy client allows you to query any Senpy endpoint. We will illustrate how to use it with the public demo endpoint, and then show you how to spin up your own endpoint using docker."
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"Demo Endpoint\n",
"-------------"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"To start using senpy, simply create a new Client and point it to your endpoint. In this case, the latest version of Senpy at GSI."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:12.827640Z",
"start_time": "2017-04-10T19:29:12.818617+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"from senpy.client import Client\n",
"\n",
"c = Client('http://latest.senpy.cluster.gsi.dit.upm.es/api')\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"Now, let's use that client analyse some queries:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:14.011657Z",
"start_time": "2017-04-10T19:29:13.701808+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"r = c.analyse('I like sugar!!', algorithm='sentiment140')\n",
"r"
]
},
{
"cell_type": "markdown",
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:08:19.616754Z",
"start_time": "2017-04-10T19:08:19.610767+02:00"
},
"deletable": true,
"editable": true
},
"source": [
"As you can see, that gave us the full JSON result. A more concise way to print it would be:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:14.854213Z",
"start_time": "2017-04-10T19:29:14.842068+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"for entry in r.entries:\n",
" print('{} -> {}'.format(entry['text'], entry['sentiments'][0]['marl:hasPolarity']))"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"We can also obtain a list of available plugins with the client:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:16.245198Z",
"start_time": "2017-04-10T19:29:16.056545+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"c.plugins()"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"Or, more concisely:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:17.663275Z",
"start_time": "2017-04-10T19:29:17.484623+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"c.plugins().keys()"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"Local Endpoint\n",
"--------------"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"To run your own instance of senpy, just create a docker container with the latest Senpy image. Using `--default-plugins` you will get some extra plugins to start playing with the API."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:20.637539Z",
"start_time": "2017-04-10T19:29:19.938322+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"!docker run -ti --name 'SenpyEndpoint' -d -p 6000:5000 gsiupm/senpy:0.8.6 --host 0.0.0.0 --default-plugins"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"To use this endpoint:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:21.263976Z",
"start_time": "2017-04-10T19:29:21.260595+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"c_local = Client('http://127.0.0.1:6000/api')"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"That's all! After you are done with your analysis, stop the docker container:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:33.226686Z",
"start_time": "2017-04-10T19:29:22.392121+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"!docker stop SenpyEndpoint\n",
"!docker rm SenpyEndpoint"
]
}
],
"metadata": {
"anaconda-cloud": {},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.0"
},
"toc": {
"colors": {
"hover_highlight": "#DAA520",
"running_highlight": "#FF0000",
"selected_highlight": "#FFD700"
},
"moveMenuLeft": true,
"nav_menu": {
"height": "68px",
"width": "252px"
},
"navigate_menu": true,
"number_sections": true,
"sideBar": true,
"threshold": 4,
"toc_cell": false,
"toc_section_display": "block",
"toc_window_display": false
}
},
"nbformat": 4,
"nbformat_minor": 1
}


@@ -1,106 +0,0 @@
Client
======
Demo Endpoint
-------------
Import Client and send a request
.. code:: python
from senpy.client import Client
c = Client('http://latest.senpy.cluster.gsi.dit.upm.es/api')
r = c.analyse('I like Pizza', algorithm='sentiment140')
Print response
.. code:: python
for entry in r.entries:
print('{} -> {}'.format(entry['text'], entry['sentiments'][0]['marl:hasPolarity']))
.. parsed-literal::
I like Pizza -> marl:Positive
Obtain a list of available plugins
.. code:: python
for plugin in c.request('/plugins')['plugins']:
print(plugin['name'])
.. parsed-literal::
emoRand
rand
sentiment140
Local Endpoint
--------------
Run a docker container with Senpy image and default plugins
.. code::
docker run -ti --name 'SenpyEndpoint' -d -p 5000:5000 gsiupm/senpy:0.8.6 --host 0.0.0.0 --default-plugins
.. parsed-literal::
a0157cd98057072388bfebeed78a830da7cf0a796f4f1a3fd9188f9f2e5fe562
Import client and send a request to localhost
.. code:: python
c_local = Client('http://127.0.0.1:5000/api')
r = c_local.analyse('Hello world', algorithm='sentiment140')
Print response
.. code:: python
for entry in r.entries:
print('{} -> {}'.format(entry['text'], entry['sentiments'][0]['marl:hasPolarity']))
.. parsed-literal::
Hello world -> marl:Neutral
Obtain a list of available plugins deployed locally
.. code:: python
c_local.plugins().keys()
.. parsed-literal::
rand
sentiment140
emoRand
Stop the docker container
.. code:: python
!docker stop SenpyEndpoint
!docker rm SenpyEndpoint
.. parsed-literal::
SenpyEndpoint
SenpyEndpoint

Binary file not shown (image, 208 KiB).


@@ -1,11 +0,0 @@
About
--------
If you use Senpy in your research, please cite `Senpy: A Pragmatic Linked Sentiment Analysis Framework <http://gsi.dit.upm.es/index.php/es/investigacion/publicaciones?view=publication&task=show&id=417>`__ (`BibTex <http://gsi.dit.upm.es/index.php/es/investigacion/publicaciones?controller=publications&task=export&format=bibtex&id=417>`__):
.. code-block:: text
Sánchez-Rada, J. F., Iglesias, C. A., Corcuera, I., & Araque, Ó. (2016, October).
Senpy: A Pragmatic Linked Sentiment Analysis Framework.
In Data Science and Advanced Analytics (DSAA),
2016 IEEE International Conference on (pp. 735-742). IEEE.

docs/api.rst

@@ -1,5 +1,5 @@
 NIF API
--------
+=======
 .. http:get:: /api
 Basic endpoint for sentiment/emotion analysis.
@@ -22,32 +22,38 @@ NIF API
 Content-Type: text/javascript
 {
-  "@context":"http://127.0.0.1/api/contexts/Results.jsonld",
-  "@id":"_:Results_11241245.22",
-  "@type":"results"
-  "analysis": [
-    "plugins/sentiment-140_0.1"
-  ],
-  "entries": [
-    {
-      "@id": "_:Entry_11241245.22"
-      "@type":"entry",
-      "emotions": [],
-      "entities": [],
-      "sentiments": [
-        {
-          "@id": "Sentiment0",
-          "@type": "sentiment",
-          "marl:hasPolarity": "marl:Negative",
-          "marl:polarityValue": 0,
-          "prefix": ""
-        }
-      ],
-      "suggestions": [],
-      "text": "This text makes me sad.\nwhilst this text makes me happy and surprised at the same time.\nI cannot believe it!",
-      "topics": []
-    }
-  ]
+  "@context": [
+    "http://127.0.0.1/static/context.jsonld",
+  ],
+  "analysis": [
+    {
+      "@id": "SentimentAnalysisExample",
+      "@type": "marl:SentimentAnalysis",
+      "dc:language": "en",
+      "marl:maxPolarityValue": 10.0,
+      "marl:minPolarityValue": 0.0
+    }
+  ],
+  "domain": "wndomains:electronics",
+  "entries": [
+    {
+      "opinions": [
+        {
+          "prov:generatedBy": "SentimentAnalysisExample",
+          "marl:polarityValue": 7.8,
+          "marl:hasPolarity": "marl:Positive",
+          "marl:describesObject": "http://www.gsi.dit.upm.es",
+        }
+      ],
+      "nif:isString": "I love GSI",
+      "strings": [
+        {
+          "nif:anchorOf": "GSI",
+          "nif:taIdentRef": "http://www.gsi.dit.upm.es"
+        }
+      ]
+    }
+  ]
 }
 :query i input: No default. Depends on informat and intype
@@ -86,59 +92,58 @@ NIF API
 .. sourcecode:: http
 {
-  "@id": "plugins/sentiment-140_0.1",
-  "@type": "sentimentPlugin",
-  "author": "@balkian",
-  "description": "Sentiment classifier using rule-based classification for English and Spanish. This plugin uses sentiment140 data to perform classification. For more information: http://help.sentiment140.com/for-students/",
-  "extra_params": {
-    "language": {
-      "@id": "lang_sentiment140",
-      "aliases": [
-        "language",
-        "l"
-      ],
-      "options": [
-        "es",
-        "en",
-        "auto"
-      ],
-      "required": false
-    }
-  },
-  "is_activated": true,
-  "maxPolarityValue": 1.0,
-  "minPolarityValue": 0.0,
-  "module": "sentiment-140",
-  "name": "sentiment-140",
-  "requirements": {},
-  "version": "0.1"
-},
-{
-  "@id": "plugins/ExamplePlugin_0.1",
-  "@type": "sentimentPlugin",
-  "author": "@balkian",
-  "custom_attribute": "42",
-  "description": "I am just an example",
-  "extra_params": {
-    "parameter": {
-      "@id": "parameter",
-      "aliases": [
-        "parameter",
-        "param"
-      ],
-      "default": 42,
-      "required": true
-    }
-  },
-  "is_activated": true,
-  "maxPolarityValue": 1.0,
-  "minPolarityValue": 0.0,
-  "module": "example",
-  "name": "ExamplePlugin",
-  "requirements": "noop",
-  "version": "0.1"
-}
+  "@context": {
+    ...
+  },
+  "@type": "plugins",
+  "plugins": [
+    {
+      "name": "sentiment140",
+      "is_activated": true,
+      "version": "0.1",
+      "extra_params": {
+        "@id": "extra_params_sentiment140_0.1",
+        "language": {
+          "required": false,
+          "@id": "lang_sentiment140",
+          "options": [
+            "es",
+            "en",
+            "auto"
+          ],
+          "aliases": [
+            "language",
+            "l"
+          ]
+        }
+      },
+      "@id": "sentiment140_0.1"
+    }, {
+      "name": "rand",
+      "is_activated": true,
+      "version": "0.1",
+      "extra_params": {
+        "@id": "extra_params_rand_0.1",
+        "language": {
+          "required": false,
+          "@id": "lang_rand",
+          "options": [
+            "es",
+            "en",
+            "auto"
+          ],
+          "aliases": [
+            "language",
+            "l"
+          ]
+        }
+      },
+      "@id": "rand_0.1"
+    }
+  ]
+}
 .. http:get:: /api/plugins/<pluginname>
@@ -157,60 +162,30 @@ NIF API
 .. sourcecode:: http
 {
-  "@context": "http://127.0.0.1/api/contexts/ExamplePlugin.jsonld",
-  "@id": "plugins/ExamplePlugin_0.1",
-  "@type": "sentimentPlugin",
-  "author": "@balkian",
-  "custom_attribute": "42",
-  "description": "I am just an example",
-  "extra_params": {
-    "parameter": {
-      "@id": "parameter",
-      "aliases": [
-        "parameter",
-        "param"
-      ],
-      "default": 42,
-      "required": true
-    }
-  },
-  "is_activated": true,
-  "maxPolarityValue": 1.0,
-  "minPolarityValue": 0.0,
-  "module": "example",
-  "name": "ExamplePlugin",
-  "requirements": "noop",
-  "version": "0.1"
+  "@id": "rand_0.1",
+  "@type": "sentimentPlugin",
+  "extra_params": {
+    "@id": "extra_params_rand_0.1",
+    "language": {
+      "@id": "lang_rand",
+      "aliases": [
+        "language",
+        "l"
+      ],
+      "options": [
+        "es",
+        "en",
+        "auto"
+      ],
+      "required": false
+    }
+  },
+  "is_activated": true,
+  "name": "rand",
+  "version": "0.1"
 }
 .. http:get:: /api/plugins/default
 Return the information about the default plugin.
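
A request matching the documented endpoints might look like this (a sketch; the host and the plugin name are assumptions):

```bash
curl "http://127.0.0.1:5000/api?i=I+love+GSI&algo=sentiment140"
curl "http://127.0.0.1:5000/api/plugins/default"
```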


@@ -1,7 +0,0 @@
API and Examples
################
.. toctree::
vocabularies.rst
api.rst
examples.rst


@@ -1,4 +1,4 @@
{ {
"@type": "plugins", "plugins": [
"plugins": {} ]
} }


@@ -1,77 +0,0 @@
{
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "me:Result1",
"@type": "results",
"analysis": [
"me:SAnalysis1",
"me:SgAnalysis1",
"me:EmotionAnalysis1",
"me:NER1",
{
"description": "missing @id and @type"
}
],
"entries": [
{
"@id": "http://micro.blog/status1",
"@type": [
"nif:RFC5147String",
"nif:Context"
],
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"entities": [
{
"@id": "http://micro.blog/status1#char=5,13",
"nif:beginIndex": 5,
"nif:endIndex": 13,
"nif:anchorOf": "Microsoft",
"me:references": "http://dbpedia.org/page/Microsoft",
"prov:wasGeneratedBy": "me:NER1"
},
{
"@id": "http://micro.blog/status1#char=25,37",
"nif:beginIndex": 25,
"nif:endIndex": 37,
"nif:anchorOf": "Windows Phone",
"me:references": "http://dbpedia.org/page/Windows_Phone",
"prov:wasGeneratedBy": "me:NER1"
}
],
"suggestions": [
{
"@id": "http://micro.blog/status1#char=16,77",
"nif:beginIndex": 16,
"nif:endIndex": 77,
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
"prov:wasGeneratedBy": "me:SgAnalysis1"
}
],
"sentiments": [
{
"@id": "http://micro.blog/status1#char=80,97",
"nif:beginIndex": 80,
"nif:endIndex": 97,
"nif:anchorOf": "You'll be awesome.",
"marl:hasPolarity": "marl:Positive",
"marl:polarityValue": 0.9,
"prov:wasGeneratedBy": "me:SAnalysis1"
}
],
"emotions": [
{
"@id": "http://micro.blog/status1#char=0,109",
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"prov:wasGeneratedBy": "me:EAnalysis1",
"onyx:hasEmotion": [
{
"onyx:hasEmotionCategory": "wna:liking"
},
{
"onyx:hasEmotionCategory": "wna:excitement"
}
]
}
]
}
]
}


@@ -1,9 +0,0 @@
Command line
============
This video shows how to analyse text directly on the command line using the senpy tool.
.. image:: https://asciinema.org/a/9uwef1ghkjk062cw2t4mhzpyk.png
:width: 100%
:target: https://asciinema.org/a/9uwef1ghkjk062cw2t4mhzpyk
:alt: CLI demo

docs/conf.py

@@ -37,7 +37,6 @@ extensions = [
     'sphinx.ext.todo',
     'sphinxcontrib.httpdomain',
     'sphinx.ext.coverage',
-    'sphinx.ext.autosectionlabel',
 ]
 # Add any paths that contain templates here, relative to this directory.
@@ -55,21 +54,20 @@ master_doc = 'index'
 # General information about the project.
 project = u'Senpy'
 copyright = u'2016, J. Fernando Sánchez'
-description = u'A framework for sentiment and emotion analysis services'
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
 # The short X.Y version.
-# with open('../senpy/VERSION') as f:
-#     version = f.read().strip()
+with open('../senpy/VERSION') as f:
+    version = f.read().strip()
 # The full version, including alpha/beta/rc tags.
-# release = version
+release = version
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
-language = None
+#language = None
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
@@ -106,14 +104,14 @@ pygments_style = 'sphinx'
 #keep_warnings = False
-html_theme = 'alabaster'
 # -- Options for HTML output ----------------------------------------------
-# if not on_rtd:  # only import and set the theme if we're building docs locally
-#     import sphinx_rtd_theme
-#     html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
-# else:
-#     html_theme = 'default'
+if not on_rtd:  # only import and set the theme if we're building docs locally
+    import sphinx_rtd_theme
+    html_theme = 'sphinx_rtd_theme'
+    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
+else:
+    html_theme = 'default'
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
@@ -121,13 +119,7 @@ html_theme = 'alabaster'
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
-html_theme_options = {
-    'logo': 'header.png',
-    'github_user': 'gsi-upm',
-    'github_repo': 'senpy',
-    'github_banner': True,
-}
+#html_theme_options = {}
 # Add any paths that contain custom themes here, relative to this directory.
 #html_theme_path = []
@@ -167,13 +159,7 @@ html_static_path = ['_static']
 #html_use_smartypants = True
 # Custom sidebar templates, maps document names to template names.
-html_sidebars = {
-    '**': [
-        'about.html',
-        'navigation.html',
-        'searchbox.html',
-    ]
-}
+#html_sidebars = {}
 # Additional templates that should be rendered to pages, maps page names to
 # template names.

docs/demo.rst

@@ -1,8 +1,7 @@
 Demo
 ----
-There is a demo available on http://senpy.cluster.gsi.dit.upm.es/, where you can test a series of different plugins.
-You can use the playground (a web interface) or make HTTP requests to the service API.
+There is a demo available on http://senpy.demos.gsi.dit.upm.es/, where you can try a series of different plugins. You can use them in the playground or make requests directly to the service.
 .. image:: senpy-playground.png
   :height: 400px
@@ -13,4 +12,64 @@ You can use the playground (a web interface) or make HTTP requests to the servic
 Plugins Demo
 ============
-The source code and description of the plugins used in the demo is available here: https://lab.cluster.gsi.dit.upm.es/senpy/senpy-plugins-community/.
+The following plugins are available in the demo:
+* emoTextAnew extracts the VAD (valence-arousal-dominance) of a sentence by matching words from the ANEW dictionary.
+* emoTextWordnetAffect uses the hierarchy of WordNet-Affect to calculate the emotion of the sentence.
+* vaderSentiment uses the software from vaderSentiment to calculate the sentiment of a sentence.
+* sentiText is a piece of software developed for the TASS 2015 competition; it has been adapted for English and Spanish.
+emoTextANEW plugin
+******************
+This plugin uses the ANEW lexicon to calculate the VAD (valence-arousal-dominance) of a sentence, and then determines which emotion is closest to that value.
+Each emotion has a centroid, which has been approximated using the formula described in this article:
+http://www.aclweb.org/anthology/W10-0208
+The plugin looks for the words of the sentence that appear in the ANEW dictionary and calculates the average VAD score of the sentence. Once this score is calculated, it looks for the emotion that is closest to this value.
+emoTextWAF plugin
+*****************
+This plugin uses WordNet-Affect (http://wndomains.fbk.eu/wnaffect.html) to calculate the percentage of each emotion. The emotions used are: anger, fear, disgust, joy and sadness. An emotion mapping has been used to enlarge the set of emotions:
+* anger: general-dislike
+* fear: negative-fear
+* disgust: shame
+* joy: gratitude, affective, enthusiasm, love, joy, liking
+* sadness: ingratitude, daze, humility, compassion, despair, anxiety, sadness
+sentiText plugin
+****************
+This plugin is based on the classifier developed for the TASS 2015 competition. It has been developed for Spanish and English. When it is activated, the plugin goes through the following phases:
+* Train both classifiers (English and Spanish).
+* Initialize resources (dictionaries, stopwords, etc.).
+* Extract bags of words, lemmas and chars.
+Once the plugin is activated, the features that are extracted for the classifiers are:
+* Matches with the bag of words extracted from the training corpus.
+* Sentiment score of the sentences, extracted from the dictionaries (lexicons and emoticons).
+* Negations and intensifiers identified in the sentences.
+* Complementary features such as exclamation and interrogation marks, elongated and capitalized words, hashtags, etc.
+The plugin has a preprocessor, focused on Twitter corpora, that is used to clean the text and simplify the feature extraction.
+There is more information available in the following article:
+Aspect based Sentiment Analysis of Spanish Tweets, Oscar Araque and Ignacio Corcuera-Platas and Constantino Román-Gómez and Carlos A. Iglesias and J. Fernando Sánchez-Rada. http://gsi.dit.upm.es/es/investigacion/publicaciones?view=publication&task=show&id=37
+vaderSentiment plugin
+*********************
+This plugin uses the vaderSentiment module, which is described in the paper: VADER: A Parsimonious Rule-based Model for Sentiment Analysis of Social Media Text. C.J. Hutto and Eric Gilbert. Eighth International Conference on Weblogs and Social Media (ICWSM-14). Ann Arbor, MI, June 2014.
+If you use this plugin in your research, please cite the above paper.
+For more information about its functionality, check the official repository:
+https://github.com/cjhutto/vaderSentiment

docs/examples.rst

@@ -1,78 +0,0 @@
Examples
--------
All the examples in this page use the :download:`the main schema <_static/schemas/definitions.json>`.
Simple NIF annotation
.....................
Description
,,,,,,,,,,,
This example covers the basic example in the NIF documentation: `<http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core/nif-core.html>`_.
Representation
,,,,,,,,,,,,,,
.. literalinclude:: examples/results/example-basic.json
:language: json-ld
Sentiment Analysis
.....................
Description
,,,,,,,,,,,
This annotation corresponds to the sentiment analysis of an input. The example shows the sentiment represented according to Marl format.
The sentiments detected are contained in the Sentiments array with their related part of the text.
Representation
,,,,,,,,,,,,,,
.. literalinclude:: examples/results/example-sentiment.json
:emphasize-lines: 5-10,25-33
:language: json-ld
Suggestion Mining
.................
Description
,,,,,,,,,,,
The suggestions schema represented below shows the suggestions detected in the text. Within it, we can find the NIF fields highlighted that corresponds to the text of the detected suggestion.
Representation
,,,,,,,,,,,,,,
.. literalinclude:: examples/results/example-suggestion.json
:emphasize-lines: 5-8,22-27
:language: json-ld
Emotion Analysis
................
Description
,,,,,,,,,,,
This annotation represents the emotion analysis of an input to Senpy. The emotions are contained in the emotions section with the text that refers to following Onyx format and the emotion model defined beforehand.
Representation
,,,,,,,,,,,,,,
.. literalinclude:: examples/results/example-emotion.json
:language: json-ld
:emphasize-lines: 5-8,25-37
Named Entity Recognition
........................
Description
,,,,,,,,,,,
The Named Entity Recognition is represented as follows. In this particular case, it can be seen within the entities array the entities recognised. For the example input, Microsoft and Windows Phone are the ones detected.
Representation
,,,,,,,,,,,,,,
.. literalinclude:: examples/results/example-ner.json
:emphasize-lines: 5-8,19-34
:language: json-ld
Complete example
................
Description
,,,,,,,,,,,
This example covers all of the above cases, integrating all the annotations in the same document.
Representation
,,,,,,,,,,,,,,
.. literalinclude:: examples/results/example-complete.json
:language: json-ld


@@ -1,74 +0,0 @@
{
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "me:Result1",
"@type": "results",
"analysis": [
"me:SAnalysis1",
"me:SgAnalysis1",
"me:EmotionAnalysis1",
"me:NER1"
],
"entries": [
{
"@id": "http://micro.blog/status1",
"@type": [
"nif:RFC5147String",
"nif:Context"
],
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"entities": [
{
"@id": "http://micro.blog/status1#char=5,13",
"nif:beginIndex": 5,
"nif:endIndex": 13,
"nif:anchorOf": "Microsoft",
"me:references": "http://dbpedia.org/page/Microsoft",
"prov:wasGeneratedBy": "me:NER1"
},
{
"@id": "http://micro.blog/status1#char=25,37",
"nif:beginIndex": 25,
"nif:endIndex": 37,
"nif:anchorOf": "Windows Phone",
"me:references": "http://dbpedia.org/page/Windows_Phone",
"prov:wasGeneratedBy": "me:NER1"
}
],
"suggestions": [
{
"@id": "http://micro.blog/status1#char=16,77",
"nif:beginIndex": 16,
"nif:endIndex": 77,
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
"prov:wasGeneratedBy": "me:SgAnalysis1"
}
],
"sentiments": [
{
"@id": "http://micro.blog/status1#char=80,97",
"nif:beginIndex": 80,
"nif:endIndex": 97,
"nif:anchorOf": "You'll be awesome.",
"marl:hasPolarity": "marl:Positive",
"marl:polarityValue": 0.9,
"prov:wasGeneratedBy": "me:SAnalysis1"
}
],
"emotions": [
{
"@id": "http://micro.blog/status1#char=0,109",
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"prov:wasGeneratedBy": "me:EAnalysis1",
"onyx:hasEmotion": [
{
"onyx:hasEmotionCategory": "wna:liking"
},
{
"onyx:hasEmotionCategory": "wna:excitement"
}
]
}
]
}
]
}


@@ -1,78 +0,0 @@
{
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "me:Result1",
"@type": "results",
"analysis": [
"me:SAnalysis1",
"me:SgAnalysis1",
"me:EmotionAnalysis1",
"me:NER1",
{
"@type": "analysis",
"@id": "anonymous"
}
],
"entries": [
{
"@id": "http://micro.blog/status1",
"@type": [
"nif:RFC5147String",
"nif:Context"
],
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"entities": [
{
"@id": "http://micro.blog/status1#char=5,13",
"nif:beginIndex": 5,
"nif:endIndex": 13,
"nif:anchorOf": "Microsoft",
"me:references": "http://dbpedia.org/page/Microsoft",
"prov:wasGeneratedBy": "me:NER1"
},
{
"@id": "http://micro.blog/status1#char=25,37",
"nif:beginIndex": 25,
"nif:endIndex": 37,
"nif:anchorOf": "Windows Phone",
"me:references": "http://dbpedia.org/page/Windows_Phone",
"prov:wasGeneratedBy": "me:NER1"
}
],
"suggestions": [
{
"@id": "http://micro.blog/status1#char=16,77",
"nif:beginIndex": 16,
"nif:endIndex": 77,
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
"prov:wasGeneratedBy": "me:SgAnalysis1"
}
],
"sentiments": [
{
"@id": "http://micro.blog/status1#char=80,97",
"nif:beginIndex": 80,
"nif:endIndex": 97,
"nif:anchorOf": "You'll be awesome.",
"marl:hasPolarity": "marl:Positive",
"marl:polarityValue": 0.9,
"prov:wasGeneratedBy": "me:SAnalysis1"
}
],
"emotions": [
{
"@id": "http://micro.blog/status1#char=0,109",
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"prov:wasGeneratedBy": "me:EAnalysis1",
"onyx:hasEmotion": [
{
"onyx:hasEmotionCategory": "wna:liking"
},
{
"onyx:hasEmotionCategory": "wna:excitement"
}
]
}
]
}
]
}

docs/examples/results/example-suggestion.json

@@ -3,7 +3,10 @@
   "@id": "me:Result1",
   "@type": "results",
   "analysis": [
-    "me:SgAnalysis1"
+    {
+      "@id": "me:SgAnalysis1",
+      "@type": "me:SuggestionAnalysis"
+    }
   ],
   "entries": [
     {

docs/index.rst

@@ -1,35 +1,15 @@
 Welcome to Senpy's documentation!
 =================================
-.. image:: https://readthedocs.org/projects/senpy/badge/?version=latest
-  :target: http://senpy.readthedocs.io/en/latest/
-.. image:: https://badge.fury.io/py/senpy.svg
-  :target: https://badge.fury.io/py/senpy
-.. image:: https://lab.cluster.gsi.dit.upm.es/senpy/senpy/badges/master/build.svg
-  :target: https://lab.cluster.gsi.dit.upm.es/senpy/senpy/commits/master
-.. image:: https://lab.cluster.gsi.dit.upm.es/senpy/senpy/badges/master/coverage.svg
-  :target: https://lab.cluster.gsi.dit.upm.es/senpy/senpy/commits/master
-.. image:: https://img.shields.io/pypi/l/requests.svg
-  :target: https://lab.cluster.gsi.dit.upm.es/senpy/senpy/
+Contents:
-Senpy is a framework for sentiment and emotion analysis services.
-Services built with senpy are interchangeable and easy to use because they share a common :doc:`apischema`.
-It also simplifies service development.
-.. image:: senpy-architecture.png
-  :width: 100%
-  :align: center
 .. toctree::
-  :caption: Learn more about senpy:
-  :maxdepth: 2
   senpy
   installation
-  demo
   usage
-  apischema
+  api
+  schema
   plugins
   conversion
-  about
+  demo
+  :maxdepth: 2

docs/installation.rst

@@ -1,16 +1,6 @@
 Installation
 ------------
-The stable version can be used in two ways: as a system/user library through pip, or as a docker image.
-The docker image is the recommended way because it is self-contained and isolated from the system, which means:
-* Downloading and using it is just one command
-* All dependencies are included
-* It is OS-independent (MacOS, Windows, GNU/Linux)
-* Several versions may coexist in the same machine without additional virtual environments
-Additionally, you may create your own docker image with your custom plugins, ready to be used by others.
+The stable version can be installed in three ways.
 Through PIP
 ***********
@@ -32,41 +22,6 @@ If you want to install senpy globally, use sudo instead of the ``--user`` flag.
 Docker Image
 ************
-Build the image or use the pre-built one:
-.. code:: bash
-   docker run -ti -p 5000:5000 gsiupm/senpy --host 0.0.0.0 --default-plugins
-To add custom plugins, use a docker volume:
-.. code:: bash
-   docker run -ti -p 5000:5000 -v <PATH OF PLUGINS>:/plugins gsiupm/senpy --host 0.0.0.0 --default-plugins -f /plugins
-Python 2
-........
-There is a Senpy version for python2 too:
-.. code:: bash
-   docker run -ti -p 5000:5000 gsiupm/senpy:python2.7 --host 0.0.0.0 --default-plugins
-Alias
-.....
-If you are using the docker approach regularly, it is advisable to use a script or an alias to simplify your executions:
-.. code:: bash
-   alias senpy='docker run --rm -ti -p 5000:5000 -v $PWD:/senpy-plugins gsiupm/senpy --default-plugins'
-Now, you may run senpy from any folder in your computer like so:
-.. code:: bash
-   senpy --version
+Build the image or use the pre-built one: ``docker run -ti -p 5000:5000 gsiupm/senpy --host 0.0.0.0 --default-plugins``.
+To add custom plugins, add a volume and tell senpy where to find the plugins: ``docker run -ti -p 5000:5000 -v <PATH OF PLUGINS>:/plugins gsiupm/senpy --host 0.0.0.0 --default-plugins -f /plugins``

docs/plugins-definition.rst

@@ -1,113 +0,0 @@
Advanced plugin definition
--------------------------
In addition to finding plugins defined in source code files, senpy can also load a special type of definition file (`.senpy` files).
This used to be the only mechanism for loading in earlier versions of senpy.
The definition file contains basic information about the plugin.
Lastly, it is also possible to add new plugins programmatically.
.. contents:: :local:
What is a plugin?
=================
A plugin is a program that, given a text, will add annotations to it.
In practice, a plugin consists of at least two files:
- Definition file: a `.senpy` file that describes the plugin (e.g. what input parameters it accepts, what emotion model it uses).
- Python module: the actual code that will add annotations to each input.
This separation allows us to deploy plugins that use the same code but employ different parameters.
For instance, one could use the same classifier and processing in several plugins, but train with different datasets.
This scenario is particularly useful for evaluation purposes.
The only limitation is that the name of each plugin needs to be unique.
Definition files
================
The definition file complements and overrides the attributes provided by the plugin.
It can be written in YAML or JSON.
The most important attributes are:
* **name**: unique name that senpy will use internally to identify the plugin.
* **module**: indicates the module that contains the plugin code, which will be automatically loaded by senpy.
* **version**
* extra_params: to add parameters to the senpy API when this plugin is requested. Those parameters may be required, and have aliased names. For instance:
.. code:: yaml
extra_params:
hello_param:
aliases: # required
- hello_param
- hello
required: true
default: Hi you
values:
- Hi you
- Hello y'all
- Howdy
A complete example:
.. code:: yaml
name: <Name of the plugin>
module: <Python file>
version: 0.1
And the json equivalent:
.. code:: json
{
"name": "<Name of the plugin>",
"module": "<Python file>",
"version": "0.1"
}
Example plugin with a definition file
=====================================
In this section, we will implement a basic sentiment analysis plugin.
To determine the polarity of each entry, the plugin will compare the length of the string to a threshold.
This threshold will be included in the definition file.
The definition file would look like this:
.. code:: yaml
name: helloworld
module: helloworld
version: 0.0
threshold: 10
description: Hello World
Now, in a file named ``helloworld.py``:
.. code:: python
#!/bin/env python
#helloworld.py
from senpy import AnalysisPlugin
from senpy import Sentiment
class HelloWorld(AnalysisPlugin):
def analyse_entry(self, entry, params):
'''Basically do nothing with each entry'''
sentiment = Sentiment()
if len(entry.text) < self.threshold:
sentiment['marl:hasPolarity'] = 'marl:Positive'
else:
sentiment['marl:hasPolarity'] = 'marl:Negative'
entry.sentiments.append(sentiment)
yield entry
The complete code of the example plugin is available `here <https://lab.cluster.gsi.dit.upm.es/senpy/plugin-prueba>`__.
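
To try the example, something like the following should work (a sketch; it assumes the two files above sit in the current folder and senpy is installed):

.. code:: bash

   senpy -f . --host 0.0.0.0 &
   curl "http://127.0.0.1:5000/api?algo=helloworld&i=A+short+text"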

docs/plugins.rst

@@ -1,83 +1,63 @@
Developing new plugins Developing new plugins
---------------------- ----------------------
This document contains the minimum to get you started with developing new analysis plugin. This document describes how to develop a new analysis plugin. For an example of conversion plugins, see :doc:`conversion`.
For an example of conversion plugins, see :doc:`conversion`.
For a description of definition files, see :doc:`plugins-definition`.
A more step-by-step tutorial with slides is available `here <https://lab.cluster.gsi.dit.upm.es/senpy/senpy-tutorial>`__ Each plugin represents a different analysis process. There are two types of files that are needed by senpy for loading a plugin:
.. contents:: :local: - Definition file, has the ".senpy" extension.
- Code file, is a python file.
What is a plugin? This separation will allow us to deploy plugins that use the same code but employ different parameters.
================= For instance, one could use the same classifier and processing in several plugins, but train with different datasets.
This scenario is particularly useful for evaluation purposes.
A plugin is a python object that can process entries. Given an entry, it will modify it, add annotations to it, or generate new entries. The only limitation is that the name of each plugin needs to be unique.
Plugins Definitions
===================
The definition file contains all the attributes of the plugin, and can be written in YAML or JSON.
The most important attributes are:
* **name**: unique name that senpy will use internally to identify the plugin.
* **module**: indicates the module that contains the plugin code, which will be automatically loaded by senpy.
* **version**
* extra_params: used to specify parameters that the plugin accepts that are not already part of the senpy API. Those parameters may be required, and have aliased names. For instance:
.. code:: yaml
extra_params:
hello_param:
aliases: # required
- hello_param
- hello
required: true
default: Hi you
values:
- Hi you
- Hello y'all
- Howdy
Parameter validation will fail if a required parameter without a default has not been provided, or if the definition includes a set of values and the provided one does not match one of them.
What is an entry? A complete example:
=================
Entries are objects that can be annotated. .. code:: yaml
In general, they will be a piece of text.
By default, entries are `NIF contexts <http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core/nif-core.html>`_ represented in JSON-LD format. name: <Name of the plugin>
It is a dictionary/JSON object that looks like this: module: <Python file>
version: 0.1
.. code:: python And the json equivalent:
{ .. code:: json
"@id": "<unique identifier or blank node name>",
"nif:isString": "input text",
"sentiments": [ {
...
}
],
...
}
Annotations are added to the object like this: {
"name": "<Name of the plugin>",
.. code:: python "module": "<Python file>",
"version": "0.1"
entry = Entry() }
entry.vocabulary__annotationName = 'myvalue'
entry['vocabulary:annotationName'] = 'myvalue'
entry['annotationNameURI'] = 'myvalue'
Where vocabulary is one of the prefixes defined in the default senpy context, and annotationNameURI is a full URI.
The value may be any valid JSON-LD dictionary.
For simplicity, senpy includes a series of models by default in the ``senpy.models`` module.
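Putting the pieces together, a brief sketch of creating an entry and annotating it with the default models (the text here is arbitrary):

.. code:: python

    from senpy.models import Entry, Sentiment

    entry = Entry(nif__isString='Senpy is awesome')
    sentiment = Sentiment(marl__hasPolarity='marl:Positive')
    entry.sentiments.append(sentiment)
    # The entry now carries the sentiment annotation and can be
    # serialized to JSON-LD by senpy.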
What are annotations?
=====================
They are objects just like entries.
Senpy ships with several default annotations, including ``Sentiment``, ``Emotion`` and ``EmotionSet``.
What's a plugin made of?
========================
When receiving a query, senpy selects what plugin or plugins should process each entry, and in what order.
It also makes sure that every entry and the parameters provided by the user meet the plugin requirements.
Hence, two parts are necessary: 1) the code that will process the entry, and 2) some attributes and metadata that will tell senpy how to interact with the plugin.
In practice, this is what a plugin looks like, tests included:
.. literalinclude:: ../senpy/plugins/example/rand_plugin.py
:emphasize-lines: 5-11
:language: python
The lines highlighted contain some information about the plugin.
In particular, the following information is mandatory (a minimal sketch follows the list):
* A unique name for the class. In our example, Rand.
* The subclass/type of plugin. This is typically either `SentimentPlugin` or `EmotionPlugin`. However, new types of plugin can be created for different annotations. The only requirement is that these new types inherit from `senpy.Analysis`
* A description of the plugin. This can be done simply by adding a doc to the class.
* A version, which should get updated.
* An author name.
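As a minimal sketch, that metadata maps onto a class like this (all names here are placeholders):

.. code:: python

    from senpy.plugins import SentimentPlugin

    class MySentiment(SentimentPlugin):
        '''The class docstring doubles as the plugin description'''
        author = '@me'
        version = '0.1'

        def analyse_entry(self, entry, params):
            yield entry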
Plugins Code Plugins Code
@@ -85,140 +65,64 @@ Plugins Code
The basic methods in a plugin are: The basic methods in a plugin are:
* analyse_entry: called on every user request. It takes two parameters: ``Entry``, the entry object, and ``params``, the parameters supplied by the user. It should yield one or more ``Entry`` objects. * __init__
* activate: used to load memory-hungry resources. For instance, to train a classifier. * activate: used to load memory-hungry resources
* deactivate: used to free up resources when the plugin is no longer needed. * deactivate: used to free up resources
* analyse_entry: called on every user request. It takes in the parameters supplied by a user and should yield one or more ``Entry`` objects.
Plugins are loaded asynchronously, so don't worry if the activate method takes too long. The plugin will be marked as activated once it is finished executing the method. Plugins are loaded asynchronously, so don't worry if the activate method takes too long. The plugin will be marked as activated once it is finished executing the method.
How does senpy find modules? Example plugin
============================ ==============
Senpy looks for files of two types: In this section, we will implement a basic sentiment analysis plugin.
To determine the polarity of each entry, the plugin will compare the length of the string to a threshold.
This threshold will be included in the definition file.
* Python files of the form `senpy_<NAME>.py` or `<NAME>_plugin.py`. In these files, it will look for: 1) Instances that inherit from `senpy.Plugin`, or subclasses of `senpy.Plugin` that can be initialized without a configuration file. i.e. classes that contain all the required attributes for a plugin. The definition file would look like this:
* Plugin definition files (see :doc:`advanced-plugins`)
Defining additional parameters .. code:: yaml
==============================
Your plugin may ask for additional parameters from the users of the service by using the attribute ``extra_params`` in your plugin definition. name: helloworld
It takes a dictionary, where the keys are the name of the argument/parameter, and the value has the following fields: module: helloworld
version: 0.0
threshold: 10
* aliases: the different names which can be used in the request to use the parameter.
* required: if set to true, users need to provide this parameter unless a default is set. Now, in a file named ``helloworld.py``:
* options: the different acceptable values of the parameter (i.e. an enum). If set, the value provided must match one of the options.
* default: the default value of the parameter, if none is provided in the request.
.. code:: python .. code:: python
"extra_params":{ #!/bin/env python
"language": { #helloworld.py
"aliases": ["language", "lang", "l"],
"required": True, from senpy.plugins import SenpyPlugin
"options": ["es", "en"], from senpy.models import Sentiment
"default": "es"
}
}
class HelloWorld(SenpyPlugin):
Loading data and files def analyse_entry(self, entry, params):
====================== '''Basically do nothing with each entry'''
Most plugins will need access to files (dictionaries, lexicons, etc.). sentiment = Sentiment()
These files are usually heavy or under a license that does not allow redistribution. if len(entry.text) < self.threshold:
For this reason, senpy has a `data_folder` that is separated from the source files. sentiment['marl:hasPolarity'] = 'marl:Positive'
The location of this folder is controlled programmatically or by setting the `SENPY_DATA` environment variable. else:
sentiment['marl:hasPolarity'] = 'marl:Negative'
entry.sentiments.append(sentiment)
yield entry
Plugins have a convenience function `self.open` which will automatically prepend the data folder to relative paths:
.. code:: python
import os
class PluginWithResources(AnalysisPlugin):
file_in_data = <FILE PATH>
file_in_sources = <FILE PATH>
def activate(self):
with self.open(self.file_in_data) as f:
self._classifier = train_from_file(f)
file_in_source = os.path.join(self.get_folder(), self.file_in_sources)
with self.open(file_in_source) as f:
pass
It is good practice to specify the paths of these files in the plugin configuration, so the same code can be reused with different resources.
Docker image
============
Add the following dockerfile to your project to generate a docker image with your plugin:
.. code:: dockerfile
FROM gsiupm/senpy
Once you make sure your plugin works with a specific version of senpy, pin that version in the file so your build keeps working even if senpy gets updated.
e.g.:
.. code:: dockerfile
FROM gsiupm/senpy:1.0.1
This will copy your source folder to the image, and install all dependencies.
Now, to build an image:
.. code:: shell
docker build . -t gsiupm/exampleplugin
And you can run it with:
.. code:: shell
docker run -p 5000:5000 gsiupm/exampleplugin
If the plugin uses non-source files (:ref:`loading data and files`), the recommended way is to use the `SENPY_DATA` folder.
Data can then be mounted in the container or added to the image.
The former is recommended for open source plugins with licensed resources, whereas the latter is the most convenient and can be used for private images.
Mounting data:
.. code:: bash
docker run -v $PWD/data:/data gsiupm/exampleplugin
Adding data to the image:
.. code:: dockerfile
FROM gsiupm/senpy:1.0.1
COPY data /
F.A.Q. F.A.Q.
====== ======
What annotations can I use?
???????????????????????????
You can add almost any annotation to an entry.
The most common use cases are covered in the :doc:`apischema`.
Why does the analyse function yield instead of return? Why does the analyse function yield instead of return?
?????????????????????????????????????????????????????? ??????????????????????????????????????????????????????
This is so that plugins may add new entries to the response or filter some of them. This is so that plugins may add new entries to the response or filter some of them.
For instance, a chunker may split one entry into several. For instance, a `context detection` plugin may add a new entry for each context in the original entry.
On the other hand, a conversion plugin may leave out those entries that do not contain relevant information. On the other hand, a conversion plugin may leave out those entries that do not contain relevant information.
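For illustration, a sketch of the splitting case: a hypothetical chunker that yields one new entry per sentence (``SentenceChunker`` is not part of senpy):

.. code:: python

    from senpy import AnalysisPlugin
    from senpy.models import Entry

    class SentenceChunker(AnalysisPlugin):
        '''Hypothetical plugin that splits each entry into sentences'''
        author = '@example'
        version = '0.1'

        def analyse_entry(self, entry, params):
            # Emit one new entry per (naively split) sentence
            for sentence in entry['nif:isString'].split('.'):
                if sentence.strip():
                    yield Entry(nif__isString=sentence.strip())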
If I'm using a classifier, where should I train it? If I'm using a classifier, where should I train it?
@@ -228,9 +132,9 @@ Training a classifier can be time consuming. To avoid running the training
.. code:: python .. code:: python
from senpy.plugins import ShelfMixin, AnalysisPlugin from senpy.plugins import ShelfMixin, SenpyPlugin
class MyPlugin(ShelfMixin, AnalysisPlugin): class MyPlugin(ShelfMixin, SenpyPlugin):
def train(self): def train(self):
''' Code to train the classifier ''' Code to train the classifier
''' '''
@@ -247,18 +151,12 @@ Training a classifier can be time consuming. To avoid running the training
def deactivate(self): def deactivate(self):
self.close() self.close()
You can specify a 'shelf_file' in your .senpy file. By default the ShelfMixin creates a file based on the plugin name and stores it in that plugin's folder.
By default the ShelfMixin creates a file based on the plugin name and stores it in that plugin's folder. I want to implement my service as a plugin. How can I do it?
However, you can manually specify a 'shelf_file' in your .senpy file. ????????????????????????????????????????????????????????????
Shelves may get corrupted if the plugin exits unexpectedly. This example illustrates how to implement the Sentiment140 service as a plugin in senpy
A corrupt shelf prevents the plugin from loading.
If you do not care about the data in the shelf, you can force your plugin to remove the corrupted file and load anyway: set 'force_shelf' to True in your plugin and start it again.
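For instance, a cached training step might look roughly like this. This is a sketch that assumes the shelf is exposed as ``self.sh``, as senpy's ``ShelfMixin`` does; ``CachedPlugin`` and its trivial ``train`` method are placeholders:

.. code:: python

    from senpy.plugins import ShelfMixin, AnalysisPlugin

    class CachedPlugin(ShelfMixin, AnalysisPlugin):
        '''Hypothetical plugin that trains once and shelves the result'''
        author = '@example'
        version = '0.1'

        def train(self):
            '''Placeholder for an expensive training step'''
            return 'trained-model'

        def activate(self):
            if 'model' not in self.sh:
                # Only train when there is no previously shelved model
                self.sh['model'] = self.train()
            self._model = self.sh['model']

        def deactivate(self):
            self.close()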
How can I turn an external service into a plugin?
?????????????????????????????????????????????????
This example illustrates how to implement a plugin that accesses the Sentiment140 service.
.. code:: python .. code:: python
@@ -287,11 +185,46 @@ This example ilustrate how to implement a plugin that accesses the Sentiment140
prefix=p, prefix=p,
marl__hasPolarity=polarity, marl__hasPolarity=polarity,
marl__polarityValue=polarity_value) marl__polarityValue=polarity_value)
sentiment.prov(self) sentiment.prov__wasGeneratedBy = self.id
entry.sentiments.append(sentiment) entry.sentiments.append(sentiment)
yield entry yield entry
Where can I define extra parameters to be introduced in the request to my plugin?
?????????????????????????????????????????????????????????????????????????????????
You can add these parameters in the definition file under the attribute "extra_params". Each parameter is defined by a set of attribute-value pairs. The basic attributes are:
* aliases: the different names which can be used in the request to use the parameter.
* required: a boolean that indicates whether the parameter is mandatory for the plugin to operate.
* options: the accepted values of the parameter.
* default: the default value of the parameter; this is useful when the parameter is required and you want to provide a fallback.
.. code:: python
"extra_params": {
"language": {
"aliases": ["language", "l"],
"required": true,
"options": ["es","en"],
"default": "es"
}
}
This example shows how to introduce a parameter associated with language.
This parameter is then read in the analyse method of the Plugin interface.
.. code:: python
lang = params.get("language")
Where can I set up variables for using them in my plugin?
?????????????????????????????????????????????????????????
You can add these variables to the definition file as attribute-value pairs.
Every field added to the definition file is available to the plugin instance.
Can I activate a DEBUG mode for my plugin? Can I activate a DEBUG mode for my plugin?
??????????????????????????????????????????? ???????????????????????????????????????????
@@ -306,7 +239,8 @@ Additionally, with the ``--pdb`` option you will be dropped into a pdb post mort
.. code:: bash .. code:: bash
python -m pdb yourplugin.py senpy --pdb
Where can I find more code examples? Where can I find more code examples?
???????????????????????????????????? ????????????????????????????????????


@@ -1,2 +1 @@
sphinxcontrib-httpdomain>=1.4 sphinxcontrib-httpdomain>=1.4
nbsphinx

docs/schema.rst (new file, 74 lines)

@@ -0,0 +1,74 @@
Schema Examples
===============
All the examples in this page use :download:`the main schema <_static/schemas/definitions.json>`.
Simple NIF annotation
---------------------
Description
...........
This example covers the basic example in the NIF documentation: `<http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core/nif-core.html>`_.
Representation
..............
.. literalinclude:: examples/example-basic.json
:language: json-ld
Sentiment Analysis
---------------------
Description
...........
Representation
..............
.. literalinclude:: examples/example-sentiment.json
:emphasize-lines: 5-10,25-33
:language: json-ld
Suggestion Mining
-----------------
Description
...........
Representation
..............
.. literalinclude:: examples/example-suggestion.json
:emphasize-lines: 5-8,22-27
:language: json-ld
Emotion Analysis
----------------
Description
...........
Representation
..............
.. literalinclude:: examples/example-emotion.json
:language: json-ld
:emphasize-lines: 5-8,25-37
Named Entity Recognition
------------------------
Description
...........
Representation
..............
.. literalinclude:: examples/example-ner.json
:emphasize-lines: 5-8,19-34
:language: json-ld
Complete example
----------------
Description
...........
This example covers all of the above cases, integrating all the annotations in the same document.
Representation
..............
.. literalinclude:: examples/example-complete.json
:language: json-ld

(Binary image files changed; previews not shown.)

@@ -1,41 +1,20 @@
What is Senpy? What is Senpy?
-------------- --------------
Senpy is a framework for text analysis using Linked Data. There are three main applications of Senpy so far: sentiment and emotion analysis, user profiling and entity recognition. Annotations and Services are compliant with NIF (NLP Interchange Format). Senpy is an open source reference implementation of a linked data model for sentiment and emotion analysis services based on the vocabularies NIF, Marl and Onyx.
Senpy aims at providing a framework where analysis modules can be integrated easily as plugins, and providing a core functionality for managing tasks such as data validation, user interaction, formatting, logging, translation to linked data, etc. The overall goal of the reference implementation Senpy is easing the adoption of the proposed linked data model for sentiment and emotion analysis services, so that services from different providers become interoperable. With this aim, the design of the reference implementation has focused on its extensibility and reusability.
The figure below summarizes the typical features in a text analysis service. A modular approach allows organizations to replace individual components with custom ones developed in-house. Furthermore, organizations can benefit from reusing prepackaged modules that provide advanced functionalities, such as algorithms for sentiment and emotion analysis, linked data publication or emotion and sentiment mapping between different providers.
Senpy implements all the common blocks, so developers can focus on what really matters: great analysis algorithms that solve real problems.
.. image:: senpy-framework.png Specifications
:width: 60% ==============
:align: center
The model used in Senpy is based on the following specifications:
Senpy for end users * Marl, a vocabulary designed to annotate and describe subjective opinions expressed on the web or in information systems.
=================== * Onyx, which is built on the same principles as Marl to annotate and describe emotions, and provides interoperability with Emotion Markup Language.
* NIF 2.0, which defines a semantic format and API for improving interoperability among natural language processing services
All services built using senpy share a common interface.
This allows users to use them (almost) interchangeably.
Senpy comes with a :ref:`built-in client`.
Senpy for service developers
============================
Senpy is a framework that turns your sentiment or emotion analysis algorithm into a full blown semantic service.
Senpy takes care of:
* Interfacing with the user: parameter validation, error handling.
* Formatting: JSON-LD, Turtle/n-triples input and output, or simple text input
* Linked Data: senpy results are semantically annotated, using a series of well established vocabularies, and sane default URIs.
* User interface: a web UI where users can explore your service and test different settings
* A client to interact with the service. Currently only available in Python.
Sharing your sentiment analysis with the world has never been easier!
Check out the :doc:`plugins` if you have developed an analysis algorithm (e.g. sentiment analysis) and you want to publish it as a service.
Architecture Architecture
============ ============
@@ -50,5 +29,7 @@ Senpy proposes a modular and dynamic architecture that allows:
The framework consists of two main modules: Senpy core, which is the building block of the service, and Senpy plugins, which consist of the analysis algorithm. The next figure depicts a simplified version of the processes involved in an analysis with the Senpy framework. The framework consists of two main modules: Senpy core, which is the building block of the service, and Senpy plugins, which consist of the analysis algorithm. The next figure depicts a simplified version of the processes involved in an analysis with the Senpy framework.
.. image:: senpy-architecture.png .. image:: senpy-architecture.png
:width: 100% :height: 400px
:width: 800px
:scale: 100 %
:align: center :align: center


@@ -1,66 +0,0 @@
Server
======
The senpy server is launched via the `senpy` command:
.. code:: text
usage: senpy [-h] [--level logging_level] [--debug] [--default-plugins]
[--host HOST] [--port PORT] [--plugins-folder PLUGINS_FOLDER]
[--only-install] [--only-list] [--data-folder DATA_FOLDER]
[--threaded] [--version]
Run a Senpy server
optional arguments:
-h, --help show this help message and exit
--level logging_level, -l logging_level
Logging level
--debug, -d Run the application in debug mode
--default-plugins Load the default plugins
--host HOST Use 0.0.0.0 to accept requests from any host.
--port PORT, -p PORT Port to listen on.
--plugins-folder PLUGINS_FOLDER, -f PLUGINS_FOLDER
Where to look for plugins.
--only-install, -i Do not run a server, only install plugin dependencies
--only-list, --list Do not run a server, only list plugins found
--data-folder DATA_FOLDER, --data DATA_FOLDER
Where to look for data. It can be set with the SENPY_DATA
environment variable as well.
--threaded Run a threaded server
--version, -v Output the senpy version and exit
When launched, the server will recursively look for plugins in the specified plugins folder (the current working directory by default).
For every plugin found, it will download its dependencies, and try to activate it.
The default server includes a playground and an endpoint with all plugins found.
Let's run senpy with the default plugins:
.. code:: bash
senpy -f . --default-plugins
Now go to `http://localhost:5000 <http://localhost:5000>`_, you should be greeted by the senpy playground:
.. image:: senpy-playground.png
:width: 100%
:alt: Playground
The playground is a user-friendly way to test your plugins, but you can always use the service directly: `http://localhost:5000/api?input=hello <http://localhost:5000/api?input=hello>`_.
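The same request from Python, as a quick sketch using the ``requests`` library (this assumes the server above is still running):

.. code:: python

    import requests

    res = requests.get('http://localhost:5000/api',
                       params={'input': 'hello'})
    print(res.json())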
By default, senpy will listen only on the `127.0.0.1` address.
That means you can only access the API from your own machine (localhost).
You can listen on a different address using the `--host` flag (e.g., 0.0.0.0).
The default port is 5000.
You can change it with the `--port` flag.
For instance, to accept connections on port 6000 on any interface:
.. code:: bash
senpy --host 0.0.0.0 --port 6000
For more options, see the `--help` page.


@@ -1,15 +1,80 @@
Usage Usage
----- -----
First of all, you need to install the package. The easiest and recommended way is to just use the command-line tool to load your plugins and launch the server.
See :doc:`installation` for instructions.
Once installed, the `senpy` command should be available.
.. toctree:: .. code:: bash
:maxdepth: 1
server senpy
SenpyClientUse
commandline Or, alternatively:
.. code:: bash
python -m senpy
This will create a server with any modules found in the current path.
Useful command-line options
===========================
If you want to load modules located in different folders under the root folder, use the following option.
.. code:: bash
python -m senpy -f .
The default port used by senpy is 5000, but you can change it using the option `--port`.
.. code:: bash
python -m senpy --port 8080
The host address senpy binds to can also be changed. The default value is `127.0.0.1`.
.. code:: bash
python -m senpy --host 0.0.0.0
For more options, see the `--help` page.
Alternatively, you can use the modules included in senpy to build your own application.
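A bare-bones sketch of that approach, mirroring what the command-line tool does internally (the plugins folder and port here are arbitrary):

.. code:: python

    from flask import Flask
    from senpy.extensions import Senpy

    app = Flask(__name__)
    # Load plugins from the current folder, plus the default ones
    sp = Senpy(app, '.', default_plugins=True)
    sp.activate_all()

    if __name__ == '__main__':
        app.run('127.0.0.1', 5000)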
Senpy server
============
Once the server is launched, it exposes a basic endpoint with a playground for the plugins that have been loaded.
For a description of the server's endpoints, see the NIF API section_.
CLI
===
This video shows how to use senpy through the command-line tool.
https://asciinema.org/a/9uwef1ghkjk062cw2t4mhzpyk
Request example in python
=========================
This example shows how to make a request to the default plugin:
.. code:: python
from senpy.client import Client
c = Client('http://127.0.0.1:5000/api/')
r = c.analyse('hello world')
for entry in r.entries:
print('{} -> {}'.format(entry.text, entry.emotions))
.. _section: http://senpy.readthedocs.org/en/latest/api.html
Conversion
==========
See :doc:`conversion`


@@ -1,24 +0,0 @@
Vocabularies and model
======================
The model used in Senpy is based on NIF 2.0 [1], which defines a semantic format and API for improving interoperability among natural language processing services.
Senpy has been applied to sentiment and emotion analysis services using the following vocabularies:
* Marl [2,6], a vocabulary designed to annotate and describe subjective opinions expressed on the web or in information systems.
* Onyx [3,5], which is built on the same principles as Marl to annotate and describe emotions, and provides interoperability with Emotion Markup Language.
An overview of the vocabularies and their use can be found in [4].
[1] Guidelines for developing NIF-based NLP services, Final Community Group Report 22 December 2015 Available at: https://www.w3.org/2015/09/bpmlod-reports/nif-based-nlp-webservices/
[2] Marl Ontology Specification, available at http://www.gsi.dit.upm.es/ontologies/marl/
[3] Onyx Ontology Specification, available at http://www.gsi.dit.upm.es/ontologies/onyx/
[4] Iglesias, C. A., Sánchez-Rada, J. F., Vulcu, G., & Buitelaar, P. (2017). Linked Data Models for Sentiment and Emotion Analysis in Social Networks. In Sentiment Analysis in Social Networks (pp. 49-69).
[5] Sánchez-Rada, J. F., & Iglesias, C. A. (2016). Onyx: A linked data approach to emotion representation. Information Processing & Management, 52(1), 99-114.
[6] Westerski, A., Iglesias Fernandez, C. A., & Tapia Rico, F. (2011). Linked opinions: Describing sentiments on the structured web of data.


@@ -1,23 +0,0 @@
This is a collection of plugins that exemplify certain aspects of plugin development with senpy.
The first series of plugins is the `basic` ones.
Their starting point is a classification function defined in `basic.py`.
They all include tests, and running them as a script will run all tests.
In ascending order of customization, the plugins are:
* Basic is the simplest plugin of all. It leverages the `SentimentBox` Plugin class to create a plugin out of a classification method, and `MappingMixin` to convert the labels from (`pos`, `neg`) to (`marl:Positive`, `marl:Negative`).
* Basic_box is just like the previous one, but replaces the mixin with a custom function.
* Basic_configurable is a version of `basic` with a configurable map of emojis for each sentiment.
* Basic_parameterized is like `basic_info`, but users set the map in each query (via `extra_parameters`).
* Basic_analyse\_entry uses the more general `analyse_entry` method and adds the annotations individually.
The rest of the plugins show advanced topics:
* mynoop: shows how to add a definition file with external requirements for a plugin. Doing this with a python-only module would require moving all imports of the requirements to their functions, which is considered bad practice.
* Async: a barebones example of training a plugin and analyzing data in parallel.
All of the plugins in this folder include a set of test cases and they are periodically tested with the latest version of senpy.
Additionally, for an example of a stand-alone plugin that can be tested and deployed with docker, take a look at: lab.cluster.gsi.dit.upm.es/senpy/plugin-example
@@ -1,37 +0,0 @@
from senpy import AnalysisPlugin
import multiprocessing
def _train(process_number):
return process_number
class Async(AnalysisPlugin):
'''An example of an asynchronous module'''
author = '@balkian'
version = '0.2'
async = True
def _do_async(self, num_processes):
pool = multiprocessing.Pool(processes=num_processes)
values = sorted(pool.map(_train, range(num_processes)))
return values
def activate(self):
self.value = self._do_async(4)
def analyse_entry(self, entry, params):
values = self._do_async(2)
entry.async_values = values
yield entry
test_cases = [
{
'input': 'any',
'expected': {
'async_values': [0, 1]
}
}
]


@@ -1,23 +0,0 @@
#!/usr/local/bin/python
# coding: utf-8
emoticons = {
'pos': [':)', ':]', '=)', ':D'],
'neg': [':(', ':[', '=(']
}
emojis = {
'pos': ['😁', '😂', '😃', '😄', '😆', '😅', '😄', '😍'],
'neg': ['😢', '😡', '😠', '😞', '😖', '😔', '😓', '😒']
}
def get_polarity(text, dictionaries=[emoticons, emojis]):
polarity = 'marl:Neutral'
for dictionary in dictionaries:
for label, values in dictionary.items():
for emoticon in values:
if emoticon and emoticon in text:
polarity = label
break
return polarity
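# A quick usage sketch (not part of the original module): the helper
# returns the matching label, or 'marl:Neutral' when nothing matches.
if __name__ == '__main__':
    print(get_polarity('Hello :)'))    # -> 'pos'
    print(get_polarity('No smileys'))  # -> 'marl:Neutral'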


@@ -1,47 +0,0 @@
#!/usr/local/bin/python
# coding: utf-8
from senpy import easy_test, models, plugins
import basic
class BasicAnalyseEntry(plugins.SentimentPlugin):
'''Equivalent to Basic, implementing the analyse_entry method'''
author = '@balkian'
version = '0.1'
mappings = {
'pos': 'marl:Positive',
'neg': 'marl:Negative',
'default': 'marl:Neutral'
}
def analyse_entry(self, entry, params):
polarity = basic.get_polarity(entry.text)
polarity = self.mappings.get(polarity, self.mappings['default'])
s = models.Sentiment(marl__hasPolarity=polarity)
s.prov(self)
entry.sentiments.append(s)
yield entry
test_cases = [{
'input': 'Hello :)',
'polarity': 'marl:Positive'
}, {
'input': 'So sad :(',
'polarity': 'marl:Negative'
}, {
'input': 'Yay! Emojis 😁',
'polarity': 'marl:Positive'
}, {
'input': 'But no emoticons 😢',
'polarity': 'marl:Negative'
}]
if __name__ == '__main__':
easy_test()


@@ -1,41 +0,0 @@
#!/usr/local/bin/python
# coding: utf-8
from senpy import easy_test, SentimentBox
import basic
class BasicBox(SentimentBox):
''' A modified version of Basic that converts annotations manually'''
author = '@balkian'
version = '0.1'
mappings = {
'pos': 'marl:Positive',
'neg': 'marl:Negative',
'default': 'marl:Neutral'
}
def predict_one(self, input):
output = basic.get_polarity(input)
return self.mappings.get(output, self.mappings['default'])
test_cases = [{
'input': 'Hello :)',
'polarity': 'marl:Positive'
}, {
'input': 'So sad :(',
'polarity': 'marl:Negative'
}, {
'input': 'Yay! Emojis 😁',
'polarity': 'marl:Positive'
}, {
'input': 'But no emoticons 😢',
'polarity': 'marl:Negative'
}]
if __name__ == '__main__':
easy_test()


@@ -1,40 +0,0 @@
#!/usr/local/bin/python
# coding: utf-8
from senpy import easy_test, SentimentBox, MappingMixin
import basic
class Basic(MappingMixin, SentimentBox):
'''Provides sentiment annotation using a lexicon'''
author = '@balkian'
version = '0.1'
mappings = {
'pos': 'marl:Positive',
'neg': 'marl:Negative',
'default': 'marl:Neutral'
}
def predict_one(self, input):
return basic.get_polarity(input)
test_cases = [{
'input': 'Hello :)',
'polarity': 'marl:Positive'
}, {
'input': 'So sad :(',
'polarity': 'marl:Negative'
}, {
'input': 'Yay! Emojis 😁',
'polarity': 'marl:Positive'
}, {
'input': 'But no emoticons 😢',
'polarity': 'marl:Negative'
}]
if __name__ == '__main__':
easy_test()


@@ -1,105 +0,0 @@
#!/usr/local/bin/python
# coding: utf-8
from senpy import easy_test, models, plugins
import basic
class Dictionary(plugins.SentimentPlugin):
'''Sentiment annotation using a configurable lexicon'''
author = '@balkian'
version = '0.2'
dictionaries = [basic.emojis, basic.emoticons]
mappings = {'pos': 'marl:Positive', 'neg': 'marl:Negative'}
def analyse_entry(self, entry, params):
polarity = basic.get_polarity(entry.text, self.dictionaries)
if polarity in self.mappings:
polarity = self.mappings[polarity]
s = models.Sentiment(marl__hasPolarity=polarity)
s.prov(self)
entry.sentiments.append(s)
yield entry
test_cases = [{
'input': 'Hello :)',
'polarity': 'marl:Positive'
}, {
'input': 'So sad :(',
'polarity': 'marl:Negative'
}, {
'input': 'Yay! Emojis 😁',
'polarity': 'marl:Positive'
}, {
'input': 'But no emoticons 😢',
'polarity': 'marl:Negative'
}]
class EmojiOnly(Dictionary):
'''Sentiment annotation with a basic lexicon of emojis'''
dictionaries = [basic.emojis]
test_cases = [{
'input': 'Hello :)',
'polarity': 'marl:Neutral'
}, {
'input': 'So sad :(',
'polarity': 'marl:Neutral'
}, {
'input': 'Yay! Emojis 😁',
'polarity': 'marl:Positive'
}, {
'input': 'But no emoticons 😢',
'polarity': 'marl:Negative'
}]
class EmoticonsOnly(Dictionary):
'''Sentiment annotation with a basic lexicon of emoticons'''
dictionaries = [basic.emoticons]
test_cases = [{
'input': 'Hello :)',
'polarity': 'marl:Positive'
}, {
'input': 'So sad :(',
'polarity': 'marl:Negative'
}, {
'input': 'Yay! Emojis 😁',
'polarity': 'marl:Neutral'
}, {
'input': 'But no emoticons 😢',
'polarity': 'marl:Neutral'
}]
class Salutes(Dictionary):
'''Sentiment annotation with a custom lexicon, for illustration purposes'''
dictionaries = [{
'marl:Positive': ['Hello', '!'],
'marl:Negative': ['Good bye', ]
}]
test_cases = [{
'input': 'Hello :)',
'polarity': 'marl:Positive'
}, {
'input': 'Good bye :(',
'polarity': 'marl:Negative'
}, {
'input': 'Yay! Emojis 😁',
'polarity': 'marl:Positive'
}, {
'input': 'But no emoticons 😢',
'polarity': 'marl:Neutral'
}]
if __name__ == '__main__':
easy_test()


@@ -1,25 +0,0 @@
from senpy import AnalysisPlugin, easy
class Dummy(AnalysisPlugin):
'''This is a dummy self-contained plugin'''
author = '@balkian'
version = '0.1'
def analyse_entry(self, entry, params):
entry['nif:isString'] = entry['nif:isString'][::-1]
entry.reversed = entry.get('reversed', 0) + 1
yield entry
test_cases = [{
'entry': {
'nif:isString': 'Hello',
},
'expected': {
'nif:isString': 'olleH'
}
}]
if __name__ == '__main__':
easy()


@@ -1,40 +0,0 @@
from senpy import AnalysisPlugin, easy
class DummyRequired(AnalysisPlugin):
'''This is a dummy self-contained plugin'''
author = '@balkian'
version = '0.1'
extra_params = {
'example': {
'description': 'An example parameter',
'required': True,
'options': ['a', 'b']
}
}
def analyse_entry(self, entry, params):
entry['nif:isString'] = entry['nif:isString'][::-1]
entry.reversed = entry.get('reversed', 0) + 1
yield entry
test_cases = [{
'entry': {
'nif:isString': 'Hello',
},
'should_fail': True
}, {
'entry': {
'nif:isString': 'Hello',
},
'params': {
'example': 'a'
},
'expected': {
'nif:isString': 'olleH'
}
}]
if __name__ == '__main__':
easy()


@@ -1,24 +0,0 @@
import noop
from senpy.plugins import SentimentPlugin
class NoOp(SentimentPlugin):
'''This plugin does nothing. Literally nothing.'''
version = 0
def analyse_entry(self, entry, *args, **kwargs):
yield entry
def test(self):
print(dir(noop))
super(NoOp, self).test()
test_cases = [{
'entry': {
'nif:isString': 'hello'
},
'expected': {
'nif:isString': 'hello'
}
}]


@@ -1,3 +0,0 @@
module: mynoop
requirements:
- noop


@@ -1,63 +0,0 @@
#!/usr/local/bin/python
# coding: utf-8
from senpy import easy_test, models, plugins
import basic
class ParameterizedDictionary(plugins.SentimentPlugin):
'''This is a basic self-contained plugin'''
author = '@balkian'
version = '0.2'
extra_params = {
'positive-words': {
'description': 'Comma-separated list of words that are considered positive',
'aliases': ['positive'],
'required': True
},
'negative-words': {
'description': 'Comma-separated list of words that are considered negative',
'aliases': ['negative'],
'required': False
}
}
def analyse_entry(self, entry, params):
positive_words = params['positive-words'].split(',')
negative_words = params['negative-words'].split(',')
dictionary = {
'marl:Positive': positive_words,
'marl:Negative': negative_words,
}
polarity = basic.get_polarity(entry.text, [dictionary])
s = models.Sentiment(marl__hasPolarity=polarity)
s.prov(self)
entry.sentiments.append(s)
yield entry
test_cases = [
{
'input': 'Hello :)',
'polarity': 'marl:Positive',
'parameters': {
'positive': "Hello,:)",
'negative': "sad,:()"
}
},
{
'input': 'Hello :)',
'polarity': 'marl:Negative',
'parameters': {
'positive': "",
'negative': "Hello"
}
}
]
if __name__ == '__main__':
easy_test()


@@ -1,33 +0,0 @@
'''
Create a dummy dataset.
Messages with a happy emoticon are labelled positive
Messages with a sad emoticon are labelled negative
'''
import random
dataset = []
vocabulary = ['hello', 'world', 'senpy', 'cool', 'goodbye', 'random', 'text']
emojimap = {
1: [':)', ],
-1: [':(', ]
}
for tag, values in emojimap.items():
for i in range(1000):
msg = ''
for j in range(3):
msg += random.choice(vocabulary)
msg += " "
msg += random.choice(values)
dataset.append([msg, tag])
text = []
labels = []
for i in dataset:
text.append(i[0])
labels.append(i[1])


@@ -1,30 +0,0 @@
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from mydata import text, labels
X_train, X_test, y_train, y_test = train_test_split(text, labels, test_size=0.12, random_state=42)
from sklearn.naive_bayes import MultinomialNB
count_vec = CountVectorizer(tokenizer=lambda x: x.split())
clf3 = MultinomialNB()
pipeline = Pipeline([('cv', count_vec),
('clf', clf3)])
pipeline.fit(X_train, y_train)
print('Feature names: {}'.format(count_vec.get_feature_names()))
print('Class count: {}'.format(clf3.class_count_))
if __name__ == '__main__':
print('--Results--')
tests = [
(['The sentiment for senpy should be positive :)', ], 1),
(['The sentiment for anything else should be negative :()', ], -1)
]
for features, expected in tests:
result = pipeline.predict(features)
print('Input: {}\nExpected: {}\nGot: {}'.format(features[0], expected, result))


@@ -1,37 +0,0 @@
from senpy import SentimentBox, MappingMixin, easy_test
from mypipeline import pipeline
class PipelineSentiment(MappingMixin, SentimentBox):
'''
This is a pipeline plugin that wraps a classifier defined in another module
(mypipeline).
'''
author = '@balkian'
version = 0.1
maxPolarityValue = 1
minPolarityValue = -1
mappings = {
1: 'marl:Positive',
-1: 'marl:Negative'
}
def predict_one(self, input):
return pipeline.predict([input, ])[0]
test_cases = [
{
'input': 'The sentiment for senpy should be positive :)',
'polarity': 'marl:Positive'
},
{
'input': 'The sentiment for senpy should be negative :(',
'polarity': 'marl:Negative'
}
]
if __name__ == '__main__':
easy_test()


@@ -1,27 +0,0 @@
from senpy.plugins import AnalysisPlugin
from time import sleep
class Sleep(AnalysisPlugin):
'''Dummy plugin to test async'''
author = "@balkian"
version = "0.2"
timeout = 0.05
extra_params = {
"timeout": {
"@id": "timeout_sleep",
"aliases": ["timeout", "to"],
"required": False,
"default": 0
}
}
def activate(self, *args, **kwargs):
sleep(self.timeout)
def analyse_entry(self, entry, params):
sleep(float(params.get("timeout", self.timeout)))
yield entry
def test(self):
pass


@@ -1 +0,0 @@
gsitk


@@ -1,7 +0,0 @@
Deploy senpy to a kubernetes cluster.
Usage:
```
kubectl apply -f . -n senpy
```


@@ -1,26 +0,0 @@
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: senpy-latest
spec:
replicas: 1
template:
metadata:
labels:
role: senpy-latest
app: test
spec:
containers:
- name: senpy-latest
image: $IMAGEWTAG
imagePullPolicy: Always
args:
- "--default-plugins"
resources:
limits:
memory: "512Mi"
cpu: "1000m"
ports:
- name: web
containerPort: 5000


@@ -1,14 +0,0 @@
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: senpy-ingress
spec:
rules:
- host: latest.senpy.cluster.gsi.dit.upm.es
http:
paths:
- path: /
backend:
serviceName: senpy-latest
servicePort: 5000


@@ -1,12 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: senpy-latest
spec:
type: ClusterIP
ports:
- port: 5000
protocol: TCP
selector:
role: senpy-latest


@@ -1,15 +1,11 @@
Flask>=0.10.1 Flask>=0.10.1
requests>=2.4.1 requests>=2.4.1
tornado>=4.4.3 gevent>=1.1rc4
PyLD>=0.6.5 PyLD>=0.6.5
nltk six
future future
jsonschema jsonschema
jsonref jsonref
PyYAML PyYAML
rdflib rdflib
rdflib-jsonld rdflib-jsonld
numpy
scipy
scikit-learn
responses


@@ -25,10 +25,4 @@ logger = logging.getLogger(__name__)
logger.info('Using senpy version: {}'.format(__version__)) logger.info('Using senpy version: {}'.format(__version__))
from .utils import easy, easy_load, easy_test # noqa: F401
from .models import * # noqa: F401,F403
from .plugins import * # noqa: F401,F403
from .extensions import * # noqa: F401,F403
__all__ = ['api', 'blueprints', 'cli', 'extensions', 'models', 'plugins'] __all__ = ['api', 'blueprints', 'cli', 'extensions', 'models', 'plugins']


@@ -22,17 +22,35 @@ the server.
from flask import Flask from flask import Flask
from senpy.extensions import Senpy from senpy.extensions import Senpy
from senpy.utils import easy_test from gevent.wsgi import WSGIServer
from gevent.monkey import patch_all
import logging import logging
import os import os
import sys import sys
import argparse import argparse
import senpy import senpy
patch_all(thread=False)
SERVER_PORT = os.environ.get("PORT", 5000) SERVER_PORT = os.environ.get("PORT", 5000)
def info(type, value, tb):
if hasattr(sys, 'ps1') or not sys.stderr.isatty():
# we are in interactive mode or we don't have a tty-like
# device, so we call the default hook
sys.__excepthook__(type, value, tb)
else:
import traceback
import pdb
# we are NOT in interactive mode, print the exception...
traceback.print_exception(type, value, tb)
print
# ...then start the debugger in post-mortem mode.
# pdb.pm() # deprecated
pdb.post_mortem(tb) # more "modern"
def main(): def main():
parser = argparse.ArgumentParser(description='Run a Senpy server') parser = argparse.ArgumentParser(description='Run a Senpy server')
parser.add_argument( parser.add_argument(
@@ -40,7 +58,7 @@ def main():
'-l', '-l',
metavar='logging_level', metavar='logging_level',
type=str, type=str,
default="WARN", default="INFO",
help='Logging level') help='Logging level')
parser.add_argument( parser.add_argument(
'--debug', '--debug',
@@ -68,7 +86,7 @@ def main():
'--plugins-folder', '--plugins-folder',
'-f', '-f',
type=str, type=str,
default='.', default='plugins',
help='Where to look for plugins.') help='Where to look for plugins.')
parser.add_argument( parser.add_argument(
'--only-install', '--only-install',
@@ -76,94 +94,28 @@ def main():
action='store_true', action='store_true',
default=False, default=False,
help='Do not run a server, only install plugin dependencies') help='Do not run a server, only install plugin dependencies')
parser.add_argument(
'--only-test',
action='store_true',
default=False,
help='Do not run a server, just test all plugins')
parser.add_argument(
'--test',
'-t',
action='store_true',
default=False,
help='Test all plugins before launching the server')
parser.add_argument(
'--only-list',
'--list',
action='store_true',
default=False,
help='Do not run a server, only list plugins found')
parser.add_argument(
'--data-folder',
'--data',
type=str,
default=None,
help='Where to look for data. It can be set with the SENPY_DATA environment variable as well.')
parser.add_argument(
'--threaded',
action='store_false',
default=True,
help='Run a threaded server')
parser.add_argument(
'--no-deps',
'-n',
action='store_true',
default=False,
help='Skip installing dependencies')
parser.add_argument(
'--version',
'-v',
action='store_true',
default=False,
help='Output the senpy version and exit')
parser.add_argument(
'--allow-fail',
'--fail',
action='store_true',
default=False,
help='Do not exit if some plugins fail to activate')
args = parser.parse_args() args = parser.parse_args()
if args.version: logging.basicConfig()
print('Senpy version {}'.format(senpy.__version__))
print(sys.version)
exit(1)
rl = logging.getLogger() rl = logging.getLogger()
rl.setLevel(getattr(logging, args.level)) rl.setLevel(getattr(logging, args.level))
app = Flask(__name__) app = Flask(__name__)
app.debug = args.debug app.debug = args.debug
sp = Senpy(app, args.plugins_folder, if args.debug:
default_plugins=args.default_plugins, sys.excepthook = info
data_folder=args.data_folder) sp = Senpy(app, args.plugins_folder, default_plugins=args.default_plugins)
if args.only_list:
plugins = sp.plugins()
maxname = max(len(x.name) for x in plugins)
maxversion = max(len(x.version) for x in plugins)
print('Found {} plugins:'.format(len(plugins)))
for plugin in plugins:
import inspect
fpath = inspect.getfile(plugin.__class__)
print('\t{: <{maxname}} @ {: <{maxversion}} -> {}'.format(plugin.name,
plugin.version,
fpath,
maxname=maxname,
maxversion=maxversion))
return
if not args.no_deps:
sp.install_deps()
if args.only_install: if args.only_install:
sp.install_deps()
return return
sp.activate_all(allow_fail=args.allow_fail) sp.activate_all()
if args.test or args.only_test: http_server = WSGIServer((args.host, args.port), app)
easy_test(sp.plugins(), debug=args.debug) try:
if args.only_test: print('Senpy version {}'.format(senpy.__version__))
return print('Server running on port %s:%d. Ctrl+C to quit' % (args.host,
print('Senpy version {}'.format(senpy.__version__)) args.port))
print('Server running on port %s:%d. Ctrl+C to quit' % (args.host, http_server.serve_forever()
args.port)) except KeyboardInterrupt:
app.run(args.host, print('Bye!')
args.port, http_server.stop()
threaded=args.threaded,
debug=app.debug)
sp.deactivate_all() sp.deactivate_all()


@@ -1,53 +1,38 @@
from future.utils import iteritems from future.utils import iteritems
from .models import Error, Results, Entry, from_string from .models import Error
import logging import logging
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
boolean = [True, False]
API_PARAMS = { API_PARAMS = {
"algorithm": { "algorithm": {
"aliases": ["algorithms", "a", "algo"], "aliases": ["algorithm", "a", "algo"],
"required": False, "required": False,
"description": ("Algorithms that will be used to process the request."
"It may be a list of comma-separated names."),
},
"expanded-jsonld": {
"@id": "expanded-jsonld",
"aliases": ["expanded"],
"options": boolean,
"required": True,
"default": False
},
"with_parameters": {
"aliases": ['withparameters',
'with-parameters'],
"options": boolean,
"default": False,
"required": True
}, },
"outformat": { "outformat": {
"@id": "outformat", "@id": "outformat",
"aliases": ["o"], "aliases": ["outformat", "o"],
"default": "json-ld", "default": "json-ld",
"required": True, "required": True,
"options": ["json-ld", "turtle", "ntriples"], "options": ["json-ld", "turtle"],
}, },
"help": { "expanded-jsonld": {
"@id": "help", "@id": "expanded-jsonld",
"description": "Show additional help to know more about the possible parameters", "aliases": ["expanded", "expanded-jsonld"],
"aliases": ["h"],
"required": True, "required": True,
"options": boolean, "default": 0
"default": False
}, },
"emotionModel": { "emotionModel": {
"@id": "emotionModel", "@id": "emotionModel",
"aliases": ["emoModel"], "aliases": ["emotionModel", "emoModel"],
"required": False "required": False
}, },
"plugin_type": {
"@id": "pluginType",
"description": 'What kind of plugins to list',
"aliases": ["pluginType", "plugin_type"],
"required": True,
"default": "analysisPlugin"
},
"conversion": { "conversion": {
"@id": "conversion", "@id": "conversion",
"description": "How to show the elements that have (not) been converted", "description": "How to show the elements that have (not) been converted",
@@ -57,43 +42,17 @@ API_PARAMS = {
} }
} }
EVAL_PARAMS = {
"algorithm": {
"aliases": ["plug", "p", "plugins", "algorithms", 'algo', 'a', 'plugin'],
"description": "Plugins to be evaluated",
"required": True,
"help": "See activated plugins in /plugins"
},
"dataset": {
"aliases": ["datasets", "data", "d"],
"description": "Datasets to be evaluated",
"required": True,
"help": "See avalaible datasets in /datasets"
}
}
PLUGINS_PARAMS = {
"plugin_type": {
"@id": "pluginType",
"description": 'What kind of plugins to list',
"aliases": ["pluginType"],
"required": True,
"default": 'analysisPlugin'
}
}
WEB_PARAMS = { WEB_PARAMS = {
"inHeaders": { "inHeaders": {
"aliases": ["headers"], "aliases": ["inHeaders", "headers"],
"required": True, "required": True,
"default": False, "default": "0"
"options": boolean
}, },
} }
CLI_PARAMS = { CLI_PARAMS = {
"plugin_folder": { "plugin_folder": {
"aliases": ["folder"], "aliases": ["plugin_folder", "folder"],
"required": True, "required": True,
"default": "." "default": "."
}, },
@@ -102,104 +61,71 @@ CLI_PARAMS = {
NIF_PARAMS = { NIF_PARAMS = {
"input": { "input": {
"@id": "input", "@id": "input",
"aliases": ["i"], "aliases": ["i", "input"],
"required": True, "required": True,
"help": "Input text" "help": "Input text"
}, },
"informat": {
"@id": "informat",
"aliases": ["f", "informat"],
"required": False,
"default": "text",
"options": ["turtle", "text"],
},
"intype": { "intype": {
"@id": "intype", "@id": "intype",
"aliases": ["t"], "aliases": ["intype", "t"],
"required": False, "required": False,
"default": "direct", "default": "direct",
"options": ["direct", "url", "file"], "options": ["direct", "url", "file"],
}, },
"informat": {
"@id": "informat",
"aliases": ["f"],
"required": False,
"default": "text",
"options": ["text", "json-ld"],
},
"language": { "language": {
"@id": "language", "@id": "language",
"aliases": ["l"], "aliases": ["language", "l"],
"required": False, "required": False,
}, },
"prefix": { "prefix": {
"@id": "prefix", "@id": "prefix",
"aliases": ["p"], "aliases": ["prefix", "p"],
"required": True, "required": True,
"default": "", "default": "",
}, },
"urischeme": { "urischeme": {
"@id": "urischeme", "@id": "urischeme",
"aliases": ["u"], "aliases": ["urischeme", "u"],
"required": False, "required": False,
"default": "RFC5147String", "default": "RFC5147String",
"options": ["RFC5147String", ] "options": "RFC5147String"
} },
} }
def parse_params(indict, *specs): def parse_params(indict, spec=NIF_PARAMS):
if not specs: logger.debug("Parsing: {}\n{}".format(indict, spec))
specs = [NIF_PARAMS]
logger.debug("Parsing: {}\n{}".format(indict, specs))
outdict = indict.copy() outdict = indict.copy()
wrong_params = {} wrong_params = {}
for spec in specs: for param, options in iteritems(spec):
for param, options in iteritems(spec): if param[0] != "@": # Exclude json-ld properties
for alias in options.get("aliases", []): for alias in options.get("aliases", []):
# Replace each alias with the correct name of the parameter if alias in indict:
if alias in indict and alias != param:
outdict[param] = indict[alias] outdict[param] = indict[alias]
del outdict[alias]
continue
if param not in outdict: if param not in outdict:
if "default" in options: if options.get("required", False) and "default" not in options:
# We assume the default is correct
outdict[param] = options["default"]
elif options.get("required", False):
wrong_params[param] = spec[param] wrong_params[param] = spec[param]
continue else:
if "options" in options: if "default" in options:
if options["options"] == boolean: outdict[param] = options["default"]
outdict[param] = outdict[param] in [None, True, 'true', '1'] else:
elif outdict[param] not in options["options"]: if "options" in spec[param] and \
outdict[param] not in spec[param]["options"]:
wrong_params[param] = spec[param] wrong_params[param] = spec[param]
if wrong_params: if wrong_params:
logger.debug("Error parsing: %s", wrong_params) logger.debug("Error parsing: %s", wrong_params)
message = Error( message = Error(
status=400, status=400,
message='Missing or invalid parameters', message="Missing or invalid parameters",
parameters=outdict, parameters=outdict,
errors=wrong_params) errors={param: error
for param, error in iteritems(wrong_params)})
raise message raise message
if 'algorithm' in outdict and not isinstance(outdict['algorithm'], list):
outdict['algorithm'] = list(outdict['algorithm'].split(','))
return outdict return outdict
def parse_extra_params(request, plugin=None):
params = request.parameters.copy()
if plugin:
extra_params = parse_params(params, plugin.get('extra_params', {}))
params.update(extra_params)
return params
def parse_call(params):
'''Return a results object based on the parameters used in a call/request.
'''
params = parse_params(params, NIF_PARAMS)
if params['informat'] == 'text':
results = Results()
entry = Entry(nif__isString=params['input'],
id='#') # Use @base
results.entries.append(entry)
elif params['informat'] == 'json-ld':
results = from_string(params['input'], cls=Results)
else: # pragma: no cover
raise NotImplementedError('Informat {} is not implemented'.format(params['informat']))
results.parameters = params
return results


@@ -18,40 +18,20 @@
Blueprints for Senpy Blueprints for Senpy
""" """
from flask import (Blueprint, request, current_app, render_template, url_for, from flask import (Blueprint, request, current_app, render_template, url_for,
jsonify, redirect) jsonify)
from .models import Error, Response, Help, Plugins, read_schema, dump_schema, Datasets from .models import Error, Response, Plugins, read_schema
from . import api from .api import WEB_PARAMS, API_PARAMS, parse_params
from .version import __version__ from .version import __version__
from functools import wraps from functools import wraps
import logging import logging
import json
import base64
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
api_blueprint = Blueprint("api", __name__) api_blueprint = Blueprint("api", __name__)
demo_blueprint = Blueprint("demo", __name__, template_folder='templates') demo_blueprint = Blueprint("demo", __name__)
ns_blueprint = Blueprint("ns", __name__) ns_blueprint = Blueprint("ns", __name__)
_mimetypes_r = {'json-ld': ['application/ld+json'],
'turtle': ['text/turtle'],
'ntriples': ['application/n-triples'],
'text': ['text/plain']}
MIMETYPES = {}
for k, vs in _mimetypes_r.items():
for v in vs:
if v in MIMETYPES:
raise Exception('MIMETYPE {} specified for two formats: {} and {}'.format(v,
v,
MIMETYPES[v]))
MIMETYPES[v] = k
DEFAULT_MIMETYPE = 'application/ld+json'
DEFAULT_FORMAT = 'json-ld'
def get_params(req): def get_params(req):
if req.method == 'POST': if req.method == 'POST':
@@ -63,178 +43,100 @@ def get_params(req):
return indict return indict
def encoded_url(url=None, base=None):
code = ''
if not url:
if request.method == 'GET':
url = request.full_path[1:] # Remove the first slash
else:
hash(frozenset(request.form.params().items()))
code = 'hash:{}'.format(hash)
code = code or base64.urlsafe_b64encode(url.encode()).decode()
if base:
return base + code
return url_for('api.decode', code=code, _external=True)
def decoded_url(code, base=None):
if code.startswith('hash:'):
raise Exception('Can not decode a URL for a POST request')
base = base or request.url_root
path = base64.urlsafe_b64decode(code.encode()).decode()
return base + path
@demo_blueprint.route('/') @demo_blueprint.route('/')
def index(): def index():
ev = str(get_params(request).get('evaluation', False)) return render_template("index.html", version=__version__)
evaluation_enabled = ev.lower() not in ['false', 'no', 'none']
return render_template("index.html",
evaluation=evaluation_enabled,
version=__version__)
@api_blueprint.route('/contexts/<entity>.jsonld') @api_blueprint.route('/contexts/<entity>.jsonld')
def context(entity="context"): def context(entity="context"):
context = Response._context context = Response._context
context['@vocab'] = url_for('ns.index', _external=True) context['@vocab'] = url_for('ns.index', _external=True)
context['endpoint'] = url_for('api.api_root', _external=True)
return jsonify({"@context": context}) return jsonify({"@context": context})
@api_blueprint.route('/d/<code>')
def decode(code):
try:
return redirect(decoded_url(code))
except Exception:
return Error('invalid URL').flask()
@ns_blueprint.route('/') # noqa: F811 @ns_blueprint.route('/') # noqa: F811
def index(): def index():
context = Response._context.copy() context = Response._context
context['endpoint'] = url_for('api.api_root', _external=True) context['@vocab'] = url_for('.ns', _external=True)
return jsonify({"@context": context}) return jsonify({"@context": context})
@api_blueprint.route('/schemas/<schema>')
def schema(schema="definitions"):
    try:
-        return dump_schema(read_schema(schema))
-    except Exception as ex:  # Should be FileNotFoundError, but it's missing from py2
-        return Error(message="Schema not found: {}".format(ex), status=404).flask()
+        return jsonify(read_schema(schema))
+    except Exception:  # Should be FileNotFoundError, but it's missing from py2
+        return Error(message="Schema not found", status=404).flask()
def basic_api(f):
-    default_params = {
-        'inHeaders': False,
-        'expanded-jsonld': False,
-        'outformat': None,
-        'with_parameters': True,
-    }
    @wraps(f)
    def decorated_function(*args, **kwargs):
        raw_params = get_params(request)
-        logger.info('Getting request: {}'.format(raw_params))
-        headers = {'X-ORIGINAL-PARAMS': json.dumps(raw_params)}
-        params = default_params
+        headers = {'X-ORIGINAL-PARAMS': raw_params}
+        # Get defaults
+        web_params = parse_params({}, spec=WEB_PARAMS)
+        api_params = parse_params({}, spec=API_PARAMS)
+        outformat = 'json-ld'
        try:
-            params = api.parse_params(raw_params, api.WEB_PARAMS, api.API_PARAMS)
-            if hasattr(request, 'parameters'):
-                request.parameters.update(params)
+            print('Getting request:')
+            print(request)
+            web_params = parse_params(raw_params, spec=WEB_PARAMS)
+            api_params = parse_params(raw_params, spec=API_PARAMS)
+            if hasattr(request, 'params'):
+                request.params.update(api_params)
            else:
-                request.parameters = params
+                request.params = api_params
            response = f(*args, **kwargs)
-        except (Exception) as ex:
-            if current_app.debug or current_app.config['TESTING']:
-                raise
-            if not isinstance(ex, Error):
-                msg = "{}".format(ex)
-                ex = Error(message=msg, status=500)
+        except Error as ex:
            response = ex
-            response.parameters = raw_params
-            logger.exception(ex)
-        if 'parameters' in response and not params['with_parameters']:
-            del response.parameters
-        logger.info('Response: {}'.format(response))
-        mime = request.accept_mimetypes\
-                      .best_match(MIMETYPES.keys(),
-                                  DEFAULT_MIMETYPE)
-        mimeformat = MIMETYPES.get(mime, DEFAULT_FORMAT)
-        outformat = params['outformat'] or mimeformat
+        in_headers = web_params['inHeaders'] != "0"
+        expanded = api_params['expanded-jsonld']
+        outformat = api_params['outformat']
        return response.flask(
-            in_headers=params['inHeaders'],
+            in_headers=in_headers,
            headers=headers,
-            prefix=params.get('prefix', encoded_url()),
+            prefix=url_for('.api', _external=True),
            context_uri=url_for('api.context',
                                entity=type(response).__name__,
                                _external=True),
            outformat=outformat,
-            expanded=params['expanded-jsonld'])
+            expanded=expanded)
    return decorated_function
-@api_blueprint.route('/', defaults={'plugin': None}, methods=['POST', 'GET'])
-@api_blueprint.route('/<path:plugin>', methods=['POST', 'GET'])
+@api_blueprint.route('/', methods=['POST', 'GET'])
@basic_api
-def api_root(plugin):
-    if request.parameters['help']:
-        dic = dict(api.API_PARAMS, **api.NIF_PARAMS)
-        response = Help(valid_parameters=dic)
-        return response
-    req = api.parse_call(request.parameters)
-    if plugin:
-        plugin = plugin.replace('+', '/')
-        plugin = plugin.split('/')
-        req.parameters['algorithm'] = plugin
-    return current_app.senpy.analyse(req)
+def api():
+    response = current_app.senpy.analyse(**request.params)
+    return response

-@api_blueprint.route('/evaluate/', methods=['POST', 'GET'])
-@basic_api
-def evaluate():
-    if request.parameters['help']:
-        dic = dict(api.EVAL_PARAMS)
-        response = Help(parameters=dic)
-        return response
-    else:
-        params = api.parse_params(request.parameters, api.EVAL_PARAMS)
-        response = current_app.senpy.evaluate(params)
-        return response
@api_blueprint.route('/plugins/', methods=['POST', 'GET'])
@basic_api
def plugins():
    sp = current_app.senpy
-    params = api.parse_params(request.parameters, api.PLUGINS_PARAMS)
-    ptype = params.get('plugin_type')
-    plugins = list(sp.plugins(plugin_type=ptype))
-    dic = Plugins(plugins=plugins)
+    ptype = request.params.get('plugin_type')
+    plugins = sp.filter_plugins(plugin_type=ptype)
+    dic = Plugins(plugins=list(plugins.values()))
    return dic
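This endpoint is what the commit message refers to: `/api/plugins` only lists analysis plugins by default and accepts a `plugin_type` filter. A hedged client-side sketch (host and port are assumptions):

```
import requests

resp = requests.get('http://localhost:5000/api/plugins/',
                    params={'plugin_type': 'analysisPlugin'})
for plugin in resp.json().get('plugins', []):
    print(plugin.get('name'))
```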
@api_blueprint.route('/plugins/<plugin>/', methods=['POST', 'GET'])
@basic_api
-def plugin(plugin):
-    sp = current_app.senpy
-    return sp.get_plugin(plugin)
+def plugin(plugin=None):
+    sp = current_app.senpy
+    if plugin == 'default' and sp.default_plugin:
+        return sp.default_plugin
+    plugins = sp.filter_plugins(
+        id='plugins/{}'.format(plugin)) or sp.filter_plugins(name=plugin)
+    if plugins:
+        response = list(plugins.values())[0]
+    else:
+        return Error(message="Plugin not found", status=404)
+    return response

-@api_blueprint.route('/datasets/', methods=['POST', 'GET'])
-@basic_api
-def datasets():
-    sp = current_app.senpy
-    datasets = sp.datasets
-    dic = Datasets(datasets=list(datasets.values()))
-    return dic

View File

@@ -1,7 +1,7 @@
import sys
from .models import Error
+from .api import parse_params, CLI_PARAMS
from .extensions import Senpy
-from . import api
def argv_to_dict(argv):
@@ -13,31 +13,27 @@ def argv_to_dict(argv):
        if argv[i][0] == '-':
            key = argv[i].strip('-')
            value = argv[i + 1] if len(argv) > i + 1 else None
-            if not value or value[0] == '-':
-                cli_dict[key] = True
+            if value and value[0] == '-':
+                cli_dict[key] = ""
            else:
                cli_dict[key] = value
    return cli_dict
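Both versions agree on the basic flag parsing and differ only in how value-less flags are stored. A runnable re-statement of the behaviour on the `-` side of the hunk above:

```
def argv_to_dict(argv):
    # Value-less flags become True on the `-` side shown above.
    cli_dict = {}
    for i in range(len(argv)):
        if argv[i][0] == '-':
            key = argv[i].strip('-')
            value = argv[i + 1] if len(argv) > i + 1 else None
            if not value or value[0] == '-':
                cli_dict[key] = True
            else:
                cli_dict[key] = value
    return cli_dict

assert argv_to_dict(['--input', 'hello', '--verbose']) == {
    'input': 'hello', 'verbose': True}
```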
+def parse_cli(argv):
+    cli_dict = argv_to_dict(argv)
+    cli_params = parse_params(cli_dict, spec=CLI_PARAMS)
+    return cli_params, cli_dict

def main_function(argv):
    '''This is the method for unit testing
    '''
-    params = api.parse_params(argv_to_dict(argv),
-                              api.CLI_PARAMS,
-                              api.API_PARAMS,
-                              api.NIF_PARAMS)
-    plugin_folder = params['plugin_folder']
-    default_plugins = params.get('default-plugins', False)
-    sp = Senpy(default_plugins=default_plugins, plugin_folder=plugin_folder)
-    request = api.parse_call(params)
-    algos = request.parameters.get('algorithm', None)
-    if algos:
-        for algo in algos:
-            sp.activate_plugin(algo)
-    else:
-        sp.activate_all()
-    res = sp.analyse(request)
+    cli_params, cli_dict = parse_cli(argv)
+    plugin_folder = cli_params['plugin_folder']
+    sp = Senpy(default_plugins=False, plugin_folder=plugin_folder)
+    sp.activate_all(sync=True)
+    res = sp.analyse(**cli_dict)
    return res
@@ -46,9 +42,9 @@ def main():
    '''
    try:
        res = main_function(sys.argv[1:])
-        print(res.serialize())
+        print(res.to_JSON())
    except Error as err:
-        print(err.serialize())
+        print(err.to_JSON())
        sys.exit(2)
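A hedged sketch of driving the entry point above from Python; the flag names follow the parameter specs, and the serialization method (`serialize` vs `to_JSON`) depends on which side of this diff is installed:

```
from senpy.cli import main_function

# Analyse a sentence with plugins found in the current folder (assumed flags).
res = main_function(['--input', 'This is a test', '--plugin_folder', '.'])
print(res.serialize())  # or res.to_JSON() on the `+` side of the diff
```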

View File

@@ -12,22 +12,12 @@ class Client(object):
    def analyse(self, input, method='GET', **kwargs):
        return self.request('/', method=method, input=input, **kwargs)

-    def evaluate(self, input, method='GET', **kwargs):
-        return self.request('/evaluate', method=method, input=input, **kwargs)
-
-    def plugins(self, *args, **kwargs):
-        resp = self.request(path='/plugins').plugins
-        return {p.name: p for p in resp}
-
-    def datasets(self):
-        resp = self.request(path='/datasets').datasets
-        return {d.name: d for d in resp}
-
    def request(self, path=None, method='GET', **params):
-        url = '{}{}'.format(self.endpoint.rstrip('/'), path)
+        url = '{}{}'.format(self.endpoint, path)
        response = requests.request(method=method, url=url, params=params)
        try:
            resp = models.from_dict(response.json())
-            resp.validate(resp)
        except Exception as ex:
            logger.error(('There seems to be a problem with the response:\n'
                          '\tURL: {url}\n'

View File

@@ -5,54 +5,49 @@ It orchestrates plugin (de)activation and analysis.
from future import standard_library
standard_library.install_aliases()

-from . import plugins, api
-from .plugins import Plugin, evaluate
-from .models import Error, AggregatedEvaluation
+from . import plugins
+from .plugins import SenpyPlugin
+from .models import Error, Entry, Results
from .blueprints import api_blueprint, demo_blueprint, ns_blueprint
+from .api import API_PARAMS, NIF_PARAMS, parse_params
from threading import Thread
-from functools import partial

import os
import copy
-import errno
+import fnmatch
+import inspect
+import sys
+import importlib
import logging
+import traceback
+import yaml

-from . import gsitk_compat
+import pip

logger = logging.getLogger(__name__)
class Senpy(object):
    """ Default Senpy extension for Flask """

    def __init__(self,
                 app=None,
                 plugin_folder=".",
-                 data_folder=None,
                 default_plugins=False):
-        self.app = app
-        default_data = os.path.join(os.getcwd(), 'senpy_data')
-        self.data_folder = data_folder or os.environ.get('SENPY_DATA', default_data)
-        try:
-            os.makedirs(self.data_folder)
-        except OSError as e:
-            if e.errno == errno.EEXIST:
-                logger.debug('Data folder exists: {}'.format(self.data_folder))
-            else:  # pragma: no cover
-                raise
+        self._search_folders = set()
+        self._plugin_list = []
+        self._outdated = True
        self._default = None
-        self._plugins = {}
-        if plugin_folder:
-            self.add_folder(plugin_folder)
+        self.add_folder(plugin_folder)
        if default_plugins:
            self.add_folder('plugins', from_root=True)
        else:
            # Add only conversion plugins
            self.add_folder(os.path.join('plugins', 'conversion'),
                            from_root=True)
+        self.app = app
        if app is not None:
            self.init_app(app)
@@ -67,204 +62,123 @@ class Senpy(object):
        # otherwise fall back to the request context
        if hasattr(app, 'teardown_appcontext'):
            app.teardown_appcontext(self.teardown)
-        else:  # pragma: no cover
+        else:
            app.teardown_request(self.teardown)
        app.register_blueprint(api_blueprint, url_prefix="/api")
        app.register_blueprint(ns_blueprint, url_prefix="/ns")
        app.register_blueprint(demo_blueprint, url_prefix="/")
def add_plugin(self, plugin):
self._plugins[plugin.name.lower()] = plugin
def delete_plugin(self, plugin):
del self._plugins[plugin.name.lower()]
def plugins(self, **kwargs):
""" Return the plugins registered for a given application. Filtered by criteria """
return list(plugins.pfilter(self._plugins, **kwargs))
def get_plugin(self, name, default=None):
if name == 'default':
return self.default_plugin
plugin = name.lower()
if plugin in self._plugins:
return self._plugins[plugin]
results = self.plugins(id='endpoint:plugins/{}'.format(name))
if not results:
return Error(message="Plugin not found", status=404)
return results[0]
@property
def analysis_plugins(self):
""" Return only the analysis plugins """
return self.plugins(plugin_type='analysisPlugin')
    def add_folder(self, folder, from_root=False):
-        """ Find plugins in this folder and add them to this instance """
        if from_root:
            folder = os.path.join(os.path.dirname(__file__), folder)
        logger.debug("Adding folder: %s", folder)
        if os.path.isdir(folder):
-            new_plugins = plugins.from_folder([folder],
-                                              data_folder=self.data_folder)
-            for plugin in new_plugins:
-                self.add_plugin(plugin)
+            self._search_folders.add(folder)
+            self._outdated = True
        else:
-            raise AttributeError("Not a folder or does not exist: %s", folder)
+            logger.debug("Not a folder: %s", folder)
-    def _get_plugins(self, request):
-        if not self.analysis_plugins:
-            raise Error(
-                status=404,
-                message=("No plugins found."
-                         " Please install one."))
-        algos = request.parameters.get('algorithm', None)
-        if not algos:
-            if self.default_plugin:
-                algos = [self.default_plugin.name, ]
-            else:
-                raise Error(
-                    status=404,
-                    message="No default plugin found, and None provided")
-        plugins = list()
-        for algo in algos:
-            algo = algo.lower()
-            if algo not in self._plugins:
-                msg = ("The algorithm '{}' is not valid\n"
-                       "Valid algorithms: {}").format(algo,
-                                                      self._plugins.keys())
-                logger.debug(msg)
-                raise Error(
-                    status=404,
-                    message=msg)
-            plugins.append(self._plugins[algo])
-        return plugins
+    def _find_plugin(self, params):
+        api_params = parse_params(params, spec=API_PARAMS)
+        algo = None
+        if "algorithm" in api_params and api_params["algorithm"]:
+            algo = api_params["algorithm"]
+        elif self.plugins:
+            algo = self.default_plugin and self.default_plugin.name
+        if not algo:
+            raise Error(
+                status=404,
+                message=("No plugins found."
+                         " Please install one.").format(algo))
+        if algo not in self.plugins:
+            logger.debug(("The algorithm '{}' is not valid\n"
+                          "Valid algorithms: {}").format(algo,
                                                          self.plugins.keys()))
def _process_entries(self, entries, req, plugins):
"""
Recursively process the entries with the first plugin in the list, and pass the results
to the rest of the plugins.
"""
if not plugins:
for i in entries:
yield i
return
plugin = plugins[0]
specific_params = api.parse_extra_params(req, plugin)
req.analysis.append({'plugin': plugin,
'parameters': specific_params})
results = plugin.analyse_entries(entries, specific_params)
for i in self._process_entries(results, req, plugins[1:]):
yield i
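The recursion in `_process_entries` is just generator chaining: each plugin consumes the stream produced by the previous one, so entries flow through the whole pipeline lazily. A minimal standalone illustration of the same pattern (not senpy code):

```
def chain(entries, funcs):
    # Base case: no more "plugins", pass entries through untouched.
    if not funcs:
        return entries
    # Feed the lazily-mapped stream to the remaining stages.
    return chain(map(funcs[0], entries), funcs[1:])

assert list(chain(['a', 'b'], [str.upper, lambda s: s + '!'])) == ['A!', 'B!']
```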
def install_deps(self):
plugins.install_deps(*self.plugins())
def analyse(self, request):
"""
Main method that analyses a request, either from CLI or HTTP.
It takes a processed request, provided by the user, as returned
by api.parse_call().
"""
logger.debug("analysing request: {}".format(request))
entries = request.entries
request.entries = []
plugins = self._get_plugins(request)
results = request
for i in self._process_entries(entries, results, plugins):
results.entries.append(i)
self.convert_emotions(results)
logger.debug("Returning analysis result: {}".format(results))
results.analysis = [i['plugin'].id for i in results.analysis]
return results
-    def _get_datasets(self, request):
-        if not self.datasets:
-            raise Error(
-                status=404,
-                message=("No datasets found."
-                         " Please verify DatasetManager"))
-        datasets_name = request.parameters.get('dataset', None).split(',')
-        for dataset in datasets_name:
-            if dataset not in self.datasets:
-                logger.debug(("The dataset '{}' is not valid\n"
-                              "Valid datasets: {}").format(dataset,
-                                                           self.datasets.keys()))
-                raise Error(
-                    status=404,
-                    message="The dataset '{}' is not valid".format(dataset))
-        dm = gsitk_compat.DatasetManager()
-        datasets = dm.prepare_datasets(datasets_name)
-        return datasets
-
-    @property
-    def datasets(self):
-        self._dataset_list = {}
-        dm = gsitk_compat.DatasetManager()
-        for item in dm.get_datasets():
-            for key in item:
-                if key in self._dataset_list:
-                    continue
-                properties = item[key]
-                properties['@id'] = key
-                self._dataset_list[key] = properties
-        return self._dataset_list
-
-    def evaluate(self, params):
-        logger.debug("evaluating request: {}".format(params))
-        results = AggregatedEvaluation()
-        results.parameters = params
-        datasets = self._get_datasets(results)
-        plugins = self._get_plugins(results)
-        for eval in evaluate(plugins, datasets):
-            results.evaluations.append(eval)
-        if 'with_parameters' not in results.parameters:
-            del results.parameters
-        logger.debug("Returning evaluation result: {}".format(results))
-        return results
+            raise Error(
+                status=404,
+                message="The algorithm '{}' is not valid".format(algo))
+        if not self.plugins[algo].is_activated:
+            logger.debug("Plugin not activated: {}".format(algo))
+            raise Error(
+                status=400,
+                message=("The algorithm '{}'"
+                         " is not activated yet").format(algo))
+        return self.plugins[algo]
+
+    def _get_params(self, params, plugin):
+        nif_params = parse_params(params, spec=NIF_PARAMS)
+        extra_params = plugin.get('extra_params', {})
+        specific_params = parse_params(params, spec=extra_params)
+        nif_params.update(specific_params)
+        return nif_params
+
+    def _get_entries(self, params):
+        entry = None
+        if params['informat'] == 'text':
+            entry = Entry(text=params['input'])
+        else:
+            raise NotImplemented('Only text input format implemented')
+        yield entry
+
+    def analyse(self, **api_params):
+        logger.debug("analysing with params: {}".format(api_params))
+        plugin = self._find_plugin(api_params)
+        nif_params = self._get_params(api_params, plugin)
+        resp = Results()
+        if 'with_parameters' in api_params:
+            resp.parameters = nif_params
+        try:
+            entries = []
+            for i in self._get_entries(nif_params):
+                entries += list(plugin.analyse_entry(i, nif_params))
+            resp.entries = entries
+            self.convert_emotions(resp, plugin, nif_params)
+            resp.analysis.append(plugin.id)
+            logger.debug("Returning analysis result: {}".format(resp))
+        except Error as ex:
+            logger.exception('Error returning analysis result')
+            resp = ex
+        except Exception as ex:
+            logger.exception('Error returning analysis result')
+            resp = Error(message=str(ex), status=500)
+        return resp
    def _conversion_candidates(self, fromModel, toModel):
-        candidates = self.plugins(plugin_type='emotionConversionPlugin')
-        for candidate in candidates:
+        candidates = self.filter_plugins(**{'@type': 'emotionConversionPlugin'})
+        for name, candidate in candidates.items():
            for pair in candidate.onyx__doesConversion:
                logging.debug(pair)
                if pair['onyx:conversionFrom'] == fromModel \
                        and pair['onyx:conversionTo'] == toModel:
+                    # logging.debug('Found candidate: {}'.format(candidate))
                    yield candidate
-    def convert_emotions(self, resp):
+    def convert_emotions(self, resp, plugin, params):
        """
-        Conversion of all emotions in a response **in place**.
+        Conversion of all emotions in a response.

        In addition to converting from one model to another, it has
        to include the conversion plugin to the analysis list.
        Needless to say, this is far from an elegant solution, but it works.
        @todo refactor and clean up
        """
-        plugins = [i['plugin'] for i in resp.analysis]
-        params = resp.parameters
+        fromModel = plugin.get('onyx:usesEmotionModel', None)
        toModel = params.get('emotionModel', None)
+        output = params.get('conversion', None)
+        logger.debug('Asked for model: {}'.format(toModel))
+        logger.debug('Analysis plugin uses model: {}'.format(fromModel))
        if not toModel:
            return
-        try:
-            logger.debug('Asked for model: {}'.format(toModel))
-            output = params.get('conversion', None)
-            candidates = {}
-            for plugin in plugins:
-                try:
-                    fromModel = plugin.get('onyx:usesEmotionModel', None)
-                    candidates[plugin.id] = next(self._conversion_candidates(fromModel, toModel))
-                    logger.debug('Analysis plugin {} uses model: {}'.format(plugin.id, fromModel))
-                except StopIteration:
-                    e = Error(('No conversion plugin found for: '
-                               '{} -> {}'.format(fromModel, toModel)),
-                              status=404)
-                    e.original_response = resp
-                    e.parameters = params
-                    raise e
+        try:
+            candidate = next(self._conversion_candidates(fromModel, toModel))
+        except StopIteration:
+            e = Error(('No conversion plugin found for: '
+                       '{} -> {}'.format(fromModel, toModel)))
+            e.original_response = resp
+            e.parameters = params
+            raise e
        newentries = []
        for i in resp.entries:
            if output == "full":
@@ -272,10 +186,6 @@ class Senpy(object):
            else:
                newemotions = []
                for j in i.emotions:
-                    plugname = j['prov:wasGeneratedBy']
-                    candidate = candidates[plugname]
-                    resp.analysis.append({'plugin': candidate,
-                                          'parameters': params})
                    for k in candidate.convert(j, fromModel, toModel, params):
                        k.prov__wasGeneratedBy = candidate.id
                        if output == 'nested':
@@ -284,102 +194,208 @@ class Senpy(object):
                i.emotions = newemotions
            newentries.append(i)
        resp.entries = newentries
+        resp.analysis.append(candidate.id)
    @property
    def default_plugin(self):
-        if not self._default or not self._default.is_activated:
-            candidates = self.plugins(plugin_type='analysisPlugin',
-                                      is_activated=True)
-            if len(candidates) > 0:
-                self._default = candidates[0]
-            else:
-                self._default = None
-        logger.debug("Default: {}".format(self._default))
-        return self._default
+        candidate = self._default
+        if not candidate:
+            candidates = self.filter_plugins(is_activated=True)
+            if len(candidates) > 0:
+                candidate = list(candidates.values())[0]
+        logger.debug("Default: {}".format(candidate))
+        return candidate

    @default_plugin.setter
    def default_plugin(self, value):
-        if isinstance(value, Plugin):
-            if not value.is_activated:
-                raise AttributeError('The default plugin has to be activated.')
+        if isinstance(value, SenpyPlugin):
            self._default = value
        else:
-            self._default = self._plugins[value.lower()]
+            self._default = self.plugins[value]
-    def activate_all(self, sync=True, allow_fail=False):
+    def activate_all(self, sync=False):
        ps = []
-        for plug in self._plugins.keys():
-            try:
-                self.activate_plugin(plug, sync=sync)
-            except Exception as ex:
-                if not allow_fail:
-                    raise
-                logger.error('Could not activate {}: {}'.format(plug, ex))
+        for plug in self.plugins.keys():
+            ps.append(self.activate_plugin(plug, sync=sync))
        return ps

-    def deactivate_all(self, sync=True):
+    def deactivate_all(self, sync=False):
        ps = []
-        for plug in self._plugins.keys():
+        for plug in self.plugins.keys():
            ps.append(self.deactivate_plugin(plug, sync=sync))
        return ps
-    def _set_active(self, plugin, active=True, *args, **kwargs):
-        ''' We're using a variable in the plugin itself to activate/deactivate plugins.\
+    def _set_active_plugin(self, plugin_name, active=True, *args, **kwargs):
+        ''' We're using a variable in the plugin itself to activate/deactive plugins.\
        Note that plugins may activate themselves by setting this variable.
        '''
-        plugin.is_activated = active
+        self.plugins[plugin_name].is_activated = active
-    def _activate(self, plugin):
-        success = False
-        with plugin._lock:
-            if plugin.is_activated:
-                return
-            plugin.activate()
-            msg = "Plugin activated: {}".format(plugin.name)
-            logger.info(msg)
-            success = True
-            self._set_active(plugin, success)
-        return success
-
-    def activate_plugin(self, plugin_name, sync=True):
-        plugin_name = plugin_name.lower()
-        if plugin_name not in self._plugins:
+    def activate_plugin(self, plugin_name, sync=False):
+        try:
+            plugin = self.plugins[plugin_name]
+        except KeyError:
            raise Error(
                message="Plugin not found: {}".format(plugin_name), status=404)
-        plugin = self._plugins[plugin_name]
        logger.info("Activating plugin: {}".format(plugin.name))
-        if sync or not getattr(plugin, 'async', True):
-            return self._activate(plugin)
+        def act():
+            success = False
+            try:
+                plugin.activate()
+                msg = "Plugin activated: {}".format(plugin.name)
+                logger.info(msg)
+                success = True
+                self._set_active_plugin(plugin_name, success)
+            except Exception as ex:
+                msg = "Error activating plugin {} - {} : \n\t{}".format(
+                    plugin.name, ex, traceback.format_exc())
+                logger.error(msg)
+                raise Error(msg)
+        if sync or 'async' in plugin and not plugin.async:
+            act()
        else:
-            th = Thread(target=partial(self._activate, plugin))
+            th = Thread(target=act)
            th.start()
-            return th
-    def _deactivate(self, plugin):
-        with plugin._lock:
-            if not plugin.is_activated:
-                return
-            plugin.deactivate()
-            logger.info("Plugin deactivated: {}".format(plugin.name))
-
-    def deactivate_plugin(self, plugin_name, sync=True):
-        plugin_name = plugin_name.lower()
-        if plugin_name not in self._plugins:
+    def deactivate_plugin(self, plugin_name, sync=False):
+        try:
+            plugin = self.plugins[plugin_name]
+        except KeyError:
            raise Error(
                message="Plugin not found: {}".format(plugin_name), status=404)
-        plugin = self._plugins[plugin_name]
-        self._set_active(plugin, False)
-        if sync or not getattr(plugin, 'async', True):
-            self._deactivate(plugin)
+        self._set_active_plugin(plugin_name, False)
+        def deact():
+            try:
+                plugin.deactivate()
+                logger.info("Plugin deactivated: {}".format(plugin.name))
+            except Exception as ex:
+                logger.error(
+                    "Error deactivating plugin {}: {}".format(plugin.name, ex))
+                logger.error("Trace: {}".format(traceback.format_exc()))
+        if sync or 'async' in plugin and not plugin.async:
+            deact()
        else:
-            th = Thread(target=partial(self._deactivate, plugin))
+            th = Thread(target=deact)
            th.start()
-            return th
@classmethod
def validate_info(cls, info):
return all(x in info for x in ('name', 'module', 'description', 'version'))
def install_deps(self):
for i in self.plugins.values():
self._install_deps(i)
@classmethod
def _install_deps(cls, info=None):
requirements = info.get('requirements', [])
if requirements:
pip_args = []
pip_args.append('install')
pip_args.append('--use-wheel')
for req in requirements:
pip_args.append(req)
logger.info('Installing requirements: ' + str(requirements))
pip.main(pip_args)
@classmethod
def _load_module(cls, name, root):
sys.path.append(root)
tmp = importlib.import_module(name)
sys.path.remove(root)
return tmp
@classmethod
def _load_plugin_from_info(cls, info, root):
if not cls.validate_info(info):
logger.warn('The module info is not valid.\n\t{}'.format(info))
return None, None
module = info["module"]
name = info["name"]
cls._install_deps(info)
tmp = cls._load_module(module, root)
candidate = None
for _, obj in inspect.getmembers(tmp):
if inspect.isclass(obj) and inspect.getmodule(obj) == tmp:
logger.debug(("Found plugin class:"
" {}@{}").format(obj, inspect.getmodule(obj)))
candidate = obj
break
if not candidate:
logger.debug("No valid plugin for: {}".format(module))
return
module = candidate(info=info)
return name, module
@classmethod
def _load_plugin(cls, root, filename):
fpath = os.path.join(root, filename)
logger.debug("Loading plugin: {}".format(fpath))
with open(fpath, 'r') as f:
info = yaml.load(f)
logger.debug("Info: {}".format(info))
return cls._load_plugin_from_info(info, root)
def _load_plugins(self):
plugins = {}
for search_folder in self._search_folders:
for root, dirnames, filenames in os.walk(search_folder):
for filename in fnmatch.filter(filenames, '*.senpy'):
name, plugin = self._load_plugin(root, filename)
if plugin and name:
plugins[name] = plugin
self._outdated = False
return plugins
    def teardown(self, exception):
        pass
@property
def plugins(self):
""" Return the plugins registered for a given application. """
if self._outdated:
self._plugin_list = self._load_plugins()
return self._plugin_list
def filter_plugins(self, **kwargs):
""" Filter plugins by different criteria """
ptype = kwargs.pop('plugin_type', None)
logger.debug('#' * 100)
logger.debug('ptype {}'.format(ptype))
if ptype:
try:
ptype = ptype[0].upper() + ptype[1:]
pclass = getattr(plugins, ptype)
logger.debug('Class: {}'.format(pclass))
candidates = filter(lambda x: isinstance(x, pclass),
self.plugins.values())
except AttributeError:
raise Error('{} is not a valid type'.format(ptype))
else:
candidates = self.plugins.values()
logger.debug(candidates)
def matches(plug):
res = all(getattr(plug, k, None) == v for (k, v) in kwargs.items())
logger.debug(
"matching {} with {}: {}".format(plug.name, kwargs, res))
return res
if kwargs:
candidates = filter(matches, candidates)
return {p.name: p for p in candidates}
@property
def analysis_plugins(self):
""" Return only the analysis plugins """
return self.filter_plugins(plugin_type='analysisPlugin')
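Putting the extension together, a hedged usage sketch (folder names are assumptions; with the `+` version above, plugins are discovered lazily from `*.senpy` files under the registered folders, and `analysis_plugins` returns a name-to-plugin dict):

```
from flask import Flask
from senpy.extensions import Senpy

app = Flask(__name__)
sp = Senpy(app, plugin_folder='plugins', default_plugins=False)
sp.activate_all(sync=True)
print(list(sp.analysis_plugins.keys()))  # names of the analysis plugins found
```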

View File

@@ -1,23 +0,0 @@
import logging
logger = logging.getLogger(__name__)
MSG = 'GSITK is not (properly) installed.'
IMPORTMSG = '{} Some functions will be unavailable.'.format(MSG)
RUNMSG = '{} Install it to use this function.'.format(MSG)
def raise_exception(*args, **kwargs):
raise Exception(RUNMSG)
try:
from gsitk.datasets.datasets import DatasetManager
from gsitk.evaluation.evaluation import Evaluation as Eval
from sklearn.pipeline import Pipeline
GSITK_AVAILABLE = True
modules = locals()
except ImportError:
logger.warn(IMPORTMSG)
GSITK_AVAILABLE = False
DatasetManager = Eval = Pipeline = raise_exception

View File

@@ -1,257 +0,0 @@
'''
Meta-programming for the models.
'''
import os
import json
import jsonschema
import inspect
import copy
from abc import ABCMeta
from collections import MutableMapping, namedtuple
class BaseMeta(ABCMeta):
'''
Metaclass for models. It extracts the default values for the fields in
the model.
For instance, instances of the following class wouldn't need to mark
their version or description on initialization:
.. code-block:: python
class MyPlugin(Plugin):
version=0.3
description='A dull plugin'
Note that these operations could be included in the __init__ of the
class, but it would be very inefficient.
'''
_subtypes = {}
def __new__(mcs, name, bases, attrs, **kwargs):
register_afterwards = False
defaults = {}
attrs = mcs.expand_with_schema(name, attrs)
if 'schema' in attrs:
register_afterwards = True
for base in bases:
if hasattr(base, '_defaults'):
defaults.update(getattr(base, '_defaults'))
info, rest = mcs.split_attrs(attrs)
for i in list(info.keys()):
if isinstance(info[i], _Alias):
fget, fset, fdel = make_property(info[i].indict)
rest[i] = property(fget=fget, fset=fset, fdel=fdel)
else:
defaults[i] = info[i]
rest['_defaults'] = defaults
cls = super(BaseMeta, mcs).__new__(mcs, name, tuple(bases), rest)
if register_afterwards:
mcs.register(cls, defaults['@type'])
return cls
@classmethod
def register(mcs, rsubclass, rtype=None):
mcs._subtypes[rtype or rsubclass.__name__] = rsubclass
@staticmethod
def expand_with_schema(name, attrs):
if 'schema' in attrs: # Schema specified by name
schema_file = '{}.json'.format(attrs['schema'])
elif 'schema_file' in attrs:
schema_file = attrs['schema_file']
del attrs['schema_file']
else:
return attrs
if '/' not in 'schema_file':
thisdir = os.path.dirname(os.path.realpath(__file__))
schema_file = os.path.join(thisdir,
'schemas',
schema_file)
schema_path = 'file://' + schema_file
with open(schema_file) as f:
schema = json.load(f)
resolver = jsonschema.RefResolver(schema_path, schema)
attrs['@type'] = "".join((name[0].lower(), name[1:]))
attrs['_schema_file'] = schema_file
attrs['schema'] = schema
attrs['_validator'] = jsonschema.Draft4Validator(schema, resolver=resolver)
schema_defaults = BaseMeta.get_defaults(attrs['schema'])
attrs.update(schema_defaults)
return attrs
@staticmethod
def is_func(v):
return inspect.isroutine(v) or inspect.ismethod(v) or \
inspect.ismodule(v) or isinstance(v, property)
@staticmethod
def is_internal(k):
return k[0] == '_' or k == 'schema' or k == 'data'
@staticmethod
def get_key(key):
if key[0] != '_':
key = key.replace("__", ":", 1)
return key
@staticmethod
def split_attrs(attrs):
'''
Extract the attributes of the class.
This allows adding default values in the class definition.
e.g.:
'''
isattr = {}
rest = {}
for key, value in attrs.items():
if not (BaseMeta.is_internal(key)) and (not BaseMeta.is_func(value)):
isattr[key] = value
else:
rest[key] = value
return isattr, rest
@staticmethod
def get_defaults(schema):
temp = {}
for obj in [
schema,
] + schema.get('allOf', []):
for k, v in obj.get('properties', {}).items():
if 'default' in v and k not in temp:
temp[k] = v['default']
return temp
def make_property(key):
def fget(self):
return self[key]
def fdel(self):
del self[key]
def fset(self, value):
self[key] = value
return fget, fset, fdel
class CustomDict(MutableMapping, object):
'''
A dictionary whose elements can also be accessed as attributes. Since some
characters are not valid in the dot-notation, the attribute names also
converted. e.g.:
> d = CustomDict()
> d.key = d['ns:name'] = 1
> d.key == d['key']
True
> d.ns__name == d['ns:name']
'''
_defaults = {}
_map_attr_key = {'id': '@id'}
def __init__(self, *args, **kwargs):
super(CustomDict, self).__init__()
for k, v in self._defaults.items():
self[k] = copy.copy(v)
for arg in args:
self.update(arg)
for k, v in kwargs.items():
self[self._attr_to_key(k)] = v
return self
def serializable(self):
def ser_or_down(item):
if hasattr(item, 'serializable'):
return item.serializable()
elif isinstance(item, dict):
temp = dict()
for kp in item:
vp = item[kp]
temp[kp] = ser_or_down(vp)
return temp
elif isinstance(item, list) or isinstance(item, set):
return list(ser_or_down(i) for i in item)
else:
return item
return ser_or_down(self.as_dict())
def __getitem__(self, key):
key = self._key_to_attr(key)
return self.__dict__[key]
def __setitem__(self, key, value):
'''Do not insert data directly, there might be a property in that key. '''
key = self._key_to_attr(key)
return setattr(self, key, value)
def as_dict(self):
return {self._attr_to_key(k): v for k, v in self.__dict__.items()
if not self._internal_key(k)}
def __iter__(self):
return (k for k in self.__dict__ if not self._internal_key(k))
def __len__(self):
return len(self.__dict__)
def __delitem__(self, key):
del self.__dict__[key]
def update(self, other):
for k, v in other.items():
self[k] = v
def _attr_to_key(self, key):
key = key.replace("__", ":", 1)
key = self._map_attr_key.get(key, key)
return key
def _key_to_attr(self, key):
if self._internal_key(key):
return key
key = key.replace(":", "__", 1)
return key
def __getattr__(self, key):
try:
return self.__dict__[self._attr_to_key(key)]
except KeyError:
raise AttributeError
@staticmethod
def _internal_key(key):
return key[0] == '_'
def __str__(self):
return str(self.serializable())
def __repr__(self):
return str(self.serializable())
_Alias = namedtuple('Alias', 'indict')
def alias(key):
return _Alias(key)
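A quick, hypothetical sanity check of the attribute/key mapping implemented above. Note the quirk that `CustomDict.__init__` ends with `return self`, so calling `CustomDict()` directly would make `type.__call__` raise TypeError; going through a subclass whose `__init__` returns None sidesteps that:

```
class Demo(CustomDict):
    def __init__(self):
        # CustomDict.__init__ returns self; its return value is ignored here.
        super(Demo, self).__init__()

d = Demo()
d.ns__name = 1                # a double underscore maps to a colon...
assert d['ns:name'] == 1      # ...so the value is reachable as 'ns:name'
d['@id'] = 'urn:example'
assert d.id == 'urn:example'  # 'id' is aliased to '@id'
```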

View File

@@ -6,29 +6,23 @@ For compatibility with Py3 and for easier debugging, this new version drops
introspection and adds all arguments to the models.
'''
from __future__ import print_function

-from future import standard_library
-standard_library.install_aliases()
-from future.utils import with_metaclass
-from past.builtins import basestring
+from six import string_types

import time
import copy
import json
import os
import jsonref
+import jsonschema

from flask import Response as FlaskResponse
from pyld import jsonld

-import logging
-logging.getLogger('rdflib').setLevel(logging.WARN)
-logger = logging.getLogger(__name__)
from rdflib import Graph

+import logging
+logger = logging.getLogger(__name__)

-from .meta import BaseMeta, CustomDict, alias

DEFINITIONS_FILE = 'definitions.json'
CONTEXT_PATH = os.path.join(
@@ -51,102 +45,40 @@ def read_schema(schema_file, absolute=False):
        return jsonref.load(f, base_uri=schema_uri)

-def dump_schema(schema):
-    return jsonref.dumps(schema)
+base_schema = read_schema(DEFINITIONS_FILE)

-def load_context(context):
-    logging.debug('Loading context: {}'.format(context))
-    if not context:
-        return context
-    elif isinstance(context, list):
-        contexts = []
-        for c in context:
-            contexts.append(load_context(c))
-        return contexts
-    elif isinstance(context, dict):
-        return dict(context)
-    elif isinstance(context, basestring):
-        try:
-            with open(context) as f:
-                return dict(json.loads(f.read()))
-        except IOError:
-            return context
-    else:
-        raise AttributeError('Please, provide a valid context')
+class Context(dict):
+    @staticmethod
+    def load(context):
+        logging.debug('Loading context: {}'.format(context))
+        if not context:
+            return context
+        elif isinstance(context, list):
+            contexts = []
+            for c in context:
+                contexts.append(Context.load(c))
+            return contexts
+        elif isinstance(context, dict):
+            return Context(context)
+        elif isinstance(context, string_types):
+            try:
+                with open(context) as f:
+                    return Context(json.loads(f.read()))
+            except IOError:
+                return context
+        else:
+            raise AttributeError('Please, provide a valid context')

-base_context = load_context(CONTEXT_PATH)
+base_context = Context.load(CONTEXT_PATH)
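Hedged examples of what the loader accepts (the URL is made up; this uses the `+` side's `Context.load`, but the `-` side's `load_context` behaves analogously): dicts and lists are converted recursively, and a string is first tried as a path to a JSON file, falling back to the string itself, e.g. a remote context URL, on IOError:

```
ctx = Context.load({'marl': 'http://www.gsi.dit.upm.es/ontologies/marl/ns#'})
assert isinstance(ctx, dict)

url = 'http://example.com/context.jsonld'
assert Context.load(url) == url  # unreadable as a file, returned verbatim
```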
-def register(rsubclass, rtype=None):
-    BaseMeta.register(rsubclass, rtype)
+class SenpyMixin(object):

-class BaseModel(with_metaclass(BaseMeta, CustomDict)):
'''
Entities of the base model are a special kind of dictionary that emulates
a JSON-LD object. The structure of the dictionary is checked via JSON-schema.
For convenience, the values can also be accessed as attributes
(a la Javascript). e.g.:
>>> myobject.key == myobject['key']
True
>>> myobject.ns__name == myobject['ns:name']
True
Additionally, subclasses of this class can specify default values for their
instances. These defaults are inherited by subclasses. e.g.:
>>> class NewModel(BaseModel):
... mydefault = 5
>>> n1 = NewModel()
>>> n1['mydefault'] == 5
True
>>> n1.mydefault = 3
>>> n1['mydefault'] == 3
True
>>> n2 = NewModel()
>>> n2['mydefault'] == 5
True
>>> class SubModel(NewModel):
...     pass
>>> subn = SubModel()
>>> subn.mydefault == 5
True
Lastly, every subclass that also specifies a schema will get registered, so it
is possible to deserialize JSON and get the right type.
i.e. to recover an instance of the original class from a plain JSON.
'''
schema_file = DEFINITIONS_FILE
    _context = base_context["@context"]
def __init__(self, *args, **kwargs):
auto_id = kwargs.pop('_auto_id', True)
super(BaseModel, self).__init__(*args, **kwargs)
if auto_id:
self.id
if '@type' not in self:
logger.warn('Created an instance of an unknown model')
@property
def id(self):
if '@id' not in self:
self['@id'] = '_:{}_{}'.format(type(self).__name__, time.time())
return self['@id']
@id.setter
def id(self, value):
self['@id'] = value
    def flask(self,
-              in_headers=False,
+              in_headers=True,
              headers=None,
              outformat='json-ld',
              **kwargs):
@@ -170,28 +102,26 @@ class BaseModel(with_metaclass(BaseMeta, CustomDict)):
        })
        return FlaskResponse(
            response=content,
-            status=self.get('status', 200),
+            status=getattr(self, "status", 200),
            headers=headers,
            mimetype=mimetype)
    def serialize(self, format='json-ld', with_mime=False, **kwargs):
        js = self.jsonld(**kwargs)
-        content = json.dumps(js, indent=2, sort_keys=True)
        if format == 'json-ld':
+            content = json.dumps(js, indent=2, sort_keys=True)
            mimetype = "application/json"
-        elif format in ['turtle', 'ntriples']:
+        elif format in ['turtle', ]:
            logger.debug(js)
-            base = kwargs.get('prefix')
+            content = json.dumps(js, indent=2, sort_keys=True)
            g = Graph().parse(
                data=content,
                format='json-ld',
-                base=base,
-                context=[self._context,
-                         {'@base': base}])
+                base=kwargs.get('prefix'),
+                context=self._context)
            logger.debug(
                'Parsing with prefix: {}'.format(kwargs.get('prefix')))
-            content = g.serialize(format=format,
-                                  base=base).decode('utf-8')
+            content = g.serialize(format='turtle').decode('utf-8')
            mimetype = 'text/{}'.format(format)
        else:
            raise Error('Unknown outformat: {}'.format(format))
@@ -200,70 +130,135 @@ class BaseModel(with_metaclass(BaseMeta, CustomDict)):
        else:
            return content
def serializable(self):
def ser_or_down(item):
if hasattr(item, 'serializable'):
return item.serializable()
elif isinstance(item, dict):
temp = dict()
for kp in item:
vp = item[kp]
temp[kp] = ser_or_down(vp)
return temp
elif isinstance(item, list):
return list(ser_or_down(i) for i in item)
else:
return item
return ser_or_down(self._plain_dict())
    def jsonld(self,
-               with_context=False,
+               with_context=True,
               context_uri=None,
               prefix=None,
               expanded=False):
-        result = self.serializable()
+        ser = self.serializable()
+        result = jsonld.compact(
+            ser,
+            self._context,
+            options={
+                'base': prefix,
+                'expandContext': self._context,
+                'senpy': prefix
+            })
+        if context_uri:
+            result['@context'] = context_uri
        if expanded:
            result = jsonld.expand(
                result, options={'base': prefix,
-                                 'expandContext': self._context})[0]
+                                 'expandContext': self._context})
        if not with_context:
-            try:
-                del result['@context']
-            except KeyError:
-                pass
-        elif context_uri:
-            result['@context'] = context_uri
-        else:
-            result['@context'] = self._context
+            del result['@context']
        return result
def to_JSON(self, *args, **kwargs):
js = json.dumps(self.jsonld(*args, **kwargs), indent=4, sort_keys=True)
return js
    def validate(self, obj=None):
        if not obj:
            obj = self
        if hasattr(obj, "jsonld"):
            obj = obj.jsonld()
-        self._validator.validate(obj)
+        jsonschema.validate(obj, self.schema)

-    def prov(self, another):
-        self['prov:wasGeneratedBy'] = another.id
+    def __str__(self):
+        return str(self.to_JSON())

-def subtypes():
-    return BaseMeta._subtypes
+class BaseModel(SenpyMixin, dict):
+    schema = base_schema

-def from_dict(indict, cls=None):
-    if not cls:
-        target = indict.get('@type', None)
-        cls = BaseModel
-        try:
-            cls = subtypes()[target]
-        except KeyError:
-            pass
-    outdict = dict()
-    for k, v in indict.items():
-        if k == '@context':
-            pass
-        elif isinstance(v, dict):
-            v = from_dict(indict[k])
-        elif isinstance(v, list):
-            v = v[:]
-            for ix, v2 in enumerate(v):
-                if isinstance(v2, dict):
-                    v[ix] = from_dict(v2)
-        outdict[k] = copy.copy(v)
-    return cls(**outdict)
-
-def from_string(string, **kwargs):
-    return from_dict(json.loads(string), **kwargs)
+    def __init__(self, *args, **kwargs):
+        if 'id' in kwargs:
+            self.id = kwargs.pop('id')
+        elif kwargs.pop('_auto_id', True):
+            self.id = '_:{}_{}'.format(type(self).__name__, time.time())
+        temp = dict(*args, **kwargs)
+        for obj in [
+                self.schema,
+        ] + self.schema.get('allOf', []):
+            for k, v in obj.get('properties', {}).items():
+                if 'default' in v and k not in temp:
+                    temp[k] = copy.deepcopy(v['default'])
+        for i in temp:
+            nk = self._get_key(i)
+            if nk != i:
+                temp[nk] = temp[i]
+                del temp[i]
+        try:
+            temp['@type'] = getattr(self, '@type')
+        except AttributeError:
+            logger.warn('Creating an instance of an unknown model')
+        super(BaseModel, self).__init__(temp)
+
+    def _get_key(self, key):
+        key = key.replace("__", ":", 1)
+        return key
+
+    def __setitem__(self, key, value):
+        dict.__setitem__(self, key, value)
+
+    def __delitem__(self, key):
+        dict.__delitem__(self, key)
+
+    def __getattr__(self, key):
+        try:
+            return self.__getitem__(self._get_key(key))
+        except KeyError:
+            raise AttributeError(key)
+
+    def __setattr__(self, key, value):
+        self.__setitem__(self._get_key(key), value)
+
+    def __delattr__(self, key):
+        self.__delitem__(self._get_key(key))
+
+    def _plain_dict(self):
+        d = {k: v for (k, v) in self.items() if k[0] != "_"}
+        if 'id' in d:
+            d["@id"] = d.pop('id')
+        return d
+
+_subtypes = {}
+
+def register(rsubclass, rtype=None):
+    _subtypes[rtype or rsubclass.__name__] = rsubclass
+
+def from_dict(indict):
+    target = indict.get('@type', None)
+    if target and target in _subtypes:
+        cls = _subtypes[target]
+    else:
+        cls = BaseModel
+    return cls(**indict)

def from_json(injson):
@@ -271,63 +266,27 @@ def from_json(injson):
    return from_dict(indict)
-class Entry(BaseModel):
-    schema = 'entry'
-    text = alias('nif:isString')

-class Sentiment(BaseModel):
-    schema = 'sentiment'
-    polarity = alias('marl:hasPolarity')
-    polarityValue = alias('marl:hasPolarityValue')

-class Error(BaseModel, Exception):
-    schema = 'error'

-    def __init__(self, message='Generic senpy exception', *args, **kwargs):
-        Exception.__init__(self, message)
-        super(Error, self).__init__(*args, **kwargs)
-        self.message = message

-    def __str__(self):
-        if not hasattr(self, 'errors'):
-            return self.message
-        return '{}:\n\t{}'.format(self.message, self.errors)

-    def __hash__(self):
-        return Exception.__hash__(self)

-# Add the remaining schemas programmatically
-def _class_from_schema(name, schema=None, schema_file=None, base_classes=None):
+def from_schema(name, schema_file=None, base_classes=None):
    base_classes = base_classes or []
    base_classes.append(BaseModel)
-    attrs = {}
-    if schema:
-        attrs['schema'] = schema
-    elif schema_file:
-        attrs['schema_file'] = schema_file
-    else:
-        attrs['schema'] = name
-    name = "".join((name[0].upper(), name[1:]))
-    return BaseMeta(name, base_classes, attrs)
+    schema_file = schema_file or '{}.json'.format(name)
+    class_name = '{}{}'.format(name[0].upper(), name[1:])
+    newclass = type(class_name, tuple(base_classes), {})
+    setattr(newclass, '@type', name)
+    setattr(newclass, 'schema', read_schema(schema_file))
+    setattr(newclass, 'class_name', class_name)
+    register(newclass, name)
+    return newclass

-def _add_class_from_schema(*args, **kwargs):
-    generatedClass = _class_from_schema(*args, **kwargs)
+def _add_from_schema(*args, **kwargs):
+    generatedClass = from_schema(*args, **kwargs)
    globals()[generatedClass.__name__] = generatedClass
    del generatedClass

for i in [
-    'aggregatedEvaluation',
    'analysis',
-    'dataset',
-    'datasets',
    'emotion',
    'emotionConversion',
    'emotionConversionPlugin',
@@ -335,17 +294,48 @@ for i in [
    'emotionModel',
    'emotionPlugin',
    'emotionSet',
-    'evaluation',
-    'entity',
-    'help',
-    'metric',
+    'entry',
    'plugin',
    'plugins',
    'response',
    'results',
+    'sentiment',
    'sentimentPlugin',
    'suggestion',
-    'topic',
]:
-    _add_class_from_schema(i)
+    _add_from_schema(i)
_ErrorModel = from_schema('error')
class Error(SenpyMixin, BaseException):
def __init__(self, message, *args, **kwargs):
super(Error, self).__init__(self, message, message)
self._error = _ErrorModel(message=message, *args, **kwargs)
self.message = message
def __getitem__(self, key):
return self._error[key]
def __setitem__(self, key, value):
self._error[key] = value
def __delitem__(self, key):
del self._error[key]
def __getattr__(self, key):
if key != '_error' and hasattr(self._error, key):
return getattr(self._error, key)
raise AttributeError(key)
def __setattr__(self, key, value):
if key != '_error':
return setattr(self._error, key, value)
else:
super(Error, self).__setattr__(key, value)
def __delattr__(self, key):
delattr(self._error, key)
register(Error, 'error')
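The registry makes deserialization type-aware: `from_dict` looks up `@type` and instantiates the registered class. A hedged round trip, assuming the `+` side of this diff is installed and the field names follow the schemas above:

```
entry = from_dict({'@type': 'entry', 'text': 'hello'})
print(type(entry).__name__)  # -> 'Entry'
print(entry.to_JSON())       # re-serialized, with an auto-generated @id
```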

View File

@@ -1,124 +1,31 @@
from future import standard_library
standard_library.install_aliases()

+import inspect
-from future.utils import with_metaclass
-from functools import partial
import os.path
import os
-import re
import pickle
import logging
+import tempfile
import copy
-import pprint
-import inspect
-import sys
-import subprocess
-import importlib
-import yaml
-import threading
-import nltk

-from .. import models, utils
-from .. import api
-from .. import gsitk_compat
-from .. import testing
+from .. import models

logger = logging.getLogger(__name__)
-class PluginMeta(models.BaseMeta):
-    _classes = {}
+class SenpyPlugin(models.Plugin):
+    def __init__(self, info=None):
def __new__(mcs, name, bases, attrs, **kwargs):
plugin_type = []
if hasattr(bases[0], 'plugin_type'):
plugin_type += bases[0].plugin_type
plugin_type.append(name)
alias = attrs.get('name', name)
attrs['plugin_type'] = plugin_type
attrs['name'] = alias
if 'description' not in attrs:
doc = attrs.get('__doc__', None)
if doc:
attrs['description'] = doc
else:
logger.warn(('Plugin {} does not have a description. '
'Please, add a short summary to help other developers').format(name))
cls = super(PluginMeta, mcs).__new__(mcs, name, bases, attrs)
if alias in mcs._classes:
if os.environ.get('SENPY_TESTING', ""):
raise Exception(('The type of plugin {} already exists. '
'Please, choose a different name').format(name))
else:
logger.warn('Overloading plugin class: {}'.format(alias))
mcs._classes[alias] = cls
return cls
@classmethod
def for_type(cls, ptype):
return cls._classes[ptype]
class Plugin(with_metaclass(PluginMeta, models.Plugin)):
'''
Base class for all plugins in senpy.
A plugin must provide at least these attributes:
- version
- description (or docstring)
- author
Additionally, they may provide a URL (url) of a repository or website.
'''
def __init__(self, info=None, data_folder=None, **kwargs):
""" """
Provides a canonical name for plugins and serves as base for other Provides a canonical name for plugins and serves as base for other
kinds of plugins. kinds of plugins.
""" """
if not info:
raise models.Error(message=("You need to provide configuration"
"information for the plugin."))
logger.debug("Initialising {}".format(info)) logger.debug("Initialising {}".format(info))
super(Plugin, self).__init__(**kwargs) id = 'plugins/{}_{}'.format(info['name'], info['version'])
if info: super(SenpyPlugin, self).__init__(id=id, **info)
self.update(info)
self.validate()
self.id = 'endpoint:plugins/{}_{}'.format(self['name'], self['version'])
self.is_activated = False self.is_activated = False
self._lock = threading.Lock()
self._directory = os.path.abspath(os.path.dirname(inspect.getfile(self.__class__)))
data_folder = data_folder or os.getcwd()
subdir = os.path.join(data_folder, self.name)
self._data_paths = [
data_folder,
subdir,
self._directory,
os.path.join(self._directory, 'data'),
]
if os.path.exists(subdir):
data_folder = subdir
self.data_folder = data_folder
self._log = logging.getLogger('{}.{}'.format(__name__, self.name))
@property
def log(self):
return self._log
def validate(self):
missing = []
for x in ['name', 'description', 'version']:
if x not in self:
missing.append(x)
if missing:
raise models.Error('Missing configuration parameters: {}'.format(missing))
    def get_folder(self):
        return os.path.dirname(inspect.getfile(self.__class__))
@@ -129,83 +36,12 @@ class Plugin(with_metaclass(PluginMeta, models.Plugin)):
    def deactivate(self):
        pass
def test(self, test_cases=None):
if not test_cases:
if not hasattr(self, 'test_cases'):
raise AttributeError(('Plugin {} [{}] does not have any defined '
'test cases').format(self.id,
inspect.getfile(self.__class__)))
test_cases = self.test_cases
for case in test_cases:
try:
self.test_case(case)
self.log.debug('Test case passed:\n{}'.format(pprint.pformat(case)))
except Exception as ex:
self.log.warn('Test case failed:\n{}'.format(pprint.pformat(case)))
raise
-    def test_case(self, case, mock=testing.MOCK_REQUESTS):
+class AnalysisPlugin(SenpyPlugin):
entry = models.Entry(case['entry'])
given_parameters = case.get('params', case.get('parameters', {}))
expected = case.get('expected', None)
should_fail = case.get('should_fail', False)
responses = case.get('responses', [])
try:
params = api.parse_params(given_parameters, self.extra_params)
method = partial(self.analyse_entries, [entry, ], params)
if mock:
res = list(method())
else:
with testing.patch_all_requests(responses):
res = list(method())
if not isinstance(expected, list):
expected = [expected]
utils.check_template(res, expected)
for r in res:
r.validate()
except models.Error:
if should_fail:
return
raise
assert not should_fail
def find_file(self, fname):
for p in self._data_paths:
alternative = os.path.join(p, fname)
if os.path.exists(alternative):
return alternative
raise IOError('File does not exist: {}'.format(fname))
def open(self, fpath, mode='r'):
if 'w' in mode:
# When writing, only use absolute paths or data_folder
if not os.path.isabs(fpath):
fpath = os.path.join(self.data_folder, fpath)
else:
fpath = self.find_file(fpath)
return open(fpath, mode=mode)
def serve(self, debug=True, **kwargs):
utils.easy(plugin_list=[self, ], plugin_folder=None, debug=debug, **kwargs)
# For backwards compatibility
SenpyPlugin = Plugin
class Analysis(Plugin):
'''
A subclass of Plugin that analyses text and provides an annotation.
'''
    def analyse(self, *args, **kwargs):
-        raise NotImplementedError(
-            'Your plugin should implement either analyse or analyse_entry')
+        raise NotImplemented(
+            'Your method should implement either analyse or analyse_entry')

    def analyse_entry(self, entry, parameters):
        """ An implemented plugin should override this method.
@@ -214,459 +50,61 @@ class Analysis(Plugin):
        Note that this method may yield an annotated entry or a list of
        entries (e.g. in a tokenizer)
        """
-        text = entry['nif:isString']
+        text = entry['text']
        params = copy.copy(parameters)
        params['input'] = text
        results = self.analyse(**params)
        for i in results.entries:
            yield i
def analyse_entries(self, entries, parameters):
for entry in entries:
self.log.debug('Analysing entry with plugin {}: {}'.format(self, entry))
results = self.analyse_entry(entry, parameters)
if inspect.isgenerator(results):
for result in results:
yield result
else:
yield results
-    def test_case(self, case):
+class ConversionPlugin(SenpyPlugin):
if 'entry' not in case and 'input' in case:
entry = models.Entry(_auto_id=False)
entry.nif__isString = case['input']
case['entry'] = entry
super(Analysis, self).test_case(case)
AnalysisPlugin = Analysis
class Conversion(Plugin):
'''
A subclass of Plugins that convert between different annotation models.
e.g. a conversion of emotion models, or normalization of sentiment values.
'''
    pass

-ConversionPlugin = Conversion
+class SentimentPlugin(models.SentimentPlugin, AnalysisPlugin):
+    def __init__(self, info, *args, **kwargs):
+        super(SentimentPlugin, self).__init__(info, *args, **kwargs)
+        self.minPolarityValue = float(info.get("minPolarityValue", 0))
+        self.maxPolarityValue = float(info.get("maxPolarityValue", 1))

-class SentimentPlugin(Analysis, models.SentimentPlugin):
-    '''
-    Sentiment plugins provide sentiment annotation (using Marl)
-    '''
-    minPolarityValue = 0
+class EmotionPlugin(models.EmotionPlugin, AnalysisPlugin):
+    def __init__(self, info, *args, **kwargs):
+        super(EmotionPlugin, self).__init__(info, *args, **kwargs)
+        self.minEmotionValue = float(info.get("minEmotionValue", -1))
+        self.maxEmotionValue = float(info.get("maxEmotionValue", 1))
maxPolarityValue = 1
def test_case(self, case):
if 'polarity' in case:
expected = case.get('expected', {})
s = models.Sentiment(_auto_id=False)
s.marl__hasPolarity = case['polarity']
if 'sentiments' not in expected:
expected['sentiments'] = []
expected['sentiments'].append(s)
case['expected'] = expected
super(SentimentPlugin, self).test_case(case)
-class EmotionPlugin(Analysis, models.EmotionPlugin):
+class EmotionConversionPlugin(models.EmotionConversionPlugin, ConversionPlugin):
'''
Emotion plugins provide emotion annotation (using Onyx)
'''
minEmotionValue = 0
maxEmotionValue = 1
class EmotionConversion(Conversion):
'''
A subclass of Conversion that converts emotion annotations using different models
'''
    pass
EmotionConversionPlugin = EmotionConversion
class Box(AnalysisPlugin):
'''
Black box plugins delegate analysis to a function.
The flow is like so:
.. code-block::
entry --> input() --> predict_one() --> output() --> entry'
    In other words: their ``input`` method converts a query (an entry and a set of parameters) into
    the input to the box method. The ``output`` method converts the results given by the box into
    an entry that senpy can handle.
'''
def input(self, entry, params=None):
'''Transforms a query (entry+param) into an input for the black box'''
return entry
def output(self, output, entry=None, params=None):
'''Transforms the results of the black box into an entry'''
return output
def predict_one(self, input):
raise NotImplementedError('You should define the behavior of this plugin')
def analyse_entries(self, entries, params):
for entry in entries:
input = self.input(entry=entry, params=params)
results = self.predict_one(input=input)
yield self.output(output=results, entry=entry, params=params)
def fit(self, X=None, y=None):
return self
def transform(self, X):
return [self.predict_one(x) for x in X]
def predict(self, X):
return self.transform(X)
def fit_transform(self, X, y):
self.fit(X, y)
return self.transform(X)
def as_pipe(self):
pipe = gsitk_compat.Pipeline([('plugin', self)])
pipe.name = self.name
return pipe
class TextBox(Box):
'''A black box plugin that takes only text as input'''
def input(self, entry, params):
entry = super(TextBox, self).input(entry, params)
return entry['nif:isString']
class SentimentBox(TextBox, SentimentPlugin):
'''
A box plugin where the output is only a polarity label or a tuple (polarity, polarityValue)
'''
def output(self, output, entry, **kwargs):
s = models.Sentiment()
try:
label, value = output
except ValueError:
label, value = output, None
s.prov(self)
s.polarity = label
if value is not None:
s.polarityValue = value
entry.sentiments.append(s)
return entry
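
Given the output logic above, a SentimentBox subclass only needs to provide predict_one, returning either a bare polarity label or a (label, value) tuple. A minimal sketch with a hypothetical plugin name and toy logic (assumes SentimentBox is importable from senpy.plugins):

from senpy.plugins import SentimentBox

class LengthSentiment(SentimentBox):
    '''Hypothetical box: long texts are positive, short ones negative'''
    author = '@example'
    version = '0'

    def predict_one(self, input):
        # 'input' is the entry text, extracted by TextBox.input
        if len(input) > 10:
            return ('marl:Positive', 1.0)  # (label, polarityValue) pair
        return 'marl:Negative'             # a bare label also works
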
class EmotionBox(TextBox, EmotionPlugin):
'''
A box plugin where the output is only a tuple of emotion labels
'''
def output(self, output, entry, **kwargs):
if not isinstance(output, list):
output = [output]
s = models.EmotionSet()
entry.emotions.append(s)
for label in output:
e = models.Emotion(onyx__hasEmotionCategory=label)
s.append(e)
return entry
class MappingMixin(object):
@property
def mappings(self):
return self._mappings
@mappings.setter
def mappings(self, value):
self._mappings = value
def output(self, output, entry, params):
output = self.mappings.get(output,
self.mappings.get('default', output))
return super(MappingMixin, self).output(output=output,
entry=entry,
params=params)
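
In plain Python, the lookup performed by output above amounts to the following (the mapping values are illustrative):

mappings = {'pos': 'marl:Positive',
            'neg': 'marl:Negative',
            'default': 'marl:Neutral'}

raw = 'pos'
label = mappings.get(raw, mappings.get('default', raw))
print(label)  # 'marl:Positive'; unknown outputs fall back to 'marl:Neutral'
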
class ShelfMixin(object):
@property
def sh(self):
if not hasattr(self, '_sh') or self._sh is None:
-self._sh = {}
+self.__dict__['_sh'] = {}
if os.path.isfile(self.shelf_file):
-try:
-with self.open(self.shelf_file, 'rb') as p:
-self._sh = pickle.load(p)
-except (IndexError, EOFError, pickle.UnpicklingError):
-self.log.warning('Corrupted shelf file: {}'.format(self.shelf_file))
-if not self.get('force_shelf', False):
-raise
+self.__dict__['_sh'] = pickle.load(open(self.shelf_file, 'rb'))
return self._sh
@sh.deleter
def sh(self):
if os.path.isfile(self.shelf_file):
os.remove(self.shelf_file)
-del self._sh
+del self.__dict__['_sh']
self.save()
-@sh.setter
-def sh(self, value):
-self._sh = value
@property
def shelf_file(self):
-if not hasattr(self, '_shelf_file') or not self._shelf_file:
-self._shelf_file = os.path.join(self.data_folder, self.name + '.p')
-return self._shelf_file
-@shelf_file.setter
-def shelf_file(self, value):
-self._shelf_file = value
+if 'shelf_file' not in self or not self['shelf_file']:
+sd = os.environ.get('SENPY_DATA', tempfile.gettempdir())
+self.shelf_file = os.path.join(sd, self.name + '.p')
+return self['shelf_file']
def save(self):
-self.log.debug('Saving pickle')
+logger.debug('saving pickle')
if hasattr(self, '_sh') and self._sh is not None:
-with self.open(self.shelf_file, 'wb') as f:
+with open(self.shelf_file, 'wb') as f:
pickle.dump(self._sh, f)
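
A minimal sketch of a plugin using ShelfMixin as a persistent cache, following the newer API on the left-hand side of this hunk (the plugin name and logic are hypothetical; assumes both classes are importable from senpy.plugins):

from senpy.plugins import AnalysisPlugin, ShelfMixin

class CachingExample(ShelfMixin, AnalysisPlugin):
    '''Hypothetical plugin that memoizes an expensive computation'''
    version = '0'

    def expensive(self, text):
        return len(text)  # stand-in for a costly model call

    def analyse_entry(self, entry, params):
        text = entry['nif:isString']
        if text not in self.sh:  # .sh lazily loads the pickled shelf
            self.sh[text] = self.expensive(text)
            self.save()          # write the shelf back to shelf_file
        yield entry
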
def pfilter(plugins, plugin_type=Analysis, **kwargs):
""" Filter plugins by different criteria """
if isinstance(plugins, models.Plugins):
plugins = plugins.plugins
elif isinstance(plugins, dict):
plugins = plugins.values()
logger.debug('#' * 100)
logger.debug('plugin_type {}'.format(plugin_type))
if plugin_type:
if isinstance(plugin_type, PluginMeta):
plugin_type = plugin_type.__name__
try:
plugin_type = plugin_type[0].upper() + plugin_type[1:]
pclass = globals()[plugin_type]
logger.debug('Class: {}'.format(pclass))
candidates = filter(lambda x: isinstance(x, pclass),
plugins)
except KeyError:
raise models.Error('{} is not a valid type'.format(plugin_type))
else:
candidates = plugins
logger.debug(candidates)
def matches(plug):
res = all(getattr(plug, k, None) == v for (k, v) in kwargs.items())
logger.debug(
"matching {} with {}: {}".format(plug.name, kwargs, res))
return res
if kwargs:
candidates = filter(matches, candidates)
return candidates
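
Typical uses of pfilter, assuming a list of loaded plugins (the folder path and names are illustrative):

from senpy import plugins

loaded = plugins.from_folder(['plugins/'])
sentiment = list(plugins.pfilter(loaded, plugin_type='SentimentPlugin'))
rand_like = list(plugins.pfilter(loaded, plugin_type=None, name='rand'))
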
def load_module(name, root=None):
if root:
sys.path.append(root)
tmp = importlib.import_module(name)
if root:
sys.path.remove(root)
return tmp
def _log_subprocess_output(process):
for line in iter(process.stdout.readline, b''):
logger.info('%r', line)
for line in iter(process.stderr.readline, b''):
logger.error('%r', line)
def install_deps(*plugins):
installed = False
nltk_resources = set()
for info in plugins:
requirements = info.get('requirements', [])
if requirements:
pip_args = [sys.executable, '-m', 'pip', 'install']
for req in requirements:
pip_args.append(req)
logger.info('Installing requirements: ' + str(requirements))
process = subprocess.Popen(pip_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_log_subprocess_output(process)
exitcode = process.wait()
installed = True
if exitcode != 0:
raise models.Error("Dependencies not properly installed: {}".format(pip_args))
nltk_resources |= set(info.get('nltk_resources', []))
installed |= nltk.download(list(nltk_resources))
return installed
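
An illustrative plugin info dict that would exercise both branches of install_deps (the dependency names are hypothetical):

info = {
    'name': 'example',
    'requirements': ['requests'],  # pip-installed in a subprocess
    'nltk_resources': ['punkt'],   # fetched via nltk.download
}
# install_deps(info) returns True if anything had to be installed,
# and raises models.Error when pip exits with a non-zero status.
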
is_plugin_file = re.compile(r'.*\.senpy$|senpy_[a-zA-Z0-9_]+\.py$|'
'^(?!test_)[a-zA-Z0-9_]+_plugin.py$')
def find_plugins(folders):
for search_folder in folders:
for root, dirnames, filenames in os.walk(search_folder):
# Do not look for plugins in hidden or special folders
dirnames[:] = [d for d in dirnames if d[0] not in ['.', '_']]
for filename in filter(is_plugin_file.match, filenames):
fpath = os.path.join(root, filename)
yield fpath
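
The is_plugin_file pattern accepts three naming conventions; a quick self-check with illustrative filenames:

import re

is_plugin_file = re.compile(r'.*\.senpy$|senpy_[a-zA-Z0-9_]+\.py$|'
                            '^(?!test_)[a-zA-Z0-9_]+_plugin.py$')

assert is_plugin_file.match('sentiment.senpy')           # senpy descriptor
assert is_plugin_file.match('senpy_rand.py')             # senpy_*.py module
assert is_plugin_file.match('rand_plugin.py')            # *_plugin.py module
assert not is_plugin_file.match('test_rand_plugin.py')   # tests are skipped
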
def from_path(fpath, install_on_fail=False, **kwargs):
logger.debug("Loading plugin from {}".format(fpath))
if fpath.endswith('.py'):
# We assume root is the dir of the file, and module is the name of the file
root = os.path.dirname(fpath)
module = os.path.basename(fpath)[:-3]
for instance in _from_module_name(module=module, root=root, **kwargs):
yield instance
else:
info = parse_plugin_info(fpath)
yield from_info(info, install_on_fail=install_on_fail, **kwargs)
def from_folder(folders, loader=from_path, **kwargs):
plugins = []
for fpath in find_plugins(folders):
for plugin in loader(fpath, **kwargs):
plugins.append(plugin)
return plugins
def from_info(info, root=None, install_on_fail=True, **kwargs):
if any(x not in info for x in ('module',)):
raise ValueError('Plugin info is not valid: {}'.format(info))
module = info["module"]
if not root and '_path' in info:
root = os.path.dirname(info['_path'])
fun = partial(one_from_module, module, root=root, info=info, **kwargs)
try:
return fun()
except (ImportError, LookupError):
install_deps(info)
return fun()
def parse_plugin_info(fpath):
logger.debug("Parsing plugin info: {}".format(fpath))
with open(fpath, 'r') as f:
info = yaml.load(f)
info['_path'] = fpath
return info
def from_module(module, **kwargs):
if inspect.ismodule(module):
res = _from_loaded_module(module, **kwargs)
else:
res = _from_module_name(module, **kwargs)
for p in res:
yield p
def one_from_module(module, root, info, **kwargs):
if '@type' in info:
cls = PluginMeta.from_type(info['@type'])
return cls(info=info, **kwargs)
instance = next(from_module(module=module, root=root, info=info, **kwargs), None)
if not instance:
raise Exception("No valid plugin for: {}".format(module))
return instance
def _classes_in_module(module):
for _, obj in inspect.getmembers(module):
if inspect.isclass(obj) and inspect.getmodule(obj) == module:
logger.debug(("Found plugin class:"
" {}@{}").format(obj, inspect.getmodule(obj)))
yield obj
def _instances_in_module(module):
for _, obj in inspect.getmembers(module):
if isinstance(obj, Plugin) and inspect.getmodule(obj) == module:
logger.debug(("Found plugin instance:"
" {}@{}").format(obj, inspect.getmodule(obj)))
yield obj
def _from_module_name(module, root, info=None, **kwargs):
module = load_module(module, root)
for plugin in _from_loaded_module(module=module, root=root, info=info, **kwargs):
yield plugin
def _from_loaded_module(module, info=None, **kwargs):
for cls in _classes_in_module(module):
yield cls(info=info, **kwargs)
for instance in _instances_in_module(module):
yield instance
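
Putting the loaders together, discovering and instantiating every plugin under a folder might look like this (the path is illustrative; assumes senpy is importable):

from senpy import plugins

for plugin in plugins.from_folder(['plugins/']):
    print(plugin.name)
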
def evaluate(plugins, datasets, **kwargs):
ev = gsitk_compat.Eval(tuples=None,
datasets=datasets,
pipelines=[plugin.as_pipe() for plugin in plugins])
ev.evaluate()
results = ev.results
evaluations = evaluations_to_JSONLD(results, **kwargs)
return evaluations
def evaluations_to_JSONLD(results, flatten=False):
'''
Map the evaluation results to a JSONLD scheme
'''
evaluations = list()
metric_names = ['accuracy', 'precision_macro', 'recall_macro',
'f1_macro', 'f1_weighted', 'f1_micro', 'f1_macro']
for index, row in results.iterrows():
evaluation = models.Evaluation()
if row.get('CV', True):
evaluation['@type'] = ['StaticCV', 'Evaluation']
evaluation.evaluatesOn = row['Dataset']
evaluation.evaluates = row['Model']
i = 0
if flatten:
metric = models.Metric()
for name in metric_names:
metric[name] = row[name]
evaluation.metrics.append(metric)
else:
# We should probably discontinue this representation
for name in metric_names:
metric = models.Metric()
metric['@id'] = 'Metric' + str(i)
metric['@type'] = name.capitalize()
metric.value = row[name]
evaluation.metrics.append(metric)
i += 1
evaluations.append(evaluation)
return evaluations
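
evaluations_to_JSONLD expects a pandas DataFrame like the one produced by gsitk's evaluation, with one row per (model, dataset) pair. A minimal sketch of that shape (all scores are made up):

import pandas as pd

results = pd.DataFrame([{
    'Dataset': 'sst', 'Model': 'rand', 'CV': False,
    'accuracy': 0.5, 'precision_macro': 0.5, 'recall_macro': 0.5,
    'f1_macro': 0.5, 'f1_weighted': 0.5, 'f1_micro': 0.5,
}])
# evaluations_to_JSONLD(results, flatten=True) would return a single
# Evaluation whose only Metric carries all of the scores above.
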


@@ -0,0 +1,52 @@
from senpy.plugins import EmotionConversionPlugin
from senpy.models import EmotionSet, Emotion, Error
import logging
logger = logging.getLogger(__name__)
class CentroidConversion(EmotionConversionPlugin):
def _forward_conversion(self, original):
"""Sum the VAD value of all categories found."""
res = Emotion()
for e in original.onyx__hasEmotion:
category = e.onyx__hasEmotionCategory
if category in self.centroids:
for dim, value in self.centroids[category].items():
try:
res[dim] += value
except Exception:
res[dim] = value
return res
def _backwards_conversion(self, original):
"""Find the closest category"""
dimensions = list(self.centroids.values())[0]
def distance(e1, e2):
return sum((e1[k] - e2.get(self.aliases[k], 0)) for k in dimensions)
emotion = ''
mindistance = 10000000000000000000000.0
for state in self.centroids:
d = distance(self.centroids[state], original)
if d < mindistance:
mindistance = d
emotion = state
result = Emotion(onyx__hasEmotionCategory=emotion)
return result
def convert(self, emotionSet, fromModel, toModel, params):
cf, ct = self.centroids_direction
logger.debug('{}\n{}\n{}\n{}'.format(emotionSet, fromModel, toModel, params))
e = EmotionSet()
if fromModel == cf:
e.onyx__hasEmotion.append(self._forward_conversion(emotionSet))
elif fromModel == ct:
for i in emotionSet.onyx__hasEmotion:
e.onyx__hasEmotion.append(self._backwards_conversion(i))
else:
raise Error('EMOTION MODEL NOT KNOWN')
yield e
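
A worked example of this early forward conversion, mirrored in plain Python: it simply sums the centroid of every category it finds (the centroid values are illustrative):

centroids = {'c1': {'V': 0.5, 'A': 0.5},
             'c2': {'V': -0.5, 'A': 0.5}}
found = ['c1', 'c2']  # categories present in the EmotionSet

res = {}
for category in found:
    for dim, value in centroids[category].items():
        res[dim] = res.get(dim, 0) + value
print(res)  # {'V': 0.0, 'A': 1.0}
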


@@ -1,158 +0,0 @@
from senpy.plugins import EmotionConversionPlugin
from senpy.models import EmotionSet, Emotion, Error
import logging
logger = logging.getLogger(__name__)
class CentroidConversion(EmotionConversionPlugin):
'''
This plugin converts emotion annotations from a dimensional model to a
categorical one, and vice versa. The centroids used in the conversion
are configurable and appear in the semantic description of the plugin.
'''
def __init__(self, info, *args, **kwargs):
if 'centroids' not in info:
raise Error('Centroid conversion plugins should provide '
'the centroids in their senpy file')
if 'onyx:doesConversion' not in info:
if 'centroids_direction' not in info:
raise Error('Please, provide centroids direction')
cf, ct = info['centroids_direction']
info['onyx:doesConversion'] = [{
'onyx:conversionFrom': cf,
'onyx:conversionTo': ct
}, {
'onyx:conversionFrom': ct,
'onyx:conversionTo': cf
}]
if 'aliases' in info:
aliases = info['aliases']
ncentroids = {}
for k1, v1 in info['centroids'].items():
nv1 = {}
for k2, v2 in v1.items():
nv1[aliases.get(k2, k2)] = v2
ncentroids[aliases.get(k1, k1)] = nv1
info['centroids'] = ncentroids
super(CentroidConversion, self).__init__(info, *args, **kwargs)
self.dimensions = set()
for c in self.centroids.values():
self.dimensions.update(c.keys())
self.neutralPoints = self.get("neutralPoints", dict())
if not self.neutralPoints:
for i in self.dimensions:
self.neutralPoints[i] = self.get("neutralValue", 0)
def _forward_conversion(self, original):
"""Sum the VAD value of all categories found weighted by intensity.
Intensities are scaled by onyx:maxIntensityValue if it is present, else maxIntensityValue
is assumed to be one. Emotion entries that do not have onyx:hasEmotionIntensity specified
are assumed to have maxIntensityValue. Emotion entries that do not have
onyx:hasEmotionCategory specified are ignored."""
res = Emotion()
maxIntensity = float(original.get("onyx:maxIntensityValue", 1))
for e in original.onyx__hasEmotion:
category = e.get("onyx:hasEmotionCategory", None)
if not category:
continue
intensity = e.get("onyx:hasEmotionIntensity", maxIntensity) / maxIntensity
if not intensity:
continue
centroid = self.centroids.get(category, None)
if centroid:
for dim, value in centroid.items():
neutral = self.neutralPoints[dim]
if dim not in res:
res[dim] = 0
res[dim] += (value - neutral) * intensity + neutral
return res
def _backwards_conversion(self, original):
"""Find the closest category"""
centroids = self.centroids
neutralPoints = self.neutralPoints
dimensions = self.dimensions
def distance_k(centroid, original, k):
# k component of the distance between the value and a given centroid
return (centroid.get(k, neutralPoints[k]) - original.get(k, neutralPoints[k]))**2
def distance(centroid):
return sum(distance_k(centroid, original, k) for k in dimensions)
emotion = min(centroids, key=lambda x: distance(centroids[x]))
result = Emotion(onyx__hasEmotionCategory=emotion)
result.onyx__algorithmConfidence = distance(centroids[emotion])
return result
def convert(self, emotionSet, fromModel, toModel, params):
cf, ct = self.centroids_direction
logger.debug(
'{}\n{}\n{}\n{}'.format(emotionSet, fromModel, toModel, params))
e = EmotionSet()
if fromModel == cf and toModel == ct:
e.onyx__hasEmotion.append(self._forward_conversion(emotionSet))
elif fromModel == ct and toModel == cf:
for i in emotionSet.onyx__hasEmotion:
e.onyx__hasEmotion.append(self._backwards_conversion(i))
else:
raise Error('EMOTION MODEL NOT KNOWN')
yield e
def test(self, info=None):
if not info:
info = {
"name": "CentroidTest",
"description": "Centroid test",
"version": 0,
"centroids": {
"c1": {"V1": 0.5,
"V2": 0.5},
"c2": {"V1": -0.5,
"V2": 0.5},
"c3": {"V1": -0.5,
"V2": -0.5},
"c4": {"V1": 0.5,
"V2": -0.5}},
"aliases": {
"V1": "X-dimension",
"V2": "Y-dimension"
},
"centroids_direction": ["emoml:big6", "emoml:fsre-dimensions"]
}
c = CentroidConversion(info)
es1 = EmotionSet()
e1 = Emotion()
e1.onyx__hasEmotionCategory = "c1"
es1.onyx__hasEmotion.append(e1)
res = c._forward_conversion(es1)
assert res["X-dimension"] == 0.5
assert res["Y-dimension"] == 0.5
e2 = Emotion()
e2.onyx__hasEmotionCategory = "c2"
es1.onyx__hasEmotion.append(e2)
res = c._forward_conversion(es1)
assert res["X-dimension"] == 0
assert res["Y-dimension"] == 1
e = Emotion()
e["X-dimension"] = -0.2
e["Y-dimension"] = -0.3
res = c._backwards_conversion(e)
assert res["onyx:hasEmotionCategory"] == "c3"
e = Emotion()
e["X-dimension"] = -0.2
e["Y-dimension"] = 0.3
res = c._backwards_conversion(e)
assert res["onyx:hasEmotionCategory"] == "c2"


@@ -1,52 +0,0 @@
---
name: Ekman2FSRE
module: senpy.plugins.conversion.emotion.centroids
description: Plugin to convert emotion sets from Ekman to VAD
version: 0.2
# No need to specify onyx:doesConversion because centroids.py adds it automatically from centroids_direction
neutralValue: 5.0
centroids:
anger:
A: 6.95
D: 5.1
V: 2.7
S: 5.0
disgust:
A: 5.3
D: 8.05
V: 2.7
S: 5.0
fear:
A: 6.5
D: 3.6
V: 3.2
S: 5.0
happiness:
A: 7.22
D: 6.28
V: 8.6
S: 5.0
sadness:
A: 5.21
D: 2.82
V: 2.21
S: 5.0
surprise:
A: 5.0
D: 5.0
V: 5.0
S: 10.0
centroids_direction:
- emoml:big6
- emoml:fsre-dimensions
aliases: # These are aliases for any key in the centroid, to avoid repeating a long name several times
A: emoml:fsre-dimensions_arousal
V: emoml:fsre-dimensions_valence
D: emoml:fsre-dimensions_potency
S: emoml:fsre-dimensions_unpredictability
anger: emoml:big6anger
disgust: emoml:big6disgust
fear: emoml:big6fear
happiness: emoml:big6happiness
sadness: emoml:big6sadness
surprise: emoml:big6surprise
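
The aliases block is expanded by centroids.py before the plugin is initialized: every centroid name and dimension key is replaced by its alias. In plain Python, the rewrite amounts to the following (one centroid shown for brevity):

aliases = {'A': 'emoml:fsre-dimensions_arousal',
           'anger': 'emoml:big6anger'}
centroids = {'anger': {'A': 6.95}}

ncentroids = {}
for k1, v1 in centroids.items():
    nv1 = {}
    for k2, v2 in v1.items():
        nv1[aliases.get(k2, k2)] = v2
    ncentroids[aliases.get(k1, k1)] = nv1
print(ncentroids)
# {'emoml:big6anger': {'emoml:fsre-dimensions_arousal': 6.95}}
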


@@ -1,40 +1,38 @@
---
-name: Ekman2PAD
+name: Ekman2VAD
-module: senpy.plugins.conversion.emotion.centroids
+module: senpy.plugins.conversion.centroids
description: Plugin to convert emotion sets from Ekman to VAD
-version: 0.2
+version: 0.1
-# No need to specify onyx:doesConversion because centroids.py adds it automatically from centroids_direction
-neutralValue: 5.0
+onyx:doesConversion:
+- onyx:conversionFrom: emoml:big6
+onyx:conversionTo: emoml:fsre-dimensions
+- onyx:conversionFrom: emoml:fsre-dimensions
+onyx:conversionTo: emoml:big6
centroids:
-anger:
+emoml:big6anger:
A: 6.95
D: 5.1
V: 2.7
-disgust:
+emoml:big6disgust:
A: 5.3
D: 8.05
V: 2.7
-fear:
+emoml:big6fear:
A: 6.5
D: 3.6
V: 3.2
-happiness:
+emoml:big6happiness:
A: 7.22
D: 6.28
V: 8.6
-sadness:
+emoml:big6sadness:
A: 5.21
D: 2.82
V: 2.21
centroids_direction:
- emoml:big6
-- emoml:pad
+- emoml:fsre-dimensions
-aliases: # These are aliases for any key in the centroid, to avoid repeating a long name several times
+aliases:
-A: emoml:pad-dimensions:arousal
+A: emoml:arousal
-V: emoml:pad-dimensions:pleasure
+V: emoml:valence
-D: emoml:pad-dimensions:dominance
+D: emoml:dominance
-anger: emoml:big6anger
-disgust: emoml:big6disgust
-fear: emoml:big6fear
-happiness: emoml:big6happiness
-sadness: emoml:big6sadness


@@ -1,18 +1,10 @@
import random
from senpy.plugins import EmotionPlugin
-from senpy.models import EmotionSet, Emotion, Entry
+from senpy.models import EmotionSet, Emotion
-class EmoRand(EmotionPlugin):
+class RmoRandPlugin(EmotionPlugin):
-name = "emoRand"
-description = 'A sample plugin that returns a random emotion annotation'
-author = '@balkian'
-version = '0.1'
-url = "https://github.com/gsi-upm/senpy-plugins-community"
-requirements = {}
-onyx__usesEmotionModel = "emoml:big6"
def analyse_entry(self, entry, params):
category = "emoml:big6happiness"
number = max(-1, min(1, random.gauss(0, 0.5)))
@@ -24,11 +16,3 @@ class EmoRand(EmotionPlugin):
emotionSet.prov__wasGeneratedBy = self.id
entry.emotions.append(emotionSet)
yield entry
def test(self):
params = dict()
results = list()
for i in range(100):
res = next(self.analyse_entry(Entry(nif__isString="Hello"), params))
res.validate()
results.append(res.emotions[0]['onyx:hasEmotion'][0]['onyx:hasEmotionCategory'])


@@ -0,0 +1,9 @@
---
name: emoRand
module: emoRand
description: A sample plugin that returns a random emotion annotation
author: "@balkian"
version: '0.1'
url: "https://github.com/gsi-upm/senpy-plugins-community"
requirements: {}
onyx:usesEmotionModel: "emoml:big6"


@@ -1,32 +0,0 @@
import random
from senpy.plugins import EmotionPlugin
from senpy.models import EmotionSet, Emotion, Entry
class EmoRand(EmotionPlugin):
'''A sample plugin that returns a random emotion annotation'''
author = '@balkian'
version = '0.1'
url = "https://github.com/gsi-upm/senpy-plugins-community"
onyx__usesEmotionModel = "emoml:big6"
def analyse_entry(self, entry, params):
category = "emoml:big6happiness"
number = max(-1, min(1, random.gauss(0, 0.5)))
if number > 0:
category = "emoml:big6anger"
emotionSet = EmotionSet()
emotion = Emotion({"onyx:hasEmotionCategory": category})
emotionSet.onyx__hasEmotion.append(emotion)
emotionSet.prov__wasGeneratedBy = self.id
entry.emotions.append(emotionSet)
yield entry
def test(self):
params = dict()
results = list()
for i in range(100):
res = next(self.analyse_entry(Entry(nif__isString="Hello"), params))
res.validate()
results.append(res.emotions[0]['onyx:hasEmotion'][0]['onyx:hasEmotionCategory'])


@@ -0,0 +1,24 @@
import random
from senpy.plugins import SentimentPlugin
from senpy.models import Sentiment
class RandPlugin(SentimentPlugin):
def analyse_entry(self, entry, params):
lang = params.get("language", "auto")
polarity_value = max(-1, min(1, random.gauss(0.2, 0.2)))
polarity = "marl:Neutral"
if polarity_value > 0:
polarity = "marl:Positive"
elif polarity_value < 0:
polarity = "marl:Negative"
sentiment = Sentiment({
"marl:hasPolarity": polarity,
"marl:polarityValue": polarity_value
})
sentiment["prov:wasGeneratedBy"] = self.id
entry.sentiments.append(sentiment)
entry.language = lang
yield entry


@@ -0,0 +1,10 @@
---
name: rand
module: rand
description: A sample plugin that returns a random sentiment annotation
author: "@balkian"
version: '0.1'
url: "https://github.com/gsi-upm/senpy-plugins-community"
requirements: {}
marl:maxPolarityValue: '1'
marl:minPolarityValue: "-1"


@@ -1,36 +0,0 @@
import random
from senpy import SentimentPlugin, Sentiment, Entry
class Rand(SentimentPlugin):
'''A sample plugin that returns a random sentiment annotation'''
author = "@balkian"
version = '0.1'
url = "https://github.com/gsi-upm/senpy-plugins-community"
marl__maxPolarityValue = '1'
marl__minPolarityValue = "-1"
def analyse_entry(self, entry, params):
polarity_value = max(-1, min(1, random.gauss(0.2, 0.2)))
polarity = "marl:Neutral"
if polarity_value > 0:
polarity = "marl:Positive"
elif polarity_value < 0:
polarity = "marl:Negative"
sentiment = Sentiment(marl__hasPolarity=polarity,
marl__polarityValue=polarity_value)
sentiment.prov(self)
entry.sentiments.append(sentiment)
yield entry
def test(self):
'''Run several random analyses.'''
params = dict()
results = list()
for i in range(50):
res = next(self.analyse_entry(Entry(nif__isString="Hello"),
params))
res.validate()
results.append(res.sentiments[0]['marl:hasPolarity'])
assert 'marl:Positive' in results
assert 'marl:Negative' in results


@@ -1,80 +0,0 @@
from senpy.plugins import AnalysisPlugin
from senpy.models import Entry
from nltk.tokenize.punkt import PunktSentenceTokenizer
from nltk.tokenize.simple import LineTokenizer
import nltk
class Split(AnalysisPlugin):
'''description: A sample plugin that chunks input text'''
author = ["@militarpancho", '@balkian']
version = '0.2'
url = "https://github.com/gsi-upm/senpy"
extra_params = {
'delimiter': {
'aliases': ['type', 't'],
'required': False,
'default': 'sentence',
'options': ['sentence', 'paragraph']
},
}
def activate(self):
nltk.download('punkt')
def analyse_entry(self, entry, params):
yield entry
chunker_type = params["delimiter"]
original_text = entry['nif:isString']
if chunker_type == "sentence":
tokenizer = PunktSentenceTokenizer()
if chunker_type == "paragraph":
tokenizer = LineTokenizer()
chars = list(tokenizer.span_tokenize(original_text))
for i, chunk in enumerate(tokenizer.tokenize(original_text)):
print(chunk)
e = Entry()
e['nif:isString'] = chunk
if entry.id:
e.id = entry.id + "#char={},{}".format(chars[i][0], chars[i][1])
yield e
test_cases = [
{
'entry': {
'nif:isString': 'Hello. World.'
},
'params': {
'delimiter': 'sentence',
},
'expected': [
{
'nif:isString': 'Hello.'
},
{
'nif:isString': 'World.'
}
]
},
{
'entry': {
"@id": ":test",
'nif:isString': 'Hello\nWorld'
},
'params': {
'delimiter': 'paragraph',
},
'expected': [
{
"@id": ":test#char=0,5",
'nif:isString': 'Hello'
},
{
"@id": ":test#char=6,11",
'nif:isString': 'World'
}
]
}
]
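
The #char offsets in the generated ids come straight from NLTK's span_tokenize, which yields (begin, end) pairs over the original string (the output is shown for illustration):

from nltk.tokenize.punkt import PunktSentenceTokenizer

spans = list(PunktSentenceTokenizer().span_tokenize('Hello. World.'))
print(spans)  # expected: [(0, 6), (7, 13)]
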


@@ -0,0 +1,36 @@
import requests
import json
from senpy.plugins import SentimentPlugin
from senpy.models import Sentiment
class Sentiment140Plugin(SentimentPlugin):
def analyse_entry(self, entry, params):
lang = params.get("language", "auto")
res = requests.post("http://www.sentiment140.com/api/bulkClassifyJson",
json.dumps({
"language": lang,
"data": [{
"text": entry.text
}]
}))
p = params.get("prefix", None)
polarity_value = self.maxPolarityValue * int(
res.json()["data"][0]["polarity"]) * 0.25
polarity = "marl:Neutral"
neutral_value = self.maxPolarityValue / 2.0
if polarity_value > neutral_value:
polarity = "marl:Positive"
elif polarity_value < neutral_value:
polarity = "marl:Negative"
sentiment = Sentiment(
prefix=p,
marl__hasPolarity=polarity,
marl__polarityValue=polarity_value)
sentiment.prov__wasGeneratedBy = self.id
entry.sentiments = []
entry.sentiments.append(sentiment)
entry.language = lang
yield entry


@@ -0,0 +1,21 @@
---
name: sentiment140
module: sentiment140
description: "Connects to the sentiment140 free API: http://sentiment140.com"
author: "@balkian"
version: '0.2'
url: "https://github.com/gsi-upm/senpy-plugins-community"
extra_params:
language:
"@id": lang_sentiment140
aliases:
- language
- l
required: false
options:
- es
- en
- auto
requirements: {}
maxPolarityValue: 1
minPolarityValue: 0


@@ -1,75 +0,0 @@
import requests
import json
from senpy.plugins import SentimentPlugin
from senpy.models import Sentiment
ENDPOINT = 'http://www.sentiment140.com/api/bulkClassifyJson'
class Sentiment140(SentimentPlugin):
'''Connects to the sentiment140 free API: http://sentiment140.com'''
author = "@balkian"
version = '0.2'
url = "https://github.com/gsi-upm/senpy-plugins-community"
extra_params = {
'language': {
"@id": 'lang_sentiment140',
'aliases': ['language', 'l'],
'required': False,
'default': 'auto',
'options': ['es', 'en', 'auto']
}
}
maxPolarityValue = 1
minPolarityValue = 0
def analyse_entry(self, entry, params):
lang = params["language"]
res = requests.post(ENDPOINT,
json.dumps({
"language": lang,
"data": [{
"text": entry['nif:isString']
}]
}))
p = params.get("prefix", None)
polarity_value = self.maxPolarityValue * int(
res.json()["data"][0]["polarity"]) * 0.25
polarity = "marl:Neutral"
neutral_value = self.maxPolarityValue / 2.0
if polarity_value > neutral_value:
polarity = "marl:Positive"
elif polarity_value < neutral_value:
polarity = "marl:Negative"
sentiment = Sentiment(
prefix=p,
marl__hasPolarity=polarity,
marl__polarityValue=polarity_value)
sentiment.prov__wasGeneratedBy = self.id
entry.sentiments.append(sentiment)
entry.language = lang
yield entry
test_cases = [
{
'entry': {
'nif:isString': 'I love Titanic'
},
'params': {},
'expected': {
"nif:isString": "I love Titanic",
'sentiments': [
{
'marl:hasPolarity': 'marl:Positive',
}
]
},
'responses': [{'url': ENDPOINT,
'method': 'POST',
'json': {'data': [{'polarity': 4}]}}]
}
]
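
The Sentiment140 API reports polarity as 0 (negative), 2 (neutral) or 4 (positive); the plugin rescales that to its [0, 1] range. A worked check of the mapping above:

maxPolarityValue = 1
for api_polarity in (0, 2, 4):
    value = maxPolarityValue * api_polarity * 0.25
    neutral = maxPolarityValue / 2.0
    if value > neutral:
        label = 'marl:Positive'
    elif value < neutral:
        label = 'marl:Negative'
    else:
        label = 'marl:Neutral'
    print(api_polarity, value, label)
# 0 -> 0.0 marl:Negative, 2 -> 0.5 marl:Neutral, 4 -> 1.0 marl:Positive
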


@@ -1,38 +0,0 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"allOf": [
{"$ref": "response.json"},
{
"title": "AggregatedEvaluation",
"description": "The results of the evaluation",
"type": "object",
"properties": {
"@context": {
"$ref": "context.json"
},
"@type": {
"default": "AggregatedEvaluation"
},
"@id": {
"description": "ID of the aggregated evaluation",
"type": "string"
},
"evaluations": {
"default": [],
"type": "array",
"items": {
"anyOf": [
{
"$ref": "evaluation.json"
},{
"type": "string"
}
]
}
}
},
"required": ["@id", "evaluations"]
}
]
}


@@ -10,10 +10,8 @@
"wna": "http://www.gsi.dit.upm.es/ontologies/wnaffect/ns#", "wna": "http://www.gsi.dit.upm.es/ontologies/wnaffect/ns#",
"emoml": "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/emotionml/ns#", "emoml": "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/emotionml/ns#",
"xsd": "http://www.w3.org/2001/XMLSchema#", "xsd": "http://www.w3.org/2001/XMLSchema#",
"fam": "http://vocab.fusepool.info/fam#",
"topics": { "topics": {
"@id": "nif:topic", "@id": "dc:subject"
"@container": "@set"
}, },
"entities": { "entities": {
"@id": "me:hasEntities" "@id": "me:hasEntities"
@@ -22,16 +20,10 @@
"@id": "me:hasSuggestions", "@id": "me:hasSuggestions",
"@container": "@set" "@container": "@set"
}, },
"onyx:hasEmotion": {
"@container": "@set"
},
"emotions": { "emotions": {
"@id": "onyx:hasEmotionSet", "@id": "onyx:hasEmotionSet",
"@container": "@set" "@container": "@set"
}, },
"onyx:hasEmotion": {
"@container": "@set"
},
"sentiments": { "sentiments": {
"@id": "marl:hasOpinion", "@id": "marl:hasOpinion",
"@container": "@set" "@container": "@set"
@@ -45,12 +37,6 @@
"@type": "@id", "@type": "@id",
"@container": "@set" "@container": "@set"
}, },
"options": {
"@container": "@set"
},
"plugins": {
"@container": "@set"
},
"prov:wasGeneratedBy": { "prov:wasGeneratedBy": {
"@type": "@id" "@type": "@id"
}, },


@@ -1,29 +0,0 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"name": "Dataset",
"properties": {
"@id": {
"type": "string"
},
"name": {
"type": "string"
},
"compression": {
"type": "string"
},
"expected_bytes": {
"type": "int"
},
"filename": {
"description": "Name of the dataset",
"type": "string"
},
"url": {
"description": "Classifier or plugin evaluated",
"type": "string"
},
"stats": {
}
},
"required": ["@id"]
}


@@ -1,18 +0,0 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"allOf": [
{"$ref": "response.json"},
{
"required": ["datasets"],
"properties": {
"datasets": {
"type": "array",
"default": [],
"items": {
"$ref": "dataset.json"
}
}
}
}
]
}


@@ -41,20 +41,5 @@
},
"Response": {
"$ref": "response.json"
},
"AggregatedEvaluation": {
"$ref": "aggregatedEvaluation.json"
},
"Evaluation": {
"$ref": "evaluation.json"
},
"Metric": {
"$ref": "metric.json"
},
"Dataset": {
"$ref": "dataset.json"
},
"Datasets": {
"$ref": "datasets.json"
} }
}


@@ -5,7 +5,7 @@
"nif:beginIndex": {"type": "integer"}, "nif:beginIndex": {"type": "integer"},
"nif:endIndex": {"type": "integer"}, "nif:endIndex": {"type": "integer"},
"nif:anchorOf": { "nif:anchorOf": {
"description": "Piece of context that contains the Emotion", "description": "Piece of context that contains the Sentiment",
"type": "string" "type": "string"
}, },
"onyx:hasDimension": { "onyx:hasDimension": {
