Mirror of https://github.com/gsi-upm/senpy (synced 2025-09-16 03:22:22 +00:00)

Compare commits: 0.7.1-5-g8 ... 0.8.3 (19 commits)
Author | SHA1 | Date
---|---|---
 | efb305173e |
 | 2288b04c92 |
 | 7899cb4d33 |
 | 62ddca79ac |
 | 99403b3443 |
 | a0ff528a4b |
 | 97bd245dfc |
 | d8b59d06a4 |
 | 453b9f3257 |
 | 5fb858f5fc |
 | bd984a1437 |
 | e741b565a1 |
 | 668a803d89 |
 | 9daae8dda7 |
 | c72094b94b |
 | 15d456d048 |
 | fef06d4333 |
 | 454aa61fba |
 | ba2e18125c |
.gitignore (vendored, 2 changed lines)

@@ -6,3 +6,5 @@ build
README.html
__pycache__
VERSION
Dockerfile-*
Dockerfile
@@ -1,4 +1,4 @@
image: docker:latest
image: gsiupm/dockermake:latest

# When using dind, it's wise to use the overlayfs driver for
@@ -6,79 +6,60 @@ image: docker:latest
variables:
DOCKER_DRIVER: overlay
DOCKERFILE: Dockerfile

before_script:
- sh version.sh > senpy/VERSION
IMAGENAME: $CI_REGISTRY_IMAGE

stages:
- test
- images
- release
- push
- clean

.test: &test_definition
variables:
PIP_CACHE_DIR: "$CI_PROJECT_DIR/pip-cache"
cache:
paths:
- .eggs/
- "$CI_PROJECT_DIR/pip-cache"
- .venv
key: "$CI_PROJECT_NAME"
stage: test
script:
- pip install --use-wheel -U pip setuptools virtualenv
- virtualenv .venv/$PYTHON_VERSION
- source .venv/$PYTHON_VERSION/bin/activate
- pip install --use-wheel -r requirements.txt
- pip install --use-wheel -r test-requirements.txt
- py.test --cov=senpy --cov-report term-missing
- python
- make -e test-$PYTHON_VERSION

test-3.5:
<<: *test_definition
image: "python:3.5"

test-3.4:
<<: *test_definition
image: "python:3.4"
variables:
PYTHON_VERSION: "3.5"

test-2.7:
<<: *test_definition
image: "python:2.7"
variables:
PYTHON_VERSION: "2.7"


.image: &image_definition
variables:
PYTHON_VERSION: "3.5"
stage: push
before_script:
- docker login -u gitlab-ci-token -p $CI_BUILD_TOKEN $CI_REGISTRY
script:
- docker build -f Dockerfile-3.5 . -t $CI_REGISTRY_IMAGE:$CI_BUILD_REF_SLUG-$PYTHON_VERSION
- docker push $CI_REGISTRY_IMAGE:$CI_BUILD_REF_SLUG-$PYTHON_VERSION
stage: images
- make -e push-$PYTHON_VERSION
only:
- tags
- triggers

image-3.5:
push-3.5:
<<: *image_definition
variables:
PYTHON_VERSION: "3.5"

image-2.7:
push-2.7:
<<: *image_definition
variables:
PYTHON_VERSION: "2.7"

image-latest:
stage: release
before_script:
- docker login -u gitlab-ci-token -p $CI_BUILD_TOKEN $CI_REGISTRY
script:
- docker build -f Dockerfile . -t $CI_REGISTRY_IMAGE
- docker tag $CI_REGISTRY_IMAGE $CI_REGISTRY_IMAGE:$CI_BUILD_REF_SLUG
- docker push $CI_REGISTRY_IMAGE
- docker push $CI_REGISTRY_IMAGE:$CI_BUILD_REF_SLUG
push-latest:
<<: *image_definition
variables:
PYTHON_VERSION: latest
only:
- master
- triggers
- triggers

clean :
stage: clean
script:
- make -e clean
only:
- master
.pre-commit-config.yaml (new file, 5 lines)

@@ -0,0 +1,5 @@
- repo: git://github.com/pre-commit/pre-commit-hooks
sha: e626cd57090d8df0be21e4df0f4e55cc3511d6ab
hooks:
- id: flake8
- id: check-json
.travis.yml (17 changed lines)

@@ -1,8 +1,13 @@
sudo: required

services:
- docker

language: python
python:
- "2.7"
- "3.4"
- "3.5"
install: "pip install -r requirements.txt"

env:
- PYV=2.7
- PYV=3.4
- PYV=3.5
# run nosetests - Tests
script: nosetests
script: make test-$PYV
@@ -1 +0,0 @@
Dockerfile-3.5
@@ -1,21 +0,0 @@
from python:2.7

RUN mkdir /cache/
ENV PIP_CACHE_DIR=/cache/

WORKDIR /usr/src/app
ADD requirements.txt /usr/src/app/
RUN pip install --use-wheel -r requirements.txt
ADD . /usr/src/app/
RUN pip install .


VOLUME /data/

RUN mkdir /senpy-plugins/

WORKDIR /senpy-plugins/
ONBUILD ADD . /senpy-plugins/
ONBUILD RUN python -m senpy --only-install -f /senpy-plugins

ENTRYPOINT ["python", "-m", "senpy", "-f", "/senpy-plugins/", "--host", "0.0.0.0"]
@@ -1,21 +0,0 @@
from python:3.4

RUN mkdir /cache/
ENV PIP_CACHE_DIR=/cache/

WORKDIR /usr/src/app
ADD requirements.txt /usr/src/app/
RUN pip install --use-wheel -r requirements.txt
ADD . /usr/src/app/
RUN pip install .


VOLUME /data/

RUN mkdir /senpy-plugins/

WORKDIR /senpy-plugins/
ONBUILD ADD . /senpy-plugins/
ONBUILD RUN python -m senpy --only-install -f /senpy-plugins

ENTRYPOINT ["python", "-m", "senpy", "-f", "/senpy-plugins/", "--host", "0.0.0.0"]
@@ -1,21 +0,0 @@
FROM python:3.5

RUN mkdir /cache/
ENV PIP_CACHE_DIR=/cache/

WORKDIR /usr/src/app
ADD requirements.txt /usr/src/app/
RUN pip install --use-wheel -r requirements.txt
ADD . /usr/src/app/
RUN pip install .


VOLUME /data/

RUN mkdir /senpy-plugins/

WORKDIR /senpy-plugins/
ONBUILD ADD . /senpy-plugins/
ONBUILD RUN python -m senpy --only-install -f /senpy-plugins

ENTRYPOINT ["python", "-m", "senpy", "-f", "/senpy-plugins/", "--host", "0.0.0.0"]
@@ -1,33 +0,0 @@
from python:2.7

RUN apt-get update
RUN apt-get -y install git
RUN mkdir -p /senpy-plugins

RUN apt-get -y install python-numpy
RUN apt-get -y install python-scipy
RUN apt-get -y install python-sklearn
RUN apt-get -y install python-gevent
RUN apt-get -y install libopenblas-dev
RUN apt-get -y install gfortran
RUN apt-get -y install libxml2-dev libxslt1-dev python-dev

#RUN pip install --upgrade pip

ADD id_rsa /root/.ssh/id_rsa
RUN chmod 700 /root/.ssh/id_rsa
RUN echo "Host github.com\n\tStrictHostKeyChecking no\n" >> /root/.ssh/config

RUN git clone https://github.com/gsi-upm/senpy /usr/src/app/
RUN git clone git@github.com:gsi-upm/senpy-plugins-enterprise /senpy-plugins/enterprise
RUN git clone https://github.com/gsi-upm/senpy-plugins-community /senpy-plugins/community

RUN pip install /usr/src/app
RUN pip install --no-use-wheel -r /senpy-plugins/enterprise/requirements.txt
RUN python -m nltk.downloader stopwords
RUN python -m nltk.downloader punkt
RUN python -m nltk.downloader maxent_treebank_pos_tagger
RUN python -m nltk.downloader wordnet

WORKDIR /senpy-plugins
ENTRYPOINT ["python", "-m", "senpy", "-f", ".", "--host", "0.0.0.0"]
@@ -1,21 +1,22 @@
from python:{{PYVERSION}}

RUN mkdir /cache/
ENV PIP_CACHE_DIR=/cache/

WORKDIR /usr/src/app
ADD requirements.txt /usr/src/app/
RUN pip install --use-wheel -r requirements.txt
ADD . /usr/src/app/
RUN pip install .
MAINTAINER J. Fernando Sánchez <jf.sanchez@upm.es>

RUN mkdir /cache/ /senpy-plugins /data/

VOLUME /data/

RUN mkdir /senpy-plugins/
ENV PIP_CACHE_DIR=/cache/ SENPY_DATA=/data

WORKDIR /senpy-plugins/
ONBUILD ADD . /senpy-plugins/
ONBUILD COPY . /senpy-plugins/
ONBUILD RUN python -m senpy --only-install -f /senpy-plugins
ONBUILD WORKDIR /senpy-plugins/

ENTRYPOINT ["python", "-m", "senpy", "-f", "/senpy-plugins/", "--host", "0.0.0.0"]

WORKDIR /usr/src/app
COPY test-requirements.txt requirements.txt /usr/src/app/
RUN pip install --use-wheel -r test-requirements.txt -r requirements.txt
COPY . /usr/src/app/
RUN pip install --no-deps --no-index .

ENTRYPOINT ["python", "-m", "senpy", "-f", "/senpy-plugins/", "--host", "0.0.0.0"]
Makefile (76 changed lines)

@@ -1,25 +1,26 @@
PYVERSIONS=3.5 3.4 2.7
PYVERSIONS=3.5 2.7
PYMAIN=$(firstword $(PYVERSIONS))
NAME=senpy
REPO=gsiupm
VERSION=$(shell ./version.sh)
VERSION=$(shell git describe --tags --dirty 2>/dev/null)
TARNAME=$(NAME)-$(VERSION).tar.gz
IMAGENAME=$(REPO)/$(NAME):$(VERSION)
TEST_COMMAND=gitlab-runner exec docker --cache-dir=/tmp/gitlabrunner --docker-volumes /tmp/gitlabrunner:/tmp/gitlabrunner --env CI_PROJECT_NAME=$(NAME)
IMAGENAME=$(REPO)/$(NAME)
IMAGEWTAG=$(IMAGENAME):$(VERSION)
action="test-${PYMAIN}"

all: build run

FORCE:
.FORCE:

version: FORCE
version: .FORCE
@echo $(VERSION) > $(NAME)/VERSION
@echo $(NAME) $(VERSION)
@echo $(VERSION)

yapf:
yapf -i -r senpy
yapf -i -r tests

dev:
init:
pip install --user pre-commit
pre-commit install

@@ -34,26 +35,27 @@ quick_build: $(addprefix build-, $(PYMAIN))

build: $(addprefix build-, $(PYVERSIONS))

build-%: Dockerfile-%
docker build -t '$(IMAGENAME)-python$*' -f Dockerfile-$* .;
build-%: version Dockerfile-%
docker build -t '$(IMAGEWTAG)-python$*' -f Dockerfile-$* .;

quick_test: $(addprefix test-,$(PYMAIN))

test: $(addprefix test-,$(PYVERSIONS))

debug-%:
@docker start $(NAME)-debug || (\
dev-%:
@docker start $(NAME)-dev$* || (\
$(MAKE) build-$*; \
docker run -d -w /usr/src/app/ -v $$PWD:/usr/src/app --entrypoint=/bin/bash -p 5000:5000 -ti --name $(NAME)-debug '$(IMAGENAME)-python$*'; \
docker exec -ti $(NAME)-debug pip install -r test-requirements.txt; \
docker run -d -w /usr/src/app/ -v $$PWD:/usr/src/app --entrypoint=/bin/bash -ti --name $(NAME)-dev$* '$(IMAGEWTAG)-python$*'; \
)\

docker attach $(NAME)-debug
docker exec -ti $(NAME)-dev$* bash

debug: debug-$(PYMAIN)
dev: dev-$(PYMAIN)

test-%:
$(TEST_COMMAND) test-$*
test-all: $(addprefix test-,$(PYVERSIONS))

test-%: build-%
docker run --rm --entrypoint /usr/local/bin/python -w /usr/src/app $(IMAGEWTAG)-python$* setup.py test

test: test-$(PYMAIN)

dist/$(TARNAME):
docker run --rm -ti -v $$PWD:/usr/src/app/ -w /usr/src/app/ python:$(PYMAIN) python setup.py sdist;

@@ -65,23 +67,19 @@ pip_test-%: sdist

pip_test: $(addprefix pip_test-,$(PYVERSIONS))

upload-%: test-%
docker push '$(IMAGENAME)-python$*'

upload: test $(addprefix upload-,$(PYVERSIONS))
docker tag '$(IMAGENAME)-python$(PYMAIN)' '$(IMAGENAME)'
docker tag '$(IMAGENAME)-python$(PYMAIN)' '$(REPO)/$(NAME)'
docker push '$(IMAGENAME)'
docker push '$(REPO)/$(NAME)'

clean:
@docker ps -a | awk '/$(REPO)\/$(NAME)/{ split($$2, vers, "-"); if(vers[0] != "${VERSION}"){ print $$1;}}' | xargs docker rm -v 2>/dev/null|| true
@docker images | awk '/$(REPO)\/$(NAME)/{ split($$2, vers, "-"); if(vers[0] != "${VERSION}"){ print $$1":"$$2;}}' | xargs docker rmi 2>/dev/null|| true
@docker rmi $(NAME)-debug 2>/dev/null || true
@docker rmi $(NAME)-dev 2>/dev/null || true

upload_git:

git_commit:
git commit -a

git_tag:
git tag ${VERSION}

git_push:
git push --tags origin master

pip_upload:

@@ -90,8 +88,20 @@ pip_upload:
pip_test: $(addprefix pip_test-,$(PYVERSIONS))

run-%: build-%
docker run --rm -p 5000:5000 -ti '$(IMAGENAME)-python$(PYMAIN)' --default-plugins
docker run --rm -p 5000:5000 -ti '$(IMAGEWTAG)-python$(PYMAIN)' --default-plugins

run: run-$(PYMAIN)

.PHONY: test test-% build-% build test pip_test run yapf dev
push-latest: build-$(PYMAIN)
docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGEWTAG)'
docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGENAME)'
docker push '$(IMAGENAME):latest'
docker push '$(IMAGEWTAG)'

push-%: build-%
docker push $(IMAGENAME):$(VERSION)-python$*

ci:
gitlab-runner exec docker --docker-volumes /var/run/docker.sock:/var/run/docker.sock --env CI_PROJECT_NAME=$(NAME) ${action}

.PHONY: test test-% test-all build-% build test pip_test run yapf push-main push-% dev ci version .FORCE
@@ -38,9 +38,9 @@ If you want to install senpy globally, use sudo instead of the ``--user`` flag.

Docker Image
************
Build the image or use the pre-built one: ``docker run -ti -p 5000:5000 gsiupm/senpy --host 0.0.0.0 --default-plugins``.
Build the image or use the pre-built one: ``docker run -ti -p 5000:5000 gsiupm/senpy --default-plugins``.

To add custom plugins, add a volume and tell senpy where to find the plugins: ``docker run -ti -p 5000:5000 -v <PATH OF PLUGINS>:/plugins gsiupm/senpy --host 0.0.0.0 --default-plugins -f /plugins``
To add custom plugins, add a volume and tell senpy where to find the plugins: ``docker run -ti -p 5000:5000 -v <PATH OF PLUGINS>:/plugins gsiupm/senpy --default-plugins -f /plugins``

Usage
-----
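The same dockerized endpoint can also be queried programmatically. A minimal sketch using Python's requests library (the query parameters mirror the ones used elsewhere in these docs; the exact output depends on the ``outformat`` parameter):

.. code:: python

    import requests

    # Query a senpy container started as shown above (port 5000, default plugins).
    # 'i' is the input text and 'algo' selects the plugin, e.g. the bundled 'rand'.
    res = requests.get('http://127.0.0.1:5000/api/',
                       params={'i': 'This is a test', 'algo': 'rand'})
    print(res.text)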
docs/api.rst (118 changed lines)

@@ -62,6 +62,7 @@ NIF API
:query o outformat: one of `turtle` (default), `text`, `json-ld`
:query p prefix: prefix for the URIs
:query algo algorithm: algorithm/plugin to use for the analysis. For a list of options, see :http:get:`/api/plugins`. If not provided, the default plugin will be used (:http:get:`/api/plugins/default`).
:query algo emotionModel: desired emotion model in the results. If the requested algorithm does not use that emotion model, there are conversion plugins specifically for this. If none of the plugins match, an error will be returned, which includes the results *as is*.

:reqheader Accept: the response content type depends on
:mailheader:`Accept` header
@@ -69,6 +70,7 @@ NIF API
header of request
:statuscode 200: no error
:statuscode 404: service not found
:statuscode 400: error while processing the request

.. http:post:: /api

@@ -93,51 +95,53 @@ NIF API
{
"@context": {
...
},
"sentiment140": {
"name": "sentiment140",
"is_activated": true,
"version": "0.1",
"extra_params": {
"@id": "extra_params_sentiment140_0.1",
"language": {
"required": false,
"@id": "lang_sentiment140",
"options": [
"es",
"en",
"auto"
],
"aliases": [
"language",
"l"
]
}
},
"@id": "sentiment140_0.1"
},
"rand": {
"name": "rand",
"is_activated": true,
"version": "0.1",
"extra_params": {
"@id": "extra_params_rand_0.1",
"language": {
"required": false,
"@id": "lang_rand",
"options": [
"es",
"en",
"auto"
],
"aliases": [
"language",
"l"
]
}
},
"@id": "rand_0.1"
}
},
"@type": "plugins",
"plugins": [
{
"name": "sentiment140",
"is_activated": true,
"version": "0.1",
"extra_params": {
"@id": "extra_params_sentiment140_0.1",
"language": {
"required": false,
"@id": "lang_sentiment140",
"options": [
"es",
"en",
"auto"
],
"aliases": [
"language",
"l"
]
}
},
"@id": "sentiment140_0.1"
}, {
"name": "rand",
"is_activated": true,
"version": "0.1",
"extra_params": {
"@id": "extra_params_rand_0.1",
"language": {
"required": false,
"@id": "lang_rand",
"options": [
"es",
"en",
"auto"
],
"aliases": [
"language",
"l"
]
}
},
"@id": "rand_0.1"
}
]
}


@@ -148,7 +152,7 @@ NIF API

.. sourcecode:: http

GET /api/plugins/rand HTTP/1.1
GET /api/plugins/rand/ HTTP/1.1
Host: localhost
Accept: application/json, text/javascript

@@ -159,6 +163,7 @@ NIF API

{
"@id": "rand_0.1",
"@type": "sentimentPlugin",
"extra_params": {
"@id": "extra_params_rand_0.1",
"language": {
@@ -185,24 +190,3 @@ NIF API

Return the information about the default plugin.

.. http:get:: /api/plugins/<pluginname>/{de}activate

{De}activate a plugin.

**Example request**:

.. sourcecode:: http

GET /api/plugins/rand/deactivate HTTP/1.1
Host: localhost
Accept: application/json, text/javascript


**Example response**:

.. sourcecode:: http

{
"@context": {},
"message": "Ok"
}
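Given the new list-based ``/api/plugins`` response shown above, a client can iterate over the ``plugins`` array directly. A small illustrative sketch (assuming a local instance and JSON output, as in the example requests above):

.. code:: python

    import requests

    # The updated response nests plugin descriptions under the "plugins" key
    # instead of keying the top-level object by plugin name.
    resp = requests.get('http://localhost:5000/api/plugins',
                        headers={'Accept': 'application/json'})
    for plugin in resp.json()['plugins']:
        print(plugin['name'], plugin['version'], plugin['is_activated'])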
docs/conversion.rst (new file, 116 lines)

@@ -0,0 +1,116 @@
Conversion
----------

Senpy includes experimental support for emotion/sentiment conversion plugins.


Use
===

Consider the original query: http://127.0.0.1:5000/api/?i=hello&algo=emoRand

The requested plugin (emoRand) returns emotions using Ekman's model (or big6 in EmotionML):

.. code:: json

... rest of the document ...
{
"@type": "emotionSet",
"onyx:hasEmotion": {
"@type": "emotion",
"onyx:hasEmotionCategory": "emoml:big6anger"
},
"prov:wasGeneratedBy": "plugins/emoRand_0.1"
}



To get these emotions in VAD space (FSRE dimensions in EmotionML), we'd do this:

http://127.0.0.1:5000/api/?i=hello&algo=emoRand&emotionModel=emoml:fsre-dimensions

This call, provided there is a valid conversion plugin from Ekman's to VAD, would return something like this:

.. code:: json

... rest of the document ...
{
"@type": "emotionSet",
"onyx:hasEmotion": {
"@type": "emotion",
"onyx:hasEmotionCategory": "emoml:big6anger"
},
"prov:wasGeneratedBy": "plugins/emoRand_0.1"
}, {
"@type": "emotionSet",
"onyx:hasEmotion": {
"@type": "emotion",
"A": 7.22,
"D": 6.28,
"V": 8.6
},
"prov:wasGeneratedBy": "plugins/Ekman2VAD_0.1"

}


That is called a *full* response, as it simply adds the converted emotion alongside.
It is also possible to get the original emotion nested within the new converted emotion, using the `conversion=nested` parameter:

.. code:: json

... rest of the document ...
{
"@type": "emotionSet",
"onyx:hasEmotion": {
"@type": "emotion",
"onyx:hasEmotionCategory": "emoml:big6anger"
},
"prov:wasGeneratedBy": "plugins/emoRand_0.1"
"onyx:wasDerivedFrom": {
"@type": "emotionSet",
"onyx:hasEmotion": {
"@type": "emotion",
"A": 7.22,
"D": 6.28,
"V": 8.6
},
"prov:wasGeneratedBy": "plugins/Ekman2VAD_0.1"
}

}


Lastly, `conversion=filtered` would only return the converted emotions.

Developing a conversion plugin
================================

Conversion plugins are discovered by the server just like any other plugin.
The difference is the slightly different API, and the need to specify the `source` and `target` of the conversion.
For instance, an emotion conversion plugin needs the following:


.. code:: yaml


---
onyx:doesConversion:
- onyx:conversionFrom: emoml:big6
onyx:conversionTo: emoml:fsre-dimensions
- onyx:conversionFrom: emoml:fsre-dimensions
onyx:conversionTo: emoml:big6




.. code:: python


class MyConversion(EmotionConversionPlugin):

def convert(self, emotionSet, fromModel, toModel, params):
pass
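A slightly fuller sketch of such a converter, loosely modelled on the centroid-based plugin added in senpy/plugins/conversion/centroids.py later in this diff (the centroid values here are illustrative placeholders, not the real Ekman2VAD numbers):

.. code:: python

    from senpy.plugins import EmotionConversionPlugin
    from senpy.models import EmotionSet, Emotion, Error

    # Hypothetical centroid table; the real plugin loads it from its .senpy file.
    CENTROIDS = {
        'emoml:big6anger': {'A': 6.95, 'D': 5.0, 'V': 2.5},
    }

    class MyConversion(EmotionConversionPlugin):

        def convert(self, emotionSet, fromModel, toModel, params):
            if fromModel != 'emoml:big6':
                raise Error('EMOTION MODEL NOT KNOWN')
            result = EmotionSet()
            vad = Emotion()
            # Sum the dimensional values of every category found, as the
            # centroid converter below does in _forward_conversion.
            for e in emotionSet.onyx__hasEmotion:
                for dim, value in CENTROIDS.get(e.onyx__hasEmotionCategory, {}).items():
                    try:
                        vad[dim] += value
                    except Exception:
                        vad[dim] = value
            result.onyx__hasEmotion.append(vad)
            yield result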
@@ -1,8 +1,3 @@
.. Senpy documentation master file, created by
sphinx-quickstart on Tue Feb 24 08:57:32 2015.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.

Welcome to Senpy's documentation!
=================================

@@ -15,5 +10,6 @@ Contents:
api
schema
plugins
conversion
demo
:maxdepth: 2
@@ -1,5 +1,7 @@
Developing new plugins
----------------------
This document describes how to develop a new analysis plugin. For an example of conversion plugins, see :doc:`conversion`.

Each plugin represents a different analysis process. There are two types of files that are needed by senpy for loading a plugin:

- Definition file, has the ".senpy" extension.
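Whatever its format, the definition must at least carry the fields checked by Senpy.validate_info which, as of the change to senpy/extensions.py further down, also requires a description. A minimal sketch of that metadata, mirroring the dictionary used in tests/test_extensions.py:

.. code:: python

    # Minimal plugin metadata accepted by Senpy.validate_info after this change.
    # 'requirements' is optional; when present, install_deps() pip-installs it.
    info = {
        'name': 'TestPip',        # plugin name
        'module': 'dummy',        # python module that implements the plugin
        'description': None,      # now required alongside name/module/version
        'version': 0,
        'requirements': ['noop'],
    }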
@@ -48,8 +48,8 @@ Once the server is launched, there is a basic endpoint in the server, which prov

In case you want to know the different endpoints of the server, there is more information available in the NIF API section_.

Video example
=============
CLI
===

This video shows how to use senpy through command-line tool.

@@ -58,18 +58,23 @@ https://asciinema.org/a/9uwef1ghkjk062cw2t4mhzpyk
Request example in python
=========================

This example shows how to make a request to a plugin.
This example shows how to make a request to the default plugin:

.. code:: python

import requests
import json
from senpy.client import Client

r = requests.get('http://127.0.0.1:5000/api/?algo=rand&i=Testing')
response = r.content.decode('utf-8')
response_json = json.loads(response)
c = Client('http://127.0.0.1:5000/api/')
r = c.analyse('hello world')

for entry in r.entries:
print('{} -> {}'.format(entry.text, entry.emotions))



.. _section: http://senpy.readthedocs.org/en/latest/api.html


Conversion
==========
See :doc:`conversion`
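The client accepts extra keyword arguments, which it forwards as query parameters (see tests/test_client.py below), so the conversion parameters from :doc:`conversion` can be used the same way. A speculative sketch, assuming a conversion plugin for the requested model is installed:

.. code:: python

    from senpy.client import Client

    c = Client('http://127.0.0.1:5000/api/')
    # emotionModel/conversion are the parameters described in the conversion docs;
    # passing them through analyse() as keywords is an assumption of this sketch.
    r = c.analyse('hello world',
                  emotionModel='emoml:fsre-dimensions',
                  conversion='nested')
    for entry in r.entries:
        print(entry.emotions)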
@@ -17,7 +17,6 @@
"""
Sentiment analysis server in Python
"""
from __future__ import print_function
from .version import __version__

import logging
@@ -18,7 +18,6 @@ class Client(object):
try:
resp = models.from_dict(response.json())
resp.validate(resp)
return resp
except Exception as ex:
logger.error(('There seems to be a problem with the response:\n'
'\tURL: {url}\n'
@@ -33,3 +32,6 @@ class Client(object):
code=response.status_code,
content=response.content))
raise ex
if isinstance(resp, models.Error):
raise resp
return resp
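With this change the client raises Error responses instead of returning them, so calls should be guarded. A minimal usage sketch (mirroring tests/test_client.py below):

.. code:: python

    from senpy.client import Client
    from senpy.models import Error

    client = Client('http://127.0.0.1:5000/api/')
    try:
        result = client.analyse('hello', algorithm='NONEXISTENT')
    except Error as ex:
        # Error documents returned by the server are now raised as exceptions.
        print('Server returned an error:', ex)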
@@ -13,10 +13,11 @@ from .api import API_PARAMS, NIF_PARAMS, parse_params
from threading import Thread

import os
import copy
import fnmatch
import inspect
import sys
import imp
import importlib
import logging
import traceback
import yaml
@@ -180,7 +181,7 @@ class Senpy(object):
newentries = []
for i in resp.entries:
if output == "full":
newemotions = i.emotions.copy()
newemotions = copy.deepcopy(i.emotions)
else:
newemotions = []
for j in i.emotions:
@@ -252,7 +253,7 @@ class Senpy(object):
logger.error(msg)
raise Error(msg)

if sync:
if sync or 'async' in plugin and not plugin.async:
act()
else:
th = Thread(target=act)
@@ -276,7 +277,7 @@ class Senpy(object):
"Error deactivating plugin {}: {}".format(plugin.name, ex))
logger.error("Trace: {}".format(traceback.format_exc()))

if sync:
if sync or 'async' in plugin and not plugin.async:
deact()
else:
th = Thread(target=deact)
@@ -284,11 +285,11 @@ class Senpy(object):

@classmethod
def validate_info(cls, info):
return all(x in info for x in ('name', 'module', 'version'))
return all(x in info for x in ('name', 'module', 'description', 'version'))

def install_deps(self):
for i in self.plugins.values():
self._install_deps(i._info)
self._install_deps(i)

@classmethod
def _install_deps(cls, info=None):
@@ -302,6 +303,13 @@ class Senpy(object):
logger.info('Installing requirements: ' + str(requirements))
pip.main(pip_args)

@classmethod
def _load_module(cls, name, root):
sys.path.append(root)
tmp = importlib.import_module(name)
sys.path.remove(root)
return tmp

@classmethod
def _load_plugin_from_info(cls, info, root):
if not cls.validate_info(info):
@@ -309,11 +317,10 @@ class Senpy(object):
return None, None
module = info["module"]
name = info["name"]
sys.path.append(root)
(fp, pathname, desc) = imp.find_module(module, [root, ])

cls._install_deps(info)
tmp = imp.load_module(module, fp, pathname, desc)
sys.path.remove(root)
tmp = cls._load_module(module, root)

candidate = None
for _, obj in inspect.getmembers(tmp):
if inspect.isclass(obj) and inspect.getmodule(obj) == tmp:
@@ -3,11 +3,12 @@ standard_library.install_aliases()

import inspect
import os.path
import os
import pickle
import logging
import tempfile
import copy
from . import models
from .. import models

logger = logging.getLogger(__name__)

@@ -92,8 +93,8 @@ class ShelfMixin(object):
@property
def shelf_file(self):
if 'shelf_file' not in self or not self['shelf_file']:
self.shelf_file = os.path.join(tempfile.gettempdir(),
self.name + '.p')
sd = os.environ.get('SENPY_DATA', tempfile.gettempdir())
self.shelf_file = os.path.join(sd, self.name + '.p')
return self['shelf_file']

def save(self):
senpy/plugins/conversion/__init__.py (new empty file)

senpy/plugins/conversion/centroids.py (new file, 52 lines)

@@ -0,0 +1,52 @@
from senpy.plugins import EmotionConversionPlugin
from senpy.models import EmotionSet, Emotion, Error

import logging
logger = logging.getLogger(__name__)


class CentroidConversion(EmotionConversionPlugin):

def _forward_conversion(self, original):
"""Sum the VAD value of all categories found."""
res = Emotion()
for e in original.onyx__hasEmotion:
category = e.onyx__hasEmotionCategory
if category in self.centroids:
for dim, value in self.centroids[category].items():
try:
res[dim] += value
except Exception:
res[dim] = value
return res

def _backwards_conversion(self, original):
"""Find the closest category"""
dimensions = list(self.centroids.values())[0]

def distance(e1, e2):
return sum((e1[k] - e2.get(self.aliases[k], 0)) for k in dimensions)

emotion = ''
mindistance = 10000000000000000000000.0
for state in self.centroids:
d = distance(self.centroids[state], original)
if d < mindistance:
mindistance = d
emotion = state
result = Emotion(onyx__hasEmotionCategory=emotion)
return result

def convert(self, emotionSet, fromModel, toModel, params):

cf, ct = self.centroids_direction
logger.debug('{}\n{}\n{}\n{}'.format(emotionSet, fromModel, toModel, params))
e = EmotionSet()
if fromModel == cf:
e.onyx__hasEmotion.append(self._forward_conversion(emotionSet))
elif fromModel == ct:
for i in emotionSet.onyx__hasEmotion:
e.onyx__hasEmotion.append(self._backwards_conversion(i))
else:
raise Error('EMOTION MODEL NOT KNOWN')
yield e
@@ -1,56 +0,0 @@
from senpy.plugins import EmotionConversionPlugin
from senpy.models import EmotionSet, Emotion, Error

import logging
logger = logging.getLogger(__name__)

import math


class WNA2VAD(EmotionConversionPlugin):

def _ekman_to_vad(self, ekmanSet):
potency = 0
arousal = 0
dominance = 0
for e in ekmanSet.onyx__hasEmotion:
category = e.onyx__hasEmotionCategory
centroid = self.centroids[category]
potency += centroid['V']
arousal += centroid['A']
dominance += centroid['D']
e = Emotion({'emoml:potency': potency,
'emoml:arousal': arousal,
'emoml:dominance': dominance})
return e

def _vad_to_ekman(self, VADEmotion):
V = VADEmotion['emoml:valence']
A = VADEmotion['emoml:potency']
D = VADEmotion['emoml:dominance']
emotion = ''
value = 10000000000000000000000.0
for state in self.centroids:
valence = V - self.centroids[state]['V']
arousal = A - self.centroids[state]['A']
dominance = D - self.centroids[state]['D']
new_value = math.sqrt((valence**2) +
(arousal**2) +
(dominance**2))
if new_value < value:
value = new_value
emotion = state
result = Emotion(onyx__hasEmotionCategory=emotion)
return result

def convert(self, emotionSet, fromModel, toModel, params):
logger.debug('{}\n{}\n{}\n{}'.format(emotionSet, fromModel, toModel, params))
e = EmotionSet()
if fromModel == 'emoml:big6':
e.onyx__hasEmotion.append(self._ekman_to_vad(emotionSet))
elif fromModel == 'emoml:fsre-dimensions':
for i in emotionSet.onyx__hasEmotion:
e.onyx__hasEmotion.append(self._vad_to_ekman(e))
else:
raise Error('EMOTION MODEL NOT KNOWN')
yield e
@@ -1,13 +1,13 @@
---
name: Ekman2VAD
module: ekman2vad
description: Plugin to convert from Ekman to VAD
module: senpy.plugins.conversion.centroids
description: Plugin to convert emotion sets from Ekman to VAD
version: 0.1
onyx:doesConversion:
- onyx:conversionFrom: emoml:big6
onyx:conversionTo: emoml:fsre-dimensions
- onyx:conversionFrom: emoml:fsre-dimensions
onyx:conversionTo: wna:WNAModel
onyx:conversionTo: emoml:big6
centroids:
emoml:big6anger:
A: 6.95
@@ -29,7 +29,10 @@ centroids:
A: 5.21
D: 2.82
V: 2.21
centroids_direction:
- emoml:big6
- emoml:fsre-dimensions
aliases:
A: emoml:arousal
V: emoml:potency
V: emoml:valence
D: emoml:dominance
@@ -9,4 +9,6 @@ test=pytest
ignore = E402
max-line-length = 100
[bdist_wheel]
universal=1
universal=1
[tool:pytest]
addopts = --cov=senpy --cov-report term-missing
@@ -1,3 +1,3 @@
pytest
mock
pytest-cov
pytest
tests/test_api.py (new file, 68 lines)

@@ -0,0 +1,68 @@
import logging

logger = logging.getLogger(__name__)

from unittest import TestCase
from senpy.api import parse_params, API_PARAMS, NIF_PARAMS, WEB_PARAMS
from senpy.models import Error


class APITest(TestCase):

def test_api_params(self):
"""The API should not define any required parameters without a default"""
parse_params({}, spec=API_PARAMS)

def test_web_params(self):
"""The WEB should not define any required parameters without a default"""
parse_params({}, spec=WEB_PARAMS)

def test_basic(self):
a = {}
try:
parse_params(a, spec=NIF_PARAMS)
raise AssertionError()
except Error:
pass
a = {'input': 'hello'}
p = parse_params(a, spec=NIF_PARAMS)
assert 'input' in p
b = {'i': 'hello'}
p = parse_params(b, spec=NIF_PARAMS)
assert 'input' in p

def test_plugin(self):
query = {}
plug_params = {
'hello': {
'aliases': ['hello', 'hiya'],
'required': True
}
}
try:
parse_params(query, spec=plug_params)
raise AssertionError()
except Error:
pass
query['hello'] = 'world'
p = parse_params(query, spec=plug_params)
assert 'hello' in p
assert p['hello'] == 'world'
del query['hello']

query['hiya'] = 'dlrow'
p = parse_params(query, spec=plug_params)
assert 'hello' in p
assert 'hiya' in p
assert p['hello'] == 'dlrow'

def test_default(self):
spec = {
'hello': {
'required': True,
'default': 1
}
}
p = parse_params({}, spec=spec)
assert 'hello' in p
assert p['hello'] == 1
@@ -34,8 +34,11 @@ class ModelsTest(TestCase):
url=endpoint + '/', method='GET', params={'input': 'hello'})
error = Call(Error('Nothing'))
with patch('requests.request', return_value=error) as patched:
resp = client.analyse(input='hello', algorithm='NONEXISTENT')
assert isinstance(resp, Error)
try:
client.analyse(input='hello', algorithm='NONEXISTENT')
raise Exception('Exceptions should be raised. This is not golang')
except Error:
pass
patched.assert_called_with(
url=endpoint + '/',
method='GET',
@@ -1,5 +1,6 @@
from __future__ import print_function
import os
from copy import deepcopy
import logging

try:
@@ -9,7 +10,7 @@ except ImportError:

from functools import partial
from senpy.extensions import Senpy
from senpy.models import Error
from senpy.models import Error, Results, Entry, EmotionSet, Emotion
from flask import Flask
from unittest import TestCase

@@ -42,6 +43,7 @@ class ExtensionsTest(TestCase):
info = {
'name': 'TestPip',
'module': 'dummy',
'description': None,
'requirements': ['noop'],
'version': 0
}
@@ -51,6 +53,7 @@ class ExtensionsTest(TestCase):
assert module
import noop
dir(noop)
self.senpy.install_deps()

def test_installing(self):
""" Enabling a plugin """
@@ -119,3 +122,42 @@ class ExtensionsTest(TestCase):
def test_load_default_plugins(self):
senpy = Senpy(plugin_folder=self.dir, default_plugins=True)
assert len(senpy.plugins) > 1

def test_convert_emotions(self):
self.senpy.activate_all()
plugin = {
'id': 'imaginary',
'onyx:usesEmotionModel': 'emoml:fsre-dimensions'
}
eSet1 = EmotionSet()
eSet1['onyx:hasEmotion'].append(Emotion({
'emoml:arousal': 1,
'emoml:potency': 0,
'emoml:valence': 0
}))
response = Results({
'entries': [Entry({
'text': 'much ado about nothing',
'emotions': [eSet1]
})]
})
params = {'emotionModel': 'emoml:big6',
'conversion': 'full'}
r1 = deepcopy(response)
self.senpy.convert_emotions(r1,
plugin,
params)
assert len(r1.entries[0].emotions) == 2
params['conversion'] = 'nested'
r2 = deepcopy(response)
self.senpy.convert_emotions(r2,
plugin,
params)
assert len(r2.entries[0].emotions) == 1
assert r2.entries[0].emotions[0]['prov:wasDerivedFrom'] == eSet1
params['conversion'] = 'filtered'
r3 = deepcopy(response)
self.senpy.convert_emotions(r3,
plugin,
params)
assert len(r3.entries[0].emotions) == 1
@@ -143,7 +143,3 @@ class ModelsTest(TestCase):
print(t)
g = rdflib.Graph().parse(data=t, format='turtle')
assert len(g) == len(triples)

def test_convert_emotions(self):
"""It should be possible to convert between different emotion models"""
pass
@@ -1,2 +0,0 @@
#!/bin/sh
VERSION=$(git describe --long --tags --dirty)