Compare commits
416 Commits
0.8.x-asyn
...
master
Author | SHA1 | Date | |
---|---|---|---|
|
297e9e8106 | ||
|
1eb8e432af | ||
|
8236569818 | ||
|
98d368dd9a | ||
|
9747140b54 | ||
|
e915766449 | ||
|
b33a70620b | ||
|
e324c730e2 | ||
|
a5c135faac | ||
|
894942b3ab | ||
|
9bb980f6b4 | ||
|
66371c1cd8 | ||
|
f3d4415ffb | ||
|
3f227986f3 | ||
|
f2f28644a1 | ||
|
82a456705c | ||
|
3c35b4ac91 | ||
|
268d2a4848 | ||
|
5330ae93fc | ||
|
4f95fbcbd1 | ||
|
5b28b6d1b4 | ||
|
e1d888ebd6 | ||
|
7f712952be | ||
|
07348de59a | ||
|
39123cea8a | ||
|
8a6ef3852a | ||
|
99c8782a92 | ||
|
efa93f5456 | ||
|
55e5ce3a66 | ||
|
92b654b36c | ||
|
7febb4d673 | ||
|
7ca5057705 | ||
|
8489457370 | ||
|
e9266af924 | ||
|
a7849bb029 | ||
|
bd083a0e55 | ||
|
a390f51097 | ||
|
adbcd7d196 | ||
|
4b1eecd1c2 | ||
|
c1e4e092a7 | ||
|
a0abbede49 | ||
|
c5a2cf23cb | ||
|
49a183aeb6 | ||
|
3088d9474a | ||
|
4c73797246 | ||
|
0f5bc514b7 | ||
|
228eb6321b | ||
|
d575220712 | ||
|
7ae493b3f3 | ||
|
435d107677 | ||
|
bf2feb9839 | ||
|
5c98326acf | ||
|
d961d8ac5b | ||
|
c4321dc500 | ||
|
3bd3c87af4 | ||
|
45421f4613 | ||
|
7aa69e3d02 | ||
|
a20252e4bd | ||
|
96ec10d791 | ||
|
6858a139ed | ||
|
4f286057c9 | ||
|
9758a2977f | ||
|
8a516d927e | ||
|
fa993c6e2a | ||
|
238f76442c | ||
|
a015ee81f7 | ||
|
d665017154 | ||
|
00832e2e1c | ||
|
4ecabadae9 | ||
|
bb6f9ee367 | ||
|
80acb9307c | ||
|
94394af20b | ||
|
d5f9ef88b2 | ||
|
675a905ab4 | ||
|
4ba30304a4 | ||
|
41aa142ce0 | ||
|
4507449266 | ||
|
b48730137d | ||
|
f1ec057b16 | ||
|
f6ca82cac8 | ||
|
318acd5a71 | ||
|
2e91a83eb6 | ||
|
c8f6f5613d | ||
|
748d1a00bd | ||
|
a82e4ed440 | ||
|
c939b095de | ||
|
6dd4a44924 | ||
|
d8c47220b1 | ||
|
0e4146ed8d | ||
|
6d3fc6f861 | ||
|
666632a032 | ||
|
9dbe22b81f | ||
|
4291c5eabf | ||
|
7c7a815d1a | ||
|
9355d27e71 | ||
|
a3eb8f196c | ||
|
00ffbb3804 | ||
|
13cf0c71c5 | ||
|
0ed434ef0c | ||
|
dbc238989b | ||
|
48ba936a7b | ||
|
e5662d482e | ||
|
bbe91e1924 | ||
|
c7091e6323 | ||
|
61181db199 | ||
|
a1663a3f31 | ||
|
83b23dbdf4 | ||
|
4675d9acf1 | ||
|
6832a2816d | ||
|
f11439d944 | ||
|
7a8abf1823 | ||
|
a21ce0d90e | ||
|
a964e586d7 | ||
|
b15a0d7dbe | ||
|
bce42b5bb4 | ||
|
f92617d147 | ||
|
2a773d45aa | ||
|
e4e1a74971 | ||
|
1659285f0b | ||
|
57016e1380 | ||
|
54da48b548 | ||
|
62142482dc | ||
|
982baa04cf | ||
|
c52a894017 | ||
|
1313853788 | ||
|
e51b659030 | ||
|
2a4cc96905 | ||
|
7c959aace8 | ||
|
15ac26428a | ||
|
402b49f43f | ||
|
98ec4817cf | ||
|
08c1b4ce79 | ||
|
50a0599597 | ||
|
23c6cdd58d | ||
|
7825802341 | ||
|
4a0b6c1bf4 | ||
|
cd73cd3fc6 | ||
|
704aba2ff0 | ||
|
bf67422f2f | ||
|
1eec6ecbad | ||
|
bec22e44a0 | ||
|
697e779767 | ||
|
48f5ffafa1 | ||
|
f3961378e0 | ||
|
fbde8a9462 | ||
|
73f7cbbe8a | ||
|
07a41236f8 | ||
|
55db97cf62 | ||
|
d8dead1908 | ||
|
87dcdb9fbc | ||
|
67ef4b60bd | ||
|
da4b11e5b5 | ||
|
c0aa7ddc3c | ||
|
5e2ada1654 | ||
|
7a188586c5 | ||
|
b768b215c5 | ||
|
d1f1b9a15a | ||
|
52a0f3f4c8 | ||
|
55c32dcd7c | ||
|
582ae8a340 | ||
|
0093bc34d5 | ||
|
67bae9a20d | ||
|
551a5cb176 | ||
|
d6f4cc2dd2 | ||
|
4af692091a | ||
|
ec68ff0b90 | ||
|
738da490db | ||
|
d29c42fd2e | ||
|
23c88d0acc | ||
|
dcaaa591b7 | ||
|
15ab5f4c25 | ||
|
92189822d8 | ||
|
fbb418c365 | ||
|
081078ddd6 | ||
|
7c8dbf3262 | ||
|
41dc89b23b | ||
|
a951696317 | ||
|
1087692de2 | ||
|
3e2b8baeb2 | ||
|
21a5a3f201 | ||
|
abd401f863 | ||
|
bfc588a915 | ||
|
f93eed2cf5 | ||
|
0204e0b8e9 | ||
|
701f46b9f1 | ||
|
d1eca04eeb | ||
|
89f3a0eca9 | ||
|
df7efbc57d | ||
|
aa54d1c9c8 | ||
|
869c00f709 | ||
|
e329e84eef | ||
|
55be0e57da | ||
|
778746c5e8 | ||
|
19278d0acd | ||
|
a75ba6994d | ||
|
919c4a07a2 | ||
|
42224e343c | ||
|
f0c211c00a | ||
|
694201d8d3 | ||
|
e8413fb645 | ||
|
24d85b18bb | ||
|
d150321741 | ||
|
4f88009bd7 | ||
|
390225df45 | ||
|
1f0703d535 | ||
|
b03e03fd0a | ||
|
b20982cae1 | ||
|
79e107bdcd | ||
|
c6e79fa50d | ||
|
f6bf7459a8 | ||
|
c23f7986b4 | ||
|
300f4c374a | ||
|
97cd443c16 | ||
|
8fe7616bae | ||
|
49afd2cfdd | ||
|
1543f5550e | ||
|
c1174189c6 | ||
|
ea536c0daf | ||
|
a67e0e45d2 | ||
|
f04cbeeddb | ||
|
5f4dc3ac5d | ||
|
1a3b8ee703 | ||
|
9c61c18220 | ||
|
21ff551769 | ||
|
3dc27f12f7 | ||
|
9957486f4f | ||
|
0dc93fc16b | ||
|
cbef9630b4 | ||
|
a3a9414073 | ||
|
14bcfd511f | ||
|
3d23370a59 | ||
|
f9a75f4e21 | ||
|
99d4bc70bc | ||
|
fc94b45448 | ||
|
53db670715 | ||
|
963211caf2 | ||
|
2ca9d36f80 | ||
|
b1dbe432c1 | ||
|
921d7f23ce | ||
|
8bfb88c926 | ||
|
8ad2fae774 | ||
|
eb09da878f | ||
|
407fd718a3 | ||
|
3bba29fc4e | ||
|
4344fccd57 | ||
|
0ccdf735e1 | ||
|
7444aa7ec8 | ||
|
5d68c0225a | ||
|
cda9f5c4ca | ||
|
a825e91425 | ||
|
473efd8dd7 | ||
|
6f489acdfc | ||
|
a73f3112ab | ||
|
5a53ba23e2 | ||
|
e3a9a3464c | ||
|
3e3f5555ff | ||
|
7aa91d1d60 | ||
|
2480ec310e | ||
|
9b956b2358 | ||
|
83ddd5e990 | ||
|
d77d01c3e1 | ||
|
f6271495c1 | ||
|
1cccd9d5cb | ||
|
a243f68bfc | ||
|
fca0ac00c4 | ||
|
9a2932b569 | ||
|
9acee50837 | ||
|
cd22291dc9 | ||
|
85aef3d15f | ||
|
b6f00385ab | ||
|
8c5f894843 | ||
|
9870391088 | ||
|
ad2051307a | ||
|
7fd16a17fb | ||
|
7547fc49af | ||
|
b671ff51f9 | ||
|
7f44f9e85d | ||
|
aaad5e8f2b | ||
|
9bea267f52 | ||
|
4d7e8e7589 | ||
|
8e4578dc25 | ||
|
24c97256e8 | ||
|
312e7f7f12 | ||
|
c555b9547e | ||
|
991ade8f4d | ||
|
1104e816cb | ||
|
c19d03b41d | ||
|
42c9068991 | ||
|
96843827bd | ||
|
d76e4618fe | ||
|
c9bc485535 | ||
|
6d7575bbcd | ||
|
852bcc72ba | ||
|
bf5ed1bd7d | ||
|
00da75153a | ||
|
fa082e11e7 | ||
|
6331d31b18 | ||
|
8ee324f566 | ||
|
188c33332a | ||
|
955e17eb2a | ||
|
3e0f55dcff | ||
|
2ea01aef42 | ||
|
147fd4a333 | ||
|
e31bca7016 | ||
|
7956d54c35 | ||
|
5bab9a6a02 | ||
|
69ac95bb08 | ||
|
6b843a4384 | ||
|
65d6e47513 | ||
|
8d56a0b630 | ||
|
e7ac6e66b0 | ||
|
0f8d1dff69 | ||
|
236183593c | ||
|
7637498517 | ||
|
8c70433312 | ||
|
ce83fb3981 | ||
|
28f29d159a | ||
|
c803f60fd4 | ||
|
12eae16e37 | ||
|
f3372c27b6 | ||
|
b6de72a143 | ||
|
0f89b92457 | ||
|
ea91e3e4a4 | ||
|
f76b777b9f | ||
|
dee007eacf | ||
|
1ef9dac86a | ||
|
18486aa3e0 | ||
|
dcc965ea63 | ||
|
400f647b7b | ||
|
ec1a2ff5f9 | ||
|
86fdd8678a | ||
|
e112dd55ce | ||
|
60ef304108 | ||
|
1a9dd07f7e | ||
|
1b8a24c530 | ||
|
7f765d004f | ||
|
b22ac843b6 | ||
|
e88ca98438 | ||
|
65bb517fd2 | ||
|
23d15e9274 | ||
|
23a5595d18 | ||
|
e341cc82fa | ||
|
b80b0c7947 | ||
|
1ca6ec52fd | ||
|
7927cf1587 | ||
|
13cefbedfb | ||
|
4ba9535d56 | ||
|
e582ef07d4 | ||
|
ef40bdb545 | ||
|
e0b4c76238 | ||
|
14c86ec38c | ||
|
d3d05b3218 | ||
|
eababcadb0 | ||
|
7efece0224 | ||
|
53138e6942 | ||
|
1302b0b93c | ||
|
ad1092690b | ||
|
e35e810ede | ||
|
d5ddcb8d3f | ||
|
54c0c9c437 | ||
|
6e970d01f2 | ||
|
1d0a54ecd2 | ||
|
800d4a9c2c | ||
|
035ef98b7e | ||
|
d7e115d7c2 | ||
|
548cb4c9ba | ||
|
7e5b55ff9c | ||
|
8b2c3e8d40 | ||
|
0c8f98d466 | ||
|
cc298742ec | ||
|
250052fb99 | ||
|
603e086606 | ||
|
a8614bab0c | ||
|
85db4db01d | ||
|
9f7a0e6907 | ||
|
70ca74b03c | ||
|
c9e6d78183 | ||
|
1a582c0843 | ||
|
241f478a68 | ||
|
5427b02a1a | ||
|
df2dc17ac0 | ||
|
0394bcd69c | ||
|
cbeb3adbdb | ||
|
efb305173e | ||
|
2288b04c92 | ||
|
7899cb4d33 | ||
|
62ddca79ac | ||
|
cc112c5ac5 | ||
|
65b8873092 | ||
|
9ea177a780 | ||
|
bb0b0fadc2 | ||
|
076813cb1a | ||
|
51b4737c43 | ||
|
140ea9c159 | ||
|
7250f56f17 | ||
|
b40ac19130 | ||
|
37b130abaa | ||
|
99e13f32e5 | ||
|
71d976acb2 | ||
|
c57cfff0cc | ||
|
f9c4e4bd59 | ||
|
7b583f504c | ||
|
6f3c08f8aa | ||
|
67ff20440a | ||
|
82e3062a6b | ||
|
864ca75b8f | ||
|
ac5ac2d06b | ||
|
1f5188c251 | ||
|
90b55a4b27 | ||
|
aa628518ec | ||
|
5e8bc717a8 | ||
|
0e9db7081c | ||
|
17976d85b1 | ||
|
94d82238b8 | ||
|
ed22679e7c | ||
|
6561201cc2 |
3
.gitignore
vendored
@ -7,4 +7,5 @@ README.html
|
||||
__pycache__
|
||||
VERSION
|
||||
Dockerfile-*
|
||||
Dockerfile
|
||||
Dockerfile
|
||||
senpy_data
|
172
.gitlab-ci.yml
@ -1,65 +1,133 @@
|
||||
image: gsiupm/dockermake:latest
|
||||
|
||||
|
||||
# Uncomment if you want to use docker-in-docker
|
||||
# image: gsiupm/dockermake:latest
|
||||
# services:
|
||||
# - docker:dind
|
||||
# When using dind, it's wise to use the overlayfs driver for
|
||||
# improved performance.
|
||||
variables:
|
||||
DOCKER_DRIVER: overlay
|
||||
DOCKERFILE: Dockerfile
|
||||
IMAGENAME: $CI_REGISTRY_IMAGE
|
||||
|
||||
stages:
|
||||
- test
|
||||
- push
|
||||
- clean
|
||||
- publish
|
||||
- test_image
|
||||
- deploy
|
||||
|
||||
.test: &test_definition
|
||||
stage: test
|
||||
script:
|
||||
- make -e test-$PYTHON_VERSION
|
||||
|
||||
test-3.5:
|
||||
<<: *test_definition
|
||||
variables:
|
||||
KUBENS: senpy
|
||||
LATEST_IMAGE: "${HUB_REPO}:${CI_COMMIT_SHORT_SHA}"
|
||||
SENPY_DATA: "/senpy-data/" # This is configured in the CI job
|
||||
NLTK_DATA: "/senpy-data/nltk_data" # Store NLTK downloaded data
|
||||
|
||||
docker:
|
||||
stage: publish
|
||||
image:
|
||||
name: gcr.io/kaniko-project/executor:debug
|
||||
entrypoint: [""]
|
||||
variables:
|
||||
PYTHON_VERSION: "3.5"
|
||||
|
||||
test-2.7:
|
||||
<<: *test_definition
|
||||
variables:
|
||||
PYTHON_VERSION: "2.7"
|
||||
|
||||
|
||||
.image: &image_definition
|
||||
stage: push
|
||||
before_script:
|
||||
- docker login -u gitlab-ci-token -p $CI_BUILD_TOKEN $CI_REGISTRY
|
||||
PYTHON_VERSION: "3.10"
|
||||
tags:
|
||||
- docker
|
||||
script:
|
||||
- make -e push-$PYTHON_VERSION
|
||||
- echo $CI_COMMIT_TAG > senpy/VERSION
|
||||
- sed "s/{{PYVERSION}}/$PYTHON_VERSION/" Dockerfile.template > Dockerfile
|
||||
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"},\"https://index.docker.io/v1/\":{\"auth\":\"$HUB_AUTH\"}}}" > /kaniko/.docker/config.json
|
||||
# The skip-tls-verify flag is there because our registry certificate is self signed
|
||||
- /kaniko/executor --context $CI_PROJECT_DIR --skip-tls-verify --dockerfile $CI_PROJECT_DIR/Dockerfile --destination $CI_REGISTRY_IMAGE:$CI_COMMIT_TAG --destination $HUB_REPO:$CI_COMMIT_TAG
|
||||
only:
|
||||
- tags
|
||||
- triggers
|
||||
|
||||
push-3.5:
|
||||
<<: *image_definition
|
||||
docker-latest:
|
||||
stage: publish
|
||||
image:
|
||||
name: gcr.io/kaniko-project/executor:debug
|
||||
entrypoint: [""]
|
||||
variables:
|
||||
PYTHON_VERSION: "3.5"
|
||||
|
||||
push-2.7:
|
||||
<<: *image_definition
|
||||
variables:
|
||||
PYTHON_VERSION: "2.7"
|
||||
|
||||
push-latest:
|
||||
<<: *image_definition
|
||||
variables:
|
||||
PYTHON_VERSION: latest
|
||||
only:
|
||||
- master
|
||||
- triggers
|
||||
|
||||
clean :
|
||||
stage: clean
|
||||
PYTHON_VERSION: "3.10"
|
||||
tags:
|
||||
- docker
|
||||
script:
|
||||
- make -e clean
|
||||
- echo git.${CI_COMMIT_SHORT_SHA} > senpy/VERSION
|
||||
- sed "s/{{PYVERSION}}/$PYTHON_VERSION/" Dockerfile.template > Dockerfile
|
||||
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"},\"https://index.docker.io/v1/\":{\"auth\":\"$HUB_AUTH\"}}}" > /kaniko/.docker/config.json
|
||||
# The skip-tls-verify flag is there because our registry certificate is self signed
|
||||
- /kaniko/executor --context $CI_PROJECT_DIR --skip-tls-verify --dockerfile $CI_PROJECT_DIR/Dockerfile --destination $LATEST_IMAGE --destination "${HUB_REPO}:latest"
|
||||
only:
|
||||
- master
|
||||
refs:
|
||||
- master
|
||||
|
||||
testimage:
|
||||
only:
|
||||
- tags
|
||||
tags:
|
||||
- docker
|
||||
stage: test_image
|
||||
image: "$CI_REGISTRY_IMAGE:$CI_COMMIT_TAG"
|
||||
script:
|
||||
- python -m senpy --no-run --test
|
||||
|
||||
testpy37:
|
||||
tags:
|
||||
- docker
|
||||
variables:
|
||||
SENPY_STRICT: "false"
|
||||
image: python:3.7
|
||||
stage: test
|
||||
script:
|
||||
- pip install -r requirements.txt -r test-requirements.txt
|
||||
- python setup.py test
|
||||
|
||||
testpy310:
|
||||
tags:
|
||||
- docker
|
||||
variables:
|
||||
SENPY_STRICT: "true"
|
||||
image: python:3.10
|
||||
stage: test
|
||||
script:
|
||||
- pip install -r requirements.txt -r test-requirements.txt -r extra-requirements.txt
|
||||
- python setup.py test
|
||||
|
||||
push_pypi:
|
||||
only:
|
||||
- tags
|
||||
tags:
|
||||
- docker
|
||||
image: python:3.10
|
||||
stage: publish
|
||||
script:
|
||||
- echo $CI_COMMIT_TAG > senpy/VERSION
|
||||
- pip install twine
|
||||
- python setup.py sdist bdist_wheel
|
||||
- TWINE_PASSWORD=$PYPI_PASSWORD TWINE_USERNAME=$PYPI_USERNAME python -m twine upload dist/*
|
||||
|
||||
check_pypi:
|
||||
only:
|
||||
- tags
|
||||
tags:
|
||||
- docker
|
||||
image: python:3.10
|
||||
stage: deploy
|
||||
script:
|
||||
- pip install senpy==$CI_COMMIT_TAG
|
||||
# Allow PYPI to update its index before we try to install
|
||||
when: delayed
|
||||
start_in: 10 minutes
|
||||
|
||||
latest-demo:
|
||||
only:
|
||||
refs:
|
||||
- master
|
||||
tags:
|
||||
- docker
|
||||
image: alpine/k8s:1.22.6
|
||||
stage: deploy
|
||||
environment: production
|
||||
variables:
|
||||
KUBECONFIG: "/kubeconfig"
|
||||
# Same image as docker-latest
|
||||
IMAGEWTAG: "${LATEST_IMAGE}"
|
||||
KUBEAPP: "senpy"
|
||||
script:
|
||||
- echo "${KUBECONFIG_RAW}" > $KUBECONFIG
|
||||
- kubectl --kubeconfig $KUBECONFIG version
|
||||
- cd k8s/
|
||||
- cat *.yaml *.tmpl 2>/dev/null | envsubst | kubectl --kubeconfig $KUBECONFIG apply --namespace ${KUBENS:-default} -f -
|
||||
- kubectl --kubeconfig $KUBECONFIG get all,ing -l app=${KUBEAPP} --namespace=${KUBENS:-default}
|
||||
|
27
.makefiles/README.md
Normal file
@ -0,0 +1,27 @@
|
||||
These makefiles are recipes for several common tasks in different types of projects.
|
||||
To add them to your project, simply do:
|
||||
|
||||
```
|
||||
git remote add makefiles ssh://git@lab.gsi.upm.es:2200/docs/templates/makefiles.git
|
||||
git subtree add --prefix=.makefiles/ makefiles master
|
||||
touch Makefile
|
||||
echo "include .makefiles/base.mk" >> Makefile
|
||||
```
|
||||
|
||||
Now you can take advantage of the recipes.
|
||||
For instance, to add useful targets for a python project, just add this to your Makefile:
|
||||
|
||||
```
|
||||
include .makefiles/python.mk
|
||||
```
|
||||
|
||||
You may need to set special variables like the name of your project or the python versions you're targetting.
|
||||
Take a look at each specific `.mk` file for more information, and the `Makefile` in the [senpy](https://lab.gsi.upm.es/senpy/senpy) project for a real use case.
|
||||
|
||||
If you update the makefiles from your repository, make sure to push the changes for review in upstream (this repository):
|
||||
|
||||
```
|
||||
make makefiles-push
|
||||
```
|
||||
|
||||
It will automatically commit all unstaged changes in the .makefiles folder.
|
36
.makefiles/base.mk
Normal file
@ -0,0 +1,36 @@
|
||||
export
|
||||
NAME ?= $(shell basename $(CURDIR))
|
||||
VERSION ?= $(shell git describe --tags --dirty 2>/dev/null)
|
||||
|
||||
ifeq ($(VERSION),)
|
||||
VERSION:=unknown
|
||||
endif
|
||||
|
||||
# Get the location of this makefile.
|
||||
MK_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
|
||||
|
||||
-include .env
|
||||
-include ../.env
|
||||
|
||||
help: ## Show this help.
|
||||
@fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/\(.*:\)[^#]*##\s*\(.*\)/\1\t\2/' | column -t -s " "
|
||||
|
||||
config: ## Load config from the environment. You should run it once in every session before other tasks. Run: eval $(make config)
|
||||
@awk '{ print "export " $$0}' ../.env
|
||||
@awk '{ print "export " $$0}' .env
|
||||
@echo "# Please, run: "
|
||||
@echo "# eval \$$(make config)"
|
||||
# If you need to run a command on the key/value pairs, use this:
|
||||
# @awk '{ split($$0, a, "="); "echo " a[2] " | base64 -w 0" |& getline b64; print "export " a[1] "=" a[2]; print "export " a[1] "_BASE64=" b64}' .env
|
||||
|
||||
ci: ## Run a task using gitlab-runner. Only use to debug problems in the CI pipeline
|
||||
gitlab-runner exec shell --builds-dir '.builds' --env CI_PROJECT_NAME=$(NAME) ${action}
|
||||
|
||||
include $(MK_DIR)/makefiles.mk
|
||||
include $(MK_DIR)/docker.mk
|
||||
include $(MK_DIR)/git.mk
|
||||
|
||||
info:: ## List all variables
|
||||
env
|
||||
|
||||
.PHONY:: config help ci
|
51
.makefiles/docker.mk
Normal file
@ -0,0 +1,51 @@
|
||||
ifndef IMAGENAME
|
||||
ifdef CI_REGISTRY_IMAGE
|
||||
IMAGENAME=$(CI_REGISTRY_IMAGE)
|
||||
else
|
||||
IMAGENAME=$(NAME)
|
||||
endif
|
||||
endif
|
||||
|
||||
IMAGEWTAG?=$(IMAGENAME):$(VERSION)
|
||||
DOCKER_FLAGS?=$(-ti)
|
||||
DOCKER_CMD?=
|
||||
|
||||
docker-login: ## Log in to the registry. It will only be used in the server, or when running a CI task locally (if CI_BUILD_TOKEN is set).
|
||||
ifeq ($(CI_BUILD_TOKEN),)
|
||||
@echo "Not logging in to the docker registry" "$(CI_REGISTRY)"
|
||||
else
|
||||
@docker login -u gitlab-ci-token -p $(CI_BUILD_TOKEN) $(CI_REGISTRY)
|
||||
endif
|
||||
ifeq ($(HUB_USER),)
|
||||
@echo "Not logging in to global the docker registry"
|
||||
else
|
||||
@docker login -u $(HUB_USER) -p $(HUB_PASSWORD)
|
||||
endif
|
||||
|
||||
docker-clean: ## Remove docker credentials
|
||||
ifeq ($(HUB_USER),)
|
||||
else
|
||||
@docker logout
|
||||
endif
|
||||
|
||||
docker-run: ## Build a generic docker image
|
||||
docker run $(DOCKER_FLAGS) $(IMAGEWTAG) $(DOCKER_CMD)
|
||||
|
||||
docker-build: ## Build a generic docker image
|
||||
docker build . -t $(IMAGEWTAG)
|
||||
|
||||
docker-push: docker-login ## Push a generic docker image
|
||||
docker push $(IMAGEWTAG)
|
||||
|
||||
docker-latest-push: docker-login ## Push the latest image
|
||||
docker tag $(IMAGEWTAG) $(IMAGENAME)
|
||||
docker push $(IMAGENAME)
|
||||
|
||||
login:: docker-login
|
||||
|
||||
clean:: docker-clean
|
||||
|
||||
docker-info:
|
||||
@echo IMAGEWTAG=${IMAGEWTAG}
|
||||
|
||||
.PHONY:: docker-login docker-clean login clean
|
25
.makefiles/git.mk
Normal file
@ -0,0 +1,25 @@
|
||||
commit:
|
||||
git commit -a
|
||||
|
||||
tag:
|
||||
git tag ${VERSION}
|
||||
|
||||
git-push::
|
||||
git push --tags -u origin HEAD
|
||||
|
||||
git-pull:
|
||||
git pull --all
|
||||
|
||||
push-github: ## Push the code to github. You need to set up GITHUB_DEPLOY_KEY
|
||||
ifeq ($(GITHUB_DEPLOY_KEY),)
|
||||
else
|
||||
$(eval KEY_FILE := "$(shell mktemp)")
|
||||
@printf '%b' '$(GITHUB_DEPLOY_KEY)' > $(KEY_FILE)
|
||||
@git remote rm github-deploy || true
|
||||
git remote add github-deploy $(GITHUB_REPO)
|
||||
-@GIT_SSH_COMMAND="ssh -i $(KEY_FILE)" git fetch github-deploy $(CI_COMMIT_REF_NAME)
|
||||
@GIT_SSH_COMMAND="ssh -i $(KEY_FILE)" git push github-deploy HEAD:$(CI_COMMIT_REF_NAME)
|
||||
rm $(KEY_FILE)
|
||||
endif
|
||||
|
||||
.PHONY:: commit tag git-push git-pull push-github
|
51
.makefiles/k8s.mk
Normal file
@ -0,0 +1,51 @@
|
||||
# Deployment with Kubernetes
|
||||
|
||||
# KUBE_CA_PEM_FILE is the path of a certificate file. It automatically set by GitLab
|
||||
# if you enable Kubernetes integration in a project.
|
||||
#
|
||||
# As of this writing, Kubernetes integration can not be set on a group level, so it has to
|
||||
# be manually set in every project.
|
||||
# Alternatively, we use a custom KUBE_CA_BUNDLE environment variable, which can be set at
|
||||
# the group level. In this case, the variable contains the whole content of the certificate,
|
||||
# which we dump to a temporary file
|
||||
#
|
||||
# Check if the KUBE_CA_PEM_FILE exists. Otherwise, create it from KUBE_CA_BUNDLE
|
||||
KUBE_CA_TEMP=false
|
||||
ifndef KUBE_CA_PEM_FILE
|
||||
KUBE_CA_PEM_FILE:=$$PWD/.ca.crt
|
||||
CREATED:=$(shell printf '%b\n' '$(KUBE_CA_BUNDLE)' > $(KUBE_CA_PEM_FILE))
|
||||
endif
|
||||
KUBE_TOKEN?=""
|
||||
KUBE_NAMESPACE?=$(NAME)
|
||||
KUBECTL=docker run --rm -v $(KUBE_CA_PEM_FILE):/tmp/ca.pem -i lachlanevenson/k8s-kubectl --server="$(KUBE_URL)" --token="$(KUBE_TOKEN)" --certificate-authority="/tmp/ca.pem" -n $(KUBE_NAMESPACE)
|
||||
CI_COMMIT_REF_NAME?=master
|
||||
|
||||
info:: ## Print variables. Useful for debugging.
|
||||
@echo "#KUBERNETES"
|
||||
@echo KUBE_URL=$(KUBE_URL)
|
||||
@echo KUBE_CA_PEM_FILE=$(KUBE_CA_PEM_FILE)
|
||||
@echo KUBE_CA_BUNDLE=$$KUBE_CA_BUNDLE
|
||||
@echo KUBE_TOKEN=$(KUBE_TOKEN)
|
||||
@echo KUBE_NAMESPACE=$(KUBE_NAMESPACE)
|
||||
@echo KUBECTL=$(KUBECTL)
|
||||
|
||||
@echo "#CI"
|
||||
@echo CI_PROJECT_NAME=$(CI_PROJECT_NAME)
|
||||
@echo CI_REGISTRY=$(CI_REGISTRY)
|
||||
@echo CI_REGISTRY_USER=$(CI_REGISTRY_USER)
|
||||
@echo CI_COMMIT_REF_NAME=$(CI_COMMIT_REF_NAME)
|
||||
@echo "CREATED=$(CREATED)"
|
||||
|
||||
#
|
||||
# Deployment and advanced features
|
||||
#
|
||||
|
||||
|
||||
deploy: ## Deploy to kubernetes using the credentials in KUBE_CA_PEM_FILE (or KUBE_CA_BUNDLE ) and TOKEN
|
||||
@ls k8s/*.yaml k8s/*.yml k8s/*.tmpl 2>/dev/null || true
|
||||
@cat k8s/*.yaml k8s/*.yml k8s/*.tmpl 2>/dev/null | envsubst | $(KUBECTL) apply -f -
|
||||
|
||||
deploy-check: ## Get the deployed configuration.
|
||||
@$(KUBECTL) get deploy,pods,svc,ingress
|
||||
|
||||
.PHONY:: info deploy deploy-check
|
15
.makefiles/makefiles.mk
Normal file
@ -0,0 +1,15 @@
|
||||
makefiles-remote:
|
||||
git ls-remote --exit-code makefiles 2> /dev/null || git remote add makefiles ssh://git@lab.gsi.upm.es:2200/docs/templates/makefiles.git
|
||||
|
||||
makefiles-commit: makefiles-remote
|
||||
git add -f .makefiles
|
||||
git commit -em "Updated makefiles from ${NAME}"
|
||||
|
||||
makefiles-push:
|
||||
git fetch makefiles $(NAME)
|
||||
git subtree push --prefix=.makefiles/ makefiles $(NAME)
|
||||
|
||||
makefiles-pull: makefiles-remote
|
||||
git subtree pull --prefix=.makefiles/ makefiles master --squash
|
||||
|
||||
.PHONY:: makefiles-remote makefiles-commit makefiles-push makefiles-pull
|
5
.makefiles/precommit.mk
Normal file
@ -0,0 +1,5 @@
|
||||
init: ## Init pre-commit hooks (i.e. enforcing format checking before allowing a commit)
|
||||
pip install --user pre-commit
|
||||
pre-commit install
|
||||
|
||||
.PHONY:: init
|
101
.makefiles/python.mk
Normal file
@ -0,0 +1,101 @@
|
||||
PYVERSIONS ?= 3.5
|
||||
PYMAIN ?= $(firstword $(PYVERSIONS))
|
||||
TARNAME ?= $(NAME)-$(VERSION).tar.gz
|
||||
VERSIONFILE ?= $(NAME)/VERSION
|
||||
|
||||
DEVPORT ?= 6000
|
||||
|
||||
|
||||
.FORCE:
|
||||
|
||||
version: .FORCE
|
||||
@echo $(VERSION) > $(VERSIONFILE)
|
||||
@echo $(VERSION)
|
||||
|
||||
yapf: ## Format python code
|
||||
yapf -i -r $(NAME)
|
||||
yapf -i -r tests
|
||||
|
||||
dockerfiles: $(addprefix Dockerfile-,$(PYVERSIONS)) ## Generate dockerfiles for each python version
|
||||
@unlink Dockerfile >/dev/null
|
||||
ln -s Dockerfile-$(PYMAIN) Dockerfile
|
||||
|
||||
Dockerfile-%: Dockerfile.template ## Generate a specific dockerfile (e.g. Dockerfile-2.7)
|
||||
sed "s/{{PYVERSION}}/$*/" Dockerfile.template > Dockerfile-$*
|
||||
|
||||
quick_build: $(addprefix build-, $(PYMAIN))
|
||||
|
||||
build: $(addprefix build-, $(PYVERSIONS)) ## Build all images / python versions
|
||||
docker tag $(IMAGEWTAG)-python$(PYMAIN) $(IMAGEWTAG)
|
||||
|
||||
build-%: version Dockerfile-% ## Build a specific version (e.g. build-2.7)
|
||||
docker build --pull -t '$(IMAGEWTAG)-python$*' -f Dockerfile-$* .;
|
||||
|
||||
dev-%: ## Launch a specific development environment using docker (e.g. dev-2.7)
|
||||
@docker start $(NAME)-dev$* || (\
|
||||
$(MAKE) build-$*; \
|
||||
docker run -d -w /usr/src/app/ -p $(DEVPORT):5000 -v $$PWD:/usr/src/app --entrypoint=/bin/bash -ti --name $(NAME)-dev$* '$(IMAGEWTAG)-python$*'; \
|
||||
)\
|
||||
|
||||
docker exec -ti $(NAME)-dev$* bash
|
||||
|
||||
dev: dev-$(PYMAIN) ## Launch a development environment using docker, using the default python version
|
||||
|
||||
quick_test: test-$(PYMAIN)
|
||||
|
||||
test-%: build-% ## Run setup.py from in an isolated container, built from the base image. (e.g. test-2.7)
|
||||
# This speeds tests up because the image has most (if not all) of the dependencies already.
|
||||
docker rm $(NAME)-test-$* || true
|
||||
docker create -ti --name $(NAME)-test-$* --entrypoint="" -w /usr/src/app/ $(IMAGEWTAG)-python$* python setup.py test
|
||||
docker cp . $(NAME)-test-$*:/usr/src/app
|
||||
docker start -a $(NAME)-test-$*
|
||||
|
||||
test: $(addprefix test-,$(PYVERSIONS)) ## Run the tests with the main python version
|
||||
|
||||
run-%: build-%
|
||||
docker run --rm -p $(DEVPORT):5000 -ti '$(IMAGEWTAG)-python$(PYMAIN)' --default-plugins
|
||||
|
||||
run: run-$(PYMAIN)
|
||||
|
||||
# Pypy - Upload a package
|
||||
|
||||
dist/$(TARNAME): version
|
||||
python setup.py sdist;
|
||||
|
||||
sdist: dist/$(TARNAME) ## Generate the distribution file (wheel)
|
||||
|
||||
pip_test-%: sdist ## Test the distribution file using pip install and a specific python version (e.g. pip_test-2.7)
|
||||
docker run --rm -v $$PWD/dist:/dist/ python:$* pip install /dist/$(TARNAME);
|
||||
|
||||
pip_test: $(addprefix pip_test-,$(PYVERSIONS)) ## Test pip installation with the main python version
|
||||
|
||||
pip_upload: pip_test ## Upload package to pip
|
||||
python setup.py sdist upload ;
|
||||
|
||||
# Pushing to docker
|
||||
|
||||
push-latest: $(addprefix push-latest-,$(PYVERSIONS)) ## Push the "latest" tag to dockerhub
|
||||
docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGEWTAG)'
|
||||
docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGENAME):latest'
|
||||
docker push '$(IMAGENAME):latest'
|
||||
docker push '$(IMAGEWTAG)'
|
||||
|
||||
push-latest-%: build-% ## Push the latest image for a specific python version
|
||||
docker tag $(IMAGENAME):$(VERSION)-python$* $(IMAGENAME):python$*
|
||||
docker push $(IMAGENAME):$(VERSION)-python$*
|
||||
docker push $(IMAGENAME):python$*
|
||||
|
||||
push-%: build-% ## Push the image of the current version (tagged). e.g. push-2.7
|
||||
docker push $(IMAGENAME):$(VERSION)-python$*
|
||||
|
||||
push:: $(addprefix push-,$(PYVERSIONS)) ## Push an image with the current version for every python version
|
||||
docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGEWTAG)'
|
||||
docker push $(IMAGENAME):$(VERSION)
|
||||
|
||||
clean:: ## Clean older docker images and containers related to this project and dev environments
|
||||
@docker stop $(addprefix $(NAME)-dev,$(PYVERSIONS)) 2>/dev/null || true
|
||||
@docker rm $(addprefix $(NAME)-dev,$(PYVERSIONS)) 2>/dev/null || true
|
||||
@docker ps -a | grep $(IMAGENAME) | awk '{ split($$2, vers, "-"); if(vers[0] != "${VERSION}"){ print $$1;}}' | xargs docker rm -v 2>/dev/null|| true
|
||||
@docker images | grep $(IMAGENAME) | awk '{ split($$2, vers, "-"); if(vers[0] != "${VERSION}"){ print $$1":"$$2;}}' | xargs docker rmi 2>/dev/null|| true
|
||||
|
||||
.PHONY:: yapf dockerfiles Dockerfile-% quick_build build build-% dev-% quick-dev test quick_test push-latest push-latest-% push-% push version .FORCE
|
22
.readthedocs.yaml
Normal file
@ -0,0 +1,22 @@
|
||||
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
|
||||
|
||||
version: 2
|
||||
|
||||
# Set the OS, Python version and other tools you might need
|
||||
|
||||
build:
|
||||
os: ubuntu-22.04
|
||||
tools:
|
||||
python: "3.10"
|
||||
|
||||
# Build documentation in the "docs/" directory with Sphinx
|
||||
sphinx:
|
||||
configuration: docs/conf.py
|
||||
|
||||
# formats:
|
||||
# - pdf
|
||||
# - epub
|
||||
|
||||
python:
|
||||
install:
|
||||
- requirements: docs/requirements.txt
|
52
.travis.yml
@ -1,13 +1,43 @@
|
||||
sudo: required
|
||||
|
||||
services:
|
||||
- docker
|
||||
|
||||
language: python
|
||||
|
||||
env:
|
||||
- PYV=2.7
|
||||
- PYV=3.4
|
||||
- PYV=3.5
|
||||
# run nosetests - Tests
|
||||
script: make test-$PYV
|
||||
matrix:
|
||||
allow_failures:
|
||||
# Windows is experimental in Travis.
|
||||
# As of this writing, senpy installs but hangs on tests that use the flask test client (e.g. blueprints)
|
||||
- os: windows
|
||||
include:
|
||||
- os: linux
|
||||
language: python
|
||||
python: 3.4
|
||||
before_install:
|
||||
- pip install --upgrade --force-reinstall pandas
|
||||
- os: linux
|
||||
language: python
|
||||
python: 3.5
|
||||
- os: linux
|
||||
language: python
|
||||
python: 3.6
|
||||
- os: linux
|
||||
language: python
|
||||
python: 3.7
|
||||
- os: osx
|
||||
language: generic
|
||||
addons:
|
||||
homebrew:
|
||||
# update: true
|
||||
packages: python3
|
||||
before_install:
|
||||
- python3 -m pip install --upgrade virtualenv
|
||||
- virtualenv -p python3 --system-site-packages "$HOME/venv"
|
||||
- source "$HOME/venv/bin/activate"
|
||||
- os: windows
|
||||
language: bash
|
||||
before_install:
|
||||
- choco install -y python3
|
||||
- python -m pip install --upgrade pip
|
||||
env: PATH=/c/Python37:/c/Python37/Scripts:$PATH
|
||||
# command to run tests
|
||||
# 'python' points to Python 2.7 on macOS but points to Python 3.7 on Linux and Windows
|
||||
# 'python3' is a 'command not found' error on Windows but 'py' works on Windows only
|
||||
script:
|
||||
- python3 setup.py test || python setup.py test
|
||||
|
81
CHANGELOG.md
Normal file
@ -0,0 +1,81 @@
|
||||
# Changelog
|
||||
All notable changes to this project will be documented in this file.
|
||||
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## [Unreleased]
|
||||
### Added
|
||||
* The code of many senpy community plugins have been included by default. However, additional files (e.g., licensed data) and/or installing additional dependencies may be necessary for some plugins. Read each plugin's documentation for more information.
|
||||
* `--strict` flag, to fail and not start when a
|
||||
* `optional` attribute in plugins. Optional plugins may fail to load or activate but the server will be started regardless, unless running in strict mode
|
||||
* Option in shelf plugins to ignore pickling errors
|
||||
### Removed
|
||||
* `--only-install`, `--only-test` and `--only-list` flags were removed in favor of `--no-run` + `--install`/`--test`/`--dependencies`
|
||||
### Changed
|
||||
* data directory selection logic is slightly modified, and will choose one of the following (in this order): `data_folder` (argument), `$SENPY_DATA` or `$CWD`
|
||||
|
||||
## [1.0.6]
|
||||
### Fixed
|
||||
* Plugins now get activated for testing
|
||||
## [1.0.1]
|
||||
### Added
|
||||
* License headers
|
||||
* Description for PyPI (setup.py)
|
||||
|
||||
### Changed
|
||||
* The evaluation tab shows datasets inline, and a tooltip shows the number of instances
|
||||
* The docs should be clearer now
|
||||
|
||||
## [1.0.0]
|
||||
### Fixed
|
||||
* Restored hash changing function in `main.js`
|
||||
|
||||
## 0.20
|
||||
|
||||
### Added
|
||||
* Objects can control the keys that will be used in `serialize`/`jsonld`/`as_dict` by specifying a list of keys in `terse_keys`.
|
||||
e.g.
|
||||
```python
|
||||
>>> class MyModel(senpy.models.BaseModel):
|
||||
... _terse_keys = ['visible']
|
||||
... invisible = 5
|
||||
... visible = 1
|
||||
...
|
||||
>>> m = MyModel(id='testing')
|
||||
>>> m.jsonld()
|
||||
{'invisible': 5, 'visible': 1, '@id': 'testing'}
|
||||
>>> m.jsonld(verbose=False)
|
||||
{'visible': 1}
|
||||
```
|
||||
* Configurable logging format.
|
||||
* Added default terse keys for the most common classes (entry, sentiment, emotion...).
|
||||
* Flag parameters (boolean) are set to true even when no value is added (e.g. `&verbose` is the same as `&verbose=true`).
|
||||
* Plugin and parameter descriptions are now formatted with (showdown)[https://github.com/showdownjs/showdown].
|
||||
* The web UI requests extra_parameters from the server. This is useful for pipelines. See #52
|
||||
* First batch of semantic tests (using SPARQL)
|
||||
* `Plugin.path()` method to get a file path from a relative path (using the senpy data folder)
|
||||
|
||||
### Changed
|
||||
* `install_deps` now checks what requirements are already met before installing with pip.
|
||||
* Help is now provided verbosely by default
|
||||
* Other outputs are terse by default. This means some properties are now hidden unless verbose is set.
|
||||
* `sentiments` and `emotions` are now `marl:hasOpinion` and `onyx:hasEmotionSet`, respectively.
|
||||
* Nicer logging format
|
||||
* Context aliases (e.g. `sentiments` and `emotions` properties) have been replaced with the original properties (e.g. `marl:hasOpinion` and `onyx:hasEmotionSet**), to use aliases, pass the `aliases** parameter.
|
||||
* Several UI improvements
|
||||
* Dedicated tab to show the list of plugins
|
||||
* URLs in plugin descriptions are shown as links
|
||||
* The format of the response is selected by clicking on a tab instead of selecting from a drop-down
|
||||
* list of examples
|
||||
* Bootstrap v4
|
||||
* RandEmotion and RandSentiment are no longer included in the base set of plugins
|
||||
* The `--plugin-folder` option can be used more than once, and every folder will be added to the app.
|
||||
|
||||
### Deprecated
|
||||
### Removed
|
||||
* Python 2.7 is no longer test or officially supported
|
||||
### Fixed
|
||||
* Plugin descriptions are now dedented when they are extracted from the docstring.
|
||||
### Security
|
||||
|
@ -2,21 +2,24 @@ from python:{{PYVERSION}}
|
||||
|
||||
MAINTAINER J. Fernando Sánchez <jf.sanchez@upm.es>
|
||||
|
||||
RUN mkdir /cache/ /senpy-plugins /data/
|
||||
RUN apt-get update && apt-get install -y \
|
||||
libblas-dev liblapack-dev liblapacke-dev gfortran \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN mkdir -p /cache/ /senpy-plugins /data/
|
||||
|
||||
VOLUME /data/
|
||||
|
||||
ENV PIP_CACHE_DIR=/cache/ SENPY_DATA=/data
|
||||
|
||||
WORKDIR /usr/src/app
|
||||
COPY test-requirements.txt requirements.txt extra-requirements.txt /usr/src/app/
|
||||
RUN pip install --no-cache-dir -r test-requirements.txt -r requirements.txt -r extra-requirements.txt
|
||||
COPY . /usr/src/app/
|
||||
RUN pip install --no-cache-dir --no-index --no-deps --editable .
|
||||
|
||||
ONBUILD COPY . /senpy-plugins/
|
||||
ONBUILD RUN python -m senpy --only-install -f /senpy-plugins
|
||||
ONBUILD RUN python -m senpy -i --no-run -f /senpy-plugins
|
||||
ONBUILD WORKDIR /senpy-plugins/
|
||||
|
||||
|
||||
WORKDIR /usr/src/app
|
||||
COPY test-requirements.txt requirements.txt /usr/src/app/
|
||||
RUN pip install --use-wheel -r test-requirements.txt -r requirements.txt
|
||||
COPY . /usr/src/app/
|
||||
RUN pip install --no-deps --no-index .
|
||||
|
||||
ENTRYPOINT ["python", "-m", "senpy", "-f", "/senpy-plugins/", "--host", "0.0.0.0"]
|
||||
|
@ -1,9 +1,11 @@
|
||||
include requirements.txt
|
||||
include test-requirements.txt
|
||||
include extra-requirements.txt
|
||||
include README.rst
|
||||
include LICENSE.txt
|
||||
include senpy/VERSION
|
||||
graft senpy/plugins
|
||||
graft senpy/schemas
|
||||
graft senpy/templates
|
||||
graft senpy/static
|
||||
graft img
|
||||
graft img
|
||||
|
118
Makefile
@ -1,107 +1,17 @@
|
||||
PYVERSIONS=3.5 2.7
|
||||
PYMAIN=$(firstword $(PYVERSIONS))
|
||||
NAME=senpy
|
||||
REPO=gsiupm
|
||||
VERSION=$(shell git describe --tags --dirty 2>/dev/null)
|
||||
TARNAME=$(NAME)-$(VERSION).tar.gz
|
||||
IMAGENAME=$(REPO)/$(NAME)
|
||||
IMAGEWTAG=$(IMAGENAME):$(VERSION)
|
||||
GITHUB_REPO=git@github.com:gsi-upm/senpy.git
|
||||
|
||||
IMAGENAME=gsiupm/senpy
|
||||
|
||||
# The first version is the main one (used for quick builds)
|
||||
# See .makefiles/python.mk for more info
|
||||
PYVERSIONS ?= 3.10 3.7
|
||||
|
||||
DEVPORT=5000
|
||||
|
||||
action="test-${PYMAIN}"
|
||||
GITHUB_REPO=git@github.com:gsi-upm/senpy.git
|
||||
|
||||
all: build run
|
||||
|
||||
.FORCE:
|
||||
|
||||
version: .FORCE
|
||||
@echo $(VERSION) > $(NAME)/VERSION
|
||||
@echo $(VERSION)
|
||||
|
||||
yapf:
|
||||
yapf -i -r senpy
|
||||
yapf -i -r tests
|
||||
|
||||
init:
|
||||
pip install --user pre-commit
|
||||
pre-commit install
|
||||
|
||||
dockerfiles: $(addprefix Dockerfile-,$(PYVERSIONS))
|
||||
@unlink Dockerfile >/dev/null
|
||||
ln -s Dockerfile-$(PYMAIN) Dockerfile
|
||||
|
||||
Dockerfile-%: Dockerfile.template
|
||||
sed "s/{{PYVERSION}}/$*/" Dockerfile.template > Dockerfile-$*
|
||||
|
||||
quick_build: $(addprefix build-, $(PYMAIN))
|
||||
|
||||
build: $(addprefix build-, $(PYVERSIONS))
|
||||
|
||||
build-%: version Dockerfile-%
|
||||
docker build -t '$(IMAGEWTAG)-python$*' -f Dockerfile-$* .;
|
||||
|
||||
quick_test: $(addprefix test-,$(PYMAIN))
|
||||
|
||||
dev-%:
|
||||
@docker start $(NAME)-dev$* || (\
|
||||
$(MAKE) build-$*; \
|
||||
docker run -d -w /usr/src/app/ -v $$PWD:/usr/src/app --entrypoint=/bin/bash -ti --name $(NAME)-dev$* '$(IMAGEWTAG)-python$*'; \
|
||||
)\
|
||||
|
||||
docker exec -ti $(NAME)-dev$* bash
|
||||
|
||||
dev: dev-$(PYMAIN)
|
||||
|
||||
test-all: $(addprefix test-,$(PYVERSIONS))
|
||||
|
||||
test-%: build-%
|
||||
docker run --rm --entrypoint /usr/local/bin/python -w /usr/src/app $(IMAGEWTAG)-python$* setup.py test
|
||||
|
||||
test: test-$(PYMAIN)
|
||||
|
||||
dist/$(TARNAME):
|
||||
docker run --rm -ti -v $$PWD:/usr/src/app/ -w /usr/src/app/ python:$(PYMAIN) python setup.py sdist;
|
||||
|
||||
sdist: dist/$(TARNAME)
|
||||
|
||||
pip_test-%: sdist
|
||||
docker run --rm -v $$PWD/dist:/dist/ -ti python:$* pip install /dist/$(TARNAME);
|
||||
|
||||
pip_test: $(addprefix pip_test-,$(PYVERSIONS))
|
||||
|
||||
clean:
|
||||
@docker ps -a | awk '/$(REPO)\/$(NAME)/{ split($$2, vers, "-"); if(vers[0] != "${VERSION}"){ print $$1;}}' | xargs docker rm -v 2>/dev/null|| true
|
||||
@docker images | awk '/$(REPO)\/$(NAME)/{ split($$2, vers, "-"); if(vers[0] != "${VERSION}"){ print $$1":"$$2;}}' | xargs docker rmi 2>/dev/null|| true
|
||||
@docker rmi $(NAME)-dev 2>/dev/null || true
|
||||
|
||||
|
||||
git_commit:
|
||||
git commit -a
|
||||
|
||||
git_tag:
|
||||
git tag ${VERSION}
|
||||
|
||||
git_push:
|
||||
git push --tags origin master
|
||||
|
||||
pip_upload:
|
||||
python setup.py sdist upload ;
|
||||
|
||||
pip_test: $(addprefix pip_test-,$(PYVERSIONS))
|
||||
|
||||
run-%: build-%
|
||||
docker run --rm -p 5000:5000 -ti '$(IMAGEWTAG)-python$(PYMAIN)' --default-plugins
|
||||
|
||||
run: run-$(PYMAIN)
|
||||
|
||||
push-latest: build-$(PYMAIN)
|
||||
docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGEWTAG)'
|
||||
docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGENAME)'
|
||||
docker push '$(IMAGENAME)'
|
||||
docker push '$(IMAGEWTAG)'
|
||||
|
||||
push-%: build-%
|
||||
docker push $(IMAGENAME):$(VERSION)-python$*
|
||||
|
||||
ci:
|
||||
gitlab-runner exec docker --docker-volumes /var/run/docker.sock:/var/run/docker.sock --env CI_PROJECT_NAME=$(NAME) ${action}
|
||||
|
||||
.PHONY: test test-% test-all build-% build test pip_test run yapf push-main push-% dev ci version .FORCE
|
||||
include .makefiles/base.mk
|
||||
include .makefiles/k8s.mk
|
||||
include .makefiles/python.mk
|
||||
|
2
Procfile
@ -1 +1 @@
|
||||
web: python -m senpy --host 0.0.0.0 --port $PORT --default-plugins
|
||||
web: python -m senpy --host 0.0.0.0 --port $PORT
|
||||
|
120
README.rst
@ -1,18 +1,25 @@
|
||||
.. image:: img/header.png
|
||||
:height: 6em
|
||||
:target: http://demos.gsi.dit.upm.es/senpy
|
||||
:width: 100%
|
||||
:target: http://senpy.gsi.upm.es
|
||||
|
||||
.. image:: https://travis-ci.org/gsi-upm/senpy.svg?branch=master
|
||||
:target: https://travis-ci.org/gsi-upm/senpy
|
||||
.. image:: https://readthedocs.org/projects/senpy/badge/?version=latest
|
||||
:target: http://senpy.readthedocs.io/en/latest/
|
||||
.. image:: https://badge.fury.io/py/senpy.svg
|
||||
:target: https://badge.fury.io/py/senpy
|
||||
.. image:: https://travis-ci.org/gsi-upm/senpy.svg
|
||||
:target: https://github.com/gsi-upm/senpy/senpy/tree/master
|
||||
.. image:: https://img.shields.io/pypi/l/requests.svg
|
||||
:target: https://lab.gsi.upm.es/senpy/senpy/
|
||||
|
||||
|
||||
Senpy lets you create sentiment analysis web services easily, fast and using a well known API.
|
||||
As a bonus, senpy services use semantic vocabularies (e.g. `NIF <http://persistence.uni-leipzig.org/nlp2rdf/>`_, `Marl <http://www.gsi.dit.upm.es/ontologies/marl>`_, `Onyx <http://www.gsi.dit.upm.es/ontologies/onyx>`_) and formats (turtle, JSON-LD, xml-rdf).
|
||||
As a bonus, Senpy services use semantic vocabularies (e.g. `NIF <http://persistence.uni-leipzig.org/nlp2rdf/>`_, `Marl <http://www.gsi.upm.es/ontologies/marl>`_, `Onyx <http://www.gsi.upm.es/ontologies/onyx>`_) and formats (turtle, JSON-LD, xml-rdf).
|
||||
|
||||
Have you ever wanted to turn your sentiment analysis algorithms into a service?
|
||||
With senpy, now you can.
|
||||
With Senpy, now you can.
|
||||
It provides all the tools so you just have to worry about improving your algorithms:
|
||||
|
||||
`See it in action. <http://senpy.cluster.gsi.dit.upm.es/>`_
|
||||
`See it in action. <http://senpy.gsi.upm.es/>`_
|
||||
|
||||
Installation
|
||||
------------
|
||||
@ -23,7 +30,7 @@ Through PIP
|
||||
|
||||
.. code:: bash
|
||||
|
||||
pip install --user senpy
|
||||
pip install -U --user senpy
|
||||
|
||||
|
||||
Alternatively, you can use the development version:
|
||||
@ -34,13 +41,76 @@ Alternatively, you can use the development version:
|
||||
cd senpy
|
||||
pip install --user .
|
||||
|
||||
If you want to install senpy globally, use sudo instead of the ``--user`` flag.
|
||||
If you want to install Senpy globally, use sudo instead of the ``--user`` flag.
|
||||
|
||||
Docker Image
|
||||
************
|
||||
Build the image or use the pre-built one: ``docker run -ti -p 5000:5000 gsiupm/senpy --default-plugins``.
|
||||
Build the image or use the pre-built one: ``docker run -ti -p 5000:5000 gsiupm/senpy``.
|
||||
|
||||
To add custom plugins, add a volume and tell senpy where to find the plugins: ``docker run -ti -p 5000:5000 -v <PATH OF PLUGINS>:/plugins gsiupm/senpy --default-plugins -f /plugins``
|
||||
To add custom plugins, add a volume and tell Senpy where to find the plugins: ``docker run -ti -p 5000:5000 -v <PATH OF PLUGINS>:/plugins gsiupm/senpy -f /plugins``
|
||||
|
||||
|
||||
Compatibility
|
||||
-------------
|
||||
|
||||
Senpy should run on any major operating system.
|
||||
Its code is pure Python, and the only limitations are imposed by its dependencies (e.g., nltk, pandas).
|
||||
|
||||
Currently, the CI/CD pipeline tests the code on:
|
||||
|
||||
* GNU/Linux with Python versions 3.7+ (3.10+ recommended for all plugins)
|
||||
* MacOS and homebrew's python3
|
||||
* Windows 10 and chocolatey's python3
|
||||
|
||||
The latest PyPI package is verified to install on Ubuntu, Debian and Arch Linux.
|
||||
|
||||
If you have trouble installing Senpy on your platform, see `Having problems?`_.
|
||||
|
||||
Developing
|
||||
----------
|
||||
|
||||
Running/debugging
|
||||
*****************
|
||||
This command will run the senpy container using the latest image available, mounting your current folder so you get your latest code:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
|
||||
# Python 3.5
|
||||
make dev
|
||||
# Python 2.7
|
||||
make dev-2.7
|
||||
|
||||
Building a docker image
|
||||
***********************
|
||||
|
||||
.. code:: bash
|
||||
|
||||
|
||||
# Python 3.5
|
||||
make build-3.5
|
||||
# Python 2.7
|
||||
make build-2.7
|
||||
|
||||
Testing
|
||||
*******
|
||||
|
||||
.. code:: bash
|
||||
|
||||
|
||||
make test
|
||||
|
||||
Running
|
||||
*******
|
||||
This command will run the senpy server listening on localhost:5000
|
||||
|
||||
.. code:: bash
|
||||
|
||||
|
||||
# Python 3.5
|
||||
make run-3.5
|
||||
# Python 2.7
|
||||
make run-2.7
|
||||
|
||||
Usage
|
||||
-----
|
||||
@ -49,19 +119,21 @@ However, the easiest and recommended way is to just use the command-line tool to
|
||||
|
||||
.. code:: bash
|
||||
|
||||
|
||||
senpy
|
||||
|
||||
or, alternatively:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
|
||||
python -m senpy
|
||||
|
||||
|
||||
This will create a server with any modules found in the current path.
|
||||
For more options, see the `--help` page.
|
||||
|
||||
Alternatively, you can use the modules included in senpy to build your own application.
|
||||
Alternatively, you can use the modules included in Senpy to build your own application.
|
||||
|
||||
Deploying on Heroku
|
||||
-------------------
|
||||
@ -69,13 +141,31 @@ Use a free heroku instance to share your service with the world.
|
||||
Just use the example Procfile in this repository, or build your own.
|
||||
|
||||
|
||||
`DEMO on heroku <http://senpy.herokuapp.com>`_
|
||||
|
||||
|
||||
For more information, check out the `documentation <http://senpy.readthedocs.org>`_.
|
||||
------------------------------------------------------------------------------------
|
||||
|
||||
|
||||
Python 2.x compatibility
|
||||
------------------------
|
||||
|
||||
Keeping compatibility between python 2.7 and 3.x is not always easy, especially for a framework that deals both with text and web requests.
|
||||
Hence, starting February 2019, this project will no longer make efforts to support python 2.7, which will reach its end of life in 2020.
|
||||
Most of the functionality should still work, and the compatibility shims will remain for now, but we cannot make any guarantees at this point.
|
||||
Instead, the maintainers will focus their efforts on keeping the codebase compatible across different Python 3.3+ versions, including upcoming ones.
|
||||
We apologize for the inconvenience.
|
||||
|
||||
|
||||
Having problems?
|
||||
----------------
|
||||
|
||||
Please, file a new issue `on GitHub <https://github.com/gsi-upm/senpy/issues>`_ including enough details to reproduce the bug, including:
|
||||
|
||||
* Operating system
|
||||
* Version of Senpy (or docker tag)
|
||||
* Installed libraries
|
||||
* Relevant logs
|
||||
* A simple code example
|
||||
|
||||
Acknowledgement
|
||||
---------------
|
||||
This development has been partially funded by the European Union through the MixedEmotions Project (project number H2020 655632), as part of the `RIA ICT 15 Big data and Open Data Innovation and take-up` programme.
|
||||
|
@ -1,4 +0,0 @@
|
||||
import os
|
||||
|
||||
SERVER_PORT = os.environ.get("SERVER_PORT", 5000)
|
||||
DEBUG = os.environ.get("DEBUG", True)
|
10
docker-compose.dev.yml
Normal file
@ -0,0 +1,10 @@
|
||||
version: '3'
|
||||
services:
|
||||
senpy:
|
||||
image: "${IMAGENAME-gsiupm/senpy}:${VERSION-latest}"
|
||||
entrypoint: ["/bin/bash"]
|
||||
working_dir: "/senpy-plugins"
|
||||
ports:
|
||||
- 5000:5000
|
||||
volumes:
|
||||
- ".:/usr/src/app/"
|
9
docker-compose.test.yml
Normal file
@ -0,0 +1,9 @@
|
||||
version: '3'
|
||||
services:
|
||||
test:
|
||||
image: "${IMAGENAME-gsiupm/senpy}:${VERSION-dev}"
|
||||
entrypoint: ["py.test"]
|
||||
volumes:
|
||||
- ".:/usr/src/app/"
|
||||
command:
|
||||
[]
|
11
docker-compose.yml
Normal file
@ -0,0 +1,11 @@
|
||||
version: '3'
|
||||
services:
|
||||
senpy:
|
||||
image: "${IMAGENAME-gsiupm/senpy}:${VERSION-dev}"
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile${PYVERSION--2.7}
|
||||
ports:
|
||||
- 5001:5000
|
||||
volumes:
|
||||
- "./data:/data"
|
4428
docs/Advanced.ipynb
Normal file
592
docs/Evaluation.ipynb
Normal file
@ -0,0 +1,592 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Evaluating Services"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Sentiment analysis plugins can also be evaluated on a series of pre-defined datasets.\n",
|
||||
"This can be done in three ways: through the Web UI (playground), through the web API and programmatically.\n",
|
||||
"\n",
|
||||
"Regardless of the way you perform the evaluation, you will need to specify a plugin (service) that you want to evaluate, and a series of datasets on which it should be evaluated.\n",
|
||||
"\n",
|
||||
"To evaluate a plugin on a dataset, Senpy uses the plugin to predict the sentiment in each entry in the dataset.\n",
|
||||
"These predictions are compared with the expected values to produce several metrics, such as: accuracy, precision and f1-score.\n",
|
||||
"\n",
|
||||
"**note**: the evaluation process might take long for plugins that use external services, such as `sentiment140`.\n",
|
||||
"\n",
|
||||
"**note**: plugins are assumed to be pre-trained and invariant, i.e., the prediction for an entry should not change."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Web UI (Playground)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The playground should contain a tab for Evaluation, where you can select any plugin that can be evaluated, and the set of datasets that you want to test the plugin on.\n",
|
||||
"\n",
|
||||
"For example, the image below shows the results of the `sentiment-vader` plugin on the `vader` and `sts` datasets:\n",
|
||||
"\n",
|
||||
"\n",
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Web API"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The API exposes an endpoint (`/evaluate`), which accepts the plugin and the set of datasets on which it should be evaluated."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The following code is not necessary, but it will display the results better:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Here is a simple call using the requests library:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"<style>.output_html .hll { background-color: #ffffcc }\n",
|
||||
".output_html { background: #f8f8f8; }\n",
|
||||
".output_html .c { color: #408080; font-style: italic } /* Comment */\n",
|
||||
".output_html .err { border: 1px solid #FF0000 } /* Error */\n",
|
||||
".output_html .k { color: #008000; font-weight: bold } /* Keyword */\n",
|
||||
".output_html .o { color: #666666 } /* Operator */\n",
|
||||
".output_html .ch { color: #408080; font-style: italic } /* Comment.Hashbang */\n",
|
||||
".output_html .cm { color: #408080; font-style: italic } /* Comment.Multiline */\n",
|
||||
".output_html .cp { color: #BC7A00 } /* Comment.Preproc */\n",
|
||||
".output_html .cpf { color: #408080; font-style: italic } /* Comment.PreprocFile */\n",
|
||||
".output_html .c1 { color: #408080; font-style: italic } /* Comment.Single */\n",
|
||||
".output_html .cs { color: #408080; font-style: italic } /* Comment.Special */\n",
|
||||
".output_html .gd { color: #A00000 } /* Generic.Deleted */\n",
|
||||
".output_html .ge { font-style: italic } /* Generic.Emph */\n",
|
||||
".output_html .gr { color: #FF0000 } /* Generic.Error */\n",
|
||||
".output_html .gh { color: #000080; font-weight: bold } /* Generic.Heading */\n",
|
||||
".output_html .gi { color: #00A000 } /* Generic.Inserted */\n",
|
||||
".output_html .go { color: #888888 } /* Generic.Output */\n",
|
||||
".output_html .gp { color: #000080; font-weight: bold } /* Generic.Prompt */\n",
|
||||
".output_html .gs { font-weight: bold } /* Generic.Strong */\n",
|
||||
".output_html .gu { color: #800080; font-weight: bold } /* Generic.Subheading */\n",
|
||||
".output_html .gt { color: #0044DD } /* Generic.Traceback */\n",
|
||||
".output_html .kc { color: #008000; font-weight: bold } /* Keyword.Constant */\n",
|
||||
".output_html .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */\n",
|
||||
".output_html .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */\n",
|
||||
".output_html .kp { color: #008000 } /* Keyword.Pseudo */\n",
|
||||
".output_html .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */\n",
|
||||
".output_html .kt { color: #B00040 } /* Keyword.Type */\n",
|
||||
".output_html .m { color: #666666 } /* Literal.Number */\n",
|
||||
".output_html .s { color: #BA2121 } /* Literal.String */\n",
|
||||
".output_html .na { color: #7D9029 } /* Name.Attribute */\n",
|
||||
".output_html .nb { color: #008000 } /* Name.Builtin */\n",
|
||||
".output_html .nc { color: #0000FF; font-weight: bold } /* Name.Class */\n",
|
||||
".output_html .no { color: #880000 } /* Name.Constant */\n",
|
||||
".output_html .nd { color: #AA22FF } /* Name.Decorator */\n",
|
||||
".output_html .ni { color: #999999; font-weight: bold } /* Name.Entity */\n",
|
||||
".output_html .ne { color: #D2413A; font-weight: bold } /* Name.Exception */\n",
|
||||
".output_html .nf { color: #0000FF } /* Name.Function */\n",
|
||||
".output_html .nl { color: #A0A000 } /* Name.Label */\n",
|
||||
".output_html .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */\n",
|
||||
".output_html .nt { color: #008000; font-weight: bold } /* Name.Tag */\n",
|
||||
".output_html .nv { color: #19177C } /* Name.Variable */\n",
|
||||
".output_html .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */\n",
|
||||
".output_html .w { color: #bbbbbb } /* Text.Whitespace */\n",
|
||||
".output_html .mb { color: #666666 } /* Literal.Number.Bin */\n",
|
||||
".output_html .mf { color: #666666 } /* Literal.Number.Float */\n",
|
||||
".output_html .mh { color: #666666 } /* Literal.Number.Hex */\n",
|
||||
".output_html .mi { color: #666666 } /* Literal.Number.Integer */\n",
|
||||
".output_html .mo { color: #666666 } /* Literal.Number.Oct */\n",
|
||||
".output_html .sa { color: #BA2121 } /* Literal.String.Affix */\n",
|
||||
".output_html .sb { color: #BA2121 } /* Literal.String.Backtick */\n",
|
||||
".output_html .sc { color: #BA2121 } /* Literal.String.Char */\n",
|
||||
".output_html .dl { color: #BA2121 } /* Literal.String.Delimiter */\n",
|
||||
".output_html .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */\n",
|
||||
".output_html .s2 { color: #BA2121 } /* Literal.String.Double */\n",
|
||||
".output_html .se { color: #BB6622; font-weight: bold } /* Literal.String.Escape */\n",
|
||||
".output_html .sh { color: #BA2121 } /* Literal.String.Heredoc */\n",
|
||||
".output_html .si { color: #BB6688; font-weight: bold } /* Literal.String.Interpol */\n",
|
||||
".output_html .sx { color: #008000 } /* Literal.String.Other */\n",
|
||||
".output_html .sr { color: #BB6688 } /* Literal.String.Regex */\n",
|
||||
".output_html .s1 { color: #BA2121 } /* Literal.String.Single */\n",
|
||||
".output_html .ss { color: #19177C } /* Literal.String.Symbol */\n",
|
||||
".output_html .bp { color: #008000 } /* Name.Builtin.Pseudo */\n",
|
||||
".output_html .fm { color: #0000FF } /* Name.Function.Magic */\n",
|
||||
".output_html .vc { color: #19177C } /* Name.Variable.Class */\n",
|
||||
".output_html .vg { color: #19177C } /* Name.Variable.Global */\n",
|
||||
".output_html .vi { color: #19177C } /* Name.Variable.Instance */\n",
|
||||
".output_html .vm { color: #19177C } /* Name.Variable.Magic */\n",
|
||||
".output_html .il { color: #666666 } /* Literal.Number.Integer.Long */</style><div class=\"highlight\"><pre><span></span><span class=\"p\">{</span>\n",
|
||||
" <span class=\"nt\">"@context"</span><span class=\"p\">:</span> <span class=\"s2\">"http://senpy.gsi.upm.es/api/contexts/YXBpL2V2YWx1YXRlLz9hbGdvPXNlbnRpbWVudC12YWRlciZkYXRhc2V0PXZhZGVyJTJDc3RzJm91dGZvcm1hdD1qc29uLWxkIw%3D%3D"</span><span class=\"p\">,</span>\n",
|
||||
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"AggregatedEvaluation"</span><span class=\"p\">,</span>\n",
|
||||
" <span class=\"nt\">"senpy:evaluations"</span><span class=\"p\">:</span> <span class=\"p\">[</span>\n",
|
||||
" <span class=\"p\">{</span>\n",
|
||||
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"Evaluation"</span><span class=\"p\">,</span>\n",
|
||||
" <span class=\"nt\">"evaluates"</span><span class=\"p\">:</span> <span class=\"s2\">"endpoint:plugins/sentiment-vader_0.1.1__vader"</span><span class=\"p\">,</span>\n",
|
||||
" <span class=\"nt\">"evaluatesOn"</span><span class=\"p\">:</span> <span class=\"s2\">"vader"</span><span class=\"p\">,</span>\n",
|
||||
" <span class=\"nt\">"metrics"</span><span class=\"p\">:</span> <span class=\"p\">[</span>\n",
|
||||
" <span class=\"p\">{</span>\n",
|
||||
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"Accuracy"</span><span class=\"p\">,</span>\n",
|
||||
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.6907142857142857</span>\n",
|
||||
" <span class=\"p\">},</span>\n",
|
||||
" <span class=\"p\">{</span>\n",
|
||||
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"Precision_macro"</span><span class=\"p\">,</span>\n",
|
||||
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.34535714285714286</span>\n",
|
||||
" <span class=\"p\">},</span>\n",
|
||||
" <span class=\"p\">{</span>\n",
|
||||
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"Recall_macro"</span><span class=\"p\">,</span>\n",
|
||||
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.5</span>\n",
|
||||
" <span class=\"p\">},</span>\n",
|
||||
" <span class=\"p\">{</span>\n",
|
||||
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"F1_macro"</span><span class=\"p\">,</span>\n",
|
||||
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.40853400929446554</span>\n",
|
||||
" <span class=\"p\">},</span>\n",
|
||||
" <span class=\"p\">{</span>\n",
|
||||
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"F1_weighted"</span><span class=\"p\">,</span>\n",
|
||||
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.5643605528396403</span>\n",
|
||||
" <span class=\"p\">},</span>\n",
|
||||
" <span class=\"p\">{</span>\n",
|
||||
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"F1_micro"</span><span class=\"p\">,</span>\n",
|
||||
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.6907142857142857</span>\n",
|
||||
" <span class=\"p\">},</span>\n",
|
||||
" <span class=\"p\">{</span>\n",
|
||||
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"F1_macro"</span><span class=\"p\">,</span>\n",
|
||||
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.40853400929446554</span>\n",
|
||||
" <span class=\"p\">}</span>\n",
|
||||
" <span class=\"p\">]</span>\n",
|
||||
" <span class=\"p\">},</span>\n",
|
||||
" <span class=\"p\">{</span>\n",
|
||||
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"Evaluation"</span><span class=\"p\">,</span>\n",
|
||||
" <span class=\"nt\">"evaluates"</span><span class=\"p\">:</span> <span class=\"s2\">"endpoint:plugins/sentiment-vader_0.1.1__sts"</span><span class=\"p\">,</span>\n",
|
||||
" <span class=\"nt\">"evaluatesOn"</span><span class=\"p\">:</span> <span class=\"s2\">"sts"</span><span class=\"p\">,</span>\n",
|
||||
" <span class=\"nt\">"metrics"</span><span class=\"p\">:</span> <span class=\"p\">[</span>\n",
|
||||
" <span class=\"p\">{</span>\n",
|
||||
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"Accuracy"</span><span class=\"p\">,</span>\n",
|
||||
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.3107177974434612</span>\n",
|
||||
" <span class=\"p\">},</span>\n",
|
||||
" <span class=\"p\">{</span>\n",
|
||||
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"Precision_macro"</span><span class=\"p\">,</span>\n",
|
||||
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.1553588987217306</span>\n",
|
||||
" <span class=\"p\">},</span>\n",
|
||||
" <span class=\"p\">{</span>\n",
|
||||
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"Recall_macro"</span><span class=\"p\">,</span>\n",
|
||||
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.5</span>\n",
|
||||
" <span class=\"p\">},</span>\n",
|
||||
" <span class=\"p\">{</span>\n",
|
||||
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"F1_macro"</span><span class=\"p\">,</span>\n",
|
||||
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.23705926481620407</span>\n",
|
||||
" <span class=\"p\">},</span>\n",
|
||||
" <span class=\"p\">{</span>\n",
|
||||
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"F1_weighted"</span><span class=\"p\">,</span>\n",
|
||||
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.14731706525451424</span>\n",
|
||||
" <span class=\"p\">},</span>\n",
|
||||
" <span class=\"p\">{</span>\n",
|
||||
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"F1_micro"</span><span class=\"p\">,</span>\n",
|
||||
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.3107177974434612</span>\n",
|
||||
" <span class=\"p\">},</span>\n",
|
||||
" <span class=\"p\">{</span>\n",
|
||||
" <span class=\"nt\">"@type"</span><span class=\"p\">:</span> <span class=\"s2\">"F1_macro"</span><span class=\"p\">,</span>\n",
|
||||
" <span class=\"nt\">"value"</span><span class=\"p\">:</span> <span class=\"mf\">0.23705926481620407</span>\n",
|
||||
" <span class=\"p\">}</span>\n",
|
||||
" <span class=\"p\">]</span>\n",
|
||||
" <span class=\"p\">}</span>\n",
|
||||
" <span class=\"p\">]</span>\n",
|
||||
"<span class=\"p\">}</span>\n",
|
||||
"</pre></div>\n"
|
||||
],
|
||||
"text/latex": [
|
||||
"\\begin{Verbatim}[commandchars=\\\\\\{\\}]\n",
|
||||
"\\PY{p}{\\PYZob{}}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}@context\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}http://senpy.gsi.upm.es/api/contexts/YXBpL2V2YWx1YXRlLz9hbGdvPXNlbnRpbWVudC12YWRlciZkYXRhc2V0PXZhZGVyJTJDc3RzJm91dGZvcm1hdD1qc29uLWxkIw\\PYZpc{}3D\\PYZpc{}3D\\PYZdq{}}\\PY{p}{,}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}AggregatedEvaluation\\PYZdq{}}\\PY{p}{,}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}senpy:evaluations\\PYZdq{}}\\PY{p}{:} \\PY{p}{[}\n",
|
||||
" \\PY{p}{\\PYZob{}}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Evaluation\\PYZdq{}}\\PY{p}{,}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}evaluates\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}endpoint:plugins/sentiment\\PYZhy{}vader\\PYZus{}0.1.1\\PYZus{}\\PYZus{}vader\\PYZdq{}}\\PY{p}{,}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}evaluatesOn\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}vader\\PYZdq{}}\\PY{p}{,}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}metrics\\PYZdq{}}\\PY{p}{:} \\PY{p}{[}\n",
|
||||
" \\PY{p}{\\PYZob{}}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Accuracy\\PYZdq{}}\\PY{p}{,}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.6907142857142857}\n",
|
||||
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||
" \\PY{p}{\\PYZob{}}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Precision\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.34535714285714286}\n",
|
||||
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||
" \\PY{p}{\\PYZob{}}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Recall\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.5}\n",
|
||||
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||
" \\PY{p}{\\PYZob{}}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.40853400929446554}\n",
|
||||
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||
" \\PY{p}{\\PYZob{}}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}weighted\\PYZdq{}}\\PY{p}{,}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.5643605528396403}\n",
|
||||
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||
" \\PY{p}{\\PYZob{}}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}micro\\PYZdq{}}\\PY{p}{,}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.6907142857142857}\n",
|
||||
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||
" \\PY{p}{\\PYZob{}}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.40853400929446554}\n",
|
||||
" \\PY{p}{\\PYZcb{}}\n",
|
||||
" \\PY{p}{]}\n",
|
||||
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||
" \\PY{p}{\\PYZob{}}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Evaluation\\PYZdq{}}\\PY{p}{,}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}evaluates\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}endpoint:plugins/sentiment\\PYZhy{}vader\\PYZus{}0.1.1\\PYZus{}\\PYZus{}sts\\PYZdq{}}\\PY{p}{,}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}evaluatesOn\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}sts\\PYZdq{}}\\PY{p}{,}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}metrics\\PYZdq{}}\\PY{p}{:} \\PY{p}{[}\n",
|
||||
" \\PY{p}{\\PYZob{}}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Accuracy\\PYZdq{}}\\PY{p}{,}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.3107177974434612}\n",
|
||||
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||
" \\PY{p}{\\PYZob{}}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Precision\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.1553588987217306}\n",
|
||||
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||
" \\PY{p}{\\PYZob{}}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}Recall\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.5}\n",
|
||||
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||
" \\PY{p}{\\PYZob{}}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.23705926481620407}\n",
|
||||
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||
" \\PY{p}{\\PYZob{}}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}weighted\\PYZdq{}}\\PY{p}{,}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.14731706525451424}\n",
|
||||
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||
" \\PY{p}{\\PYZob{}}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}micro\\PYZdq{}}\\PY{p}{,}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.3107177974434612}\n",
|
||||
" \\PY{p}{\\PYZcb{}}\\PY{p}{,}\n",
|
||||
" \\PY{p}{\\PYZob{}}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}@type\\PYZdq{}}\\PY{p}{:} \\PY{l+s+s2}{\\PYZdq{}F1\\PYZus{}macro\\PYZdq{}}\\PY{p}{,}\n",
|
||||
" \\PY{n+nt}{\\PYZdq{}value\\PYZdq{}}\\PY{p}{:} \\PY{l+m+mf}{0.23705926481620407}\n",
|
||||
" \\PY{p}{\\PYZcb{}}\n",
|
||||
" \\PY{p}{]}\n",
|
||||
" \\PY{p}{\\PYZcb{}}\n",
|
||||
" \\PY{p}{]}\n",
|
||||
"\\PY{p}{\\PYZcb{}}\n",
|
||||
"\\end{Verbatim}\n"
|
||||
],
|
||||
"text/plain": [
|
||||
"{\n",
|
||||
" \"@context\": \"http://senpy.gsi.upm.es/api/contexts/YXBpL2V2YWx1YXRlLz9hbGdvPXNlbnRpbWVudC12YWRlciZkYXRhc2V0PXZhZGVyJTJDc3RzJm91dGZvcm1hdD1qc29uLWxkIw%3D%3D\",\n",
|
||||
" \"@type\": \"AggregatedEvaluation\",\n",
|
||||
" \"senpy:evaluations\": [\n",
|
||||
" {\n",
|
||||
" \"@type\": \"Evaluation\",\n",
|
||||
" \"evaluates\": \"endpoint:plugins/sentiment-vader_0.1.1__vader\",\n",
|
||||
" \"evaluatesOn\": \"vader\",\n",
|
||||
" \"metrics\": [\n",
|
||||
" {\n",
|
||||
" \"@type\": \"Accuracy\",\n",
|
||||
" \"value\": 0.6907142857142857\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"@type\": \"Precision_macro\",\n",
|
||||
" \"value\": 0.34535714285714286\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"@type\": \"Recall_macro\",\n",
|
||||
" \"value\": 0.5\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"@type\": \"F1_macro\",\n",
|
||||
" \"value\": 0.40853400929446554\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"@type\": \"F1_weighted\",\n",
|
||||
" \"value\": 0.5643605528396403\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"@type\": \"F1_micro\",\n",
|
||||
" \"value\": 0.6907142857142857\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"@type\": \"F1_macro\",\n",
|
||||
" \"value\": 0.40853400929446554\n",
|
||||
" }\n",
|
||||
" ]\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"@type\": \"Evaluation\",\n",
|
||||
" \"evaluates\": \"endpoint:plugins/sentiment-vader_0.1.1__sts\",\n",
|
||||
" \"evaluatesOn\": \"sts\",\n",
|
||||
" \"metrics\": [\n",
|
||||
" {\n",
|
||||
" \"@type\": \"Accuracy\",\n",
|
||||
" \"value\": 0.3107177974434612\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"@type\": \"Precision_macro\",\n",
|
||||
" \"value\": 0.1553588987217306\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"@type\": \"Recall_macro\",\n",
|
||||
" \"value\": 0.5\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"@type\": \"F1_macro\",\n",
|
||||
" \"value\": 0.23705926481620407\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"@type\": \"F1_weighted\",\n",
|
||||
" \"value\": 0.14731706525451424\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"@type\": \"F1_micro\",\n",
|
||||
" \"value\": 0.3107177974434612\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"@type\": \"F1_macro\",\n",
|
||||
" \"value\": 0.23705926481620407\n",
|
||||
" }\n",
|
||||
" ]\n",
|
||||
" }\n",
|
||||
" ]\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import requests\n",
|
||||
"from IPython.display import Code\n",
|
||||
"\n",
|
||||
"endpoint = 'http://senpy.gsi.upm.es/api'\n",
|
||||
"res = requests.get(f'{endpoint}/evaluate',\n",
|
||||
" params={\"algo\": \"sentiment-vader\",\n",
|
||||
" \"dataset\": \"vader,sts\",\n",
|
||||
" 'outformat': 'json-ld'\n",
|
||||
" })\n",
|
||||
"Code(res.text, language='json')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Programmatically (expert)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"A third option is to evaluate plugins manually without launching the server.\n",
|
||||
"\n",
|
||||
"This option is particularly interesting for advanced users that want faster iterations and evaluation results, and for automation.\n",
|
||||
"\n",
|
||||
"We would first need an instance of a plugin.\n",
|
||||
"In this example we will use the Sentiment140 plugin that is included in every senpy installation:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from senpy.plugins.sentiment import sentiment140_plugin"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"s140 = sentiment140_plugin.Sentiment140()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Then, we need to know what datasets are available.\n",
|
||||
"We can list all datasets and basic stats (e.g., number of instances and labels used) like this:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 32,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"vader {'instances': 4200, 'labels': [1, -1]}\n",
|
||||
"sts {'instances': 4200, 'labels': [1, -1]}\n",
|
||||
"imdb_unsup {'instances': 50000, 'labels': [1, -1]}\n",
|
||||
"imdb {'instances': 50000, 'labels': [1, -1]}\n",
|
||||
"sst {'instances': 11855, 'labels': [1, -1]}\n",
|
||||
"multidomain {'instances': 38548, 'labels': [1, -1]}\n",
|
||||
"sentiment140 {'instances': 1600000, 'labels': [1, -1]}\n",
|
||||
"semeval07 {'instances': 'None', 'labels': [1, -1]}\n",
|
||||
"semeval14 {'instances': 7838, 'labels': [1, -1]}\n",
|
||||
"pl04 {'instances': 4000, 'labels': [1, -1]}\n",
|
||||
"pl05 {'instances': 10662, 'labels': [1, -1]}\n",
|
||||
"semeval13 {'instances': 6259, 'labels': [1, -1]}\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from senpy.gsitk_compat import datasets\n",
|
||||
"for k, d in datasets.items():\n",
|
||||
" print(k, d['stats'])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now, we will evaluate our plugin in one of the smallest datasets, `sts`:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 37,
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{\n",
|
||||
" \"@type\": \"Evaluation\",\n",
|
||||
" \"evaluates\": \"endpoint:plugins/sentiment140_0.2\",\n",
|
||||
" \"evaluatesOn\": \"sts\",\n",
|
||||
" \"metrics\": [\n",
|
||||
" {\n",
|
||||
" \"@type\": \"Accuracy\",\n",
|
||||
" \"value\": 0.872173058013766\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"@type\": \"Precision_macro\",\n",
|
||||
" \"value\": 0.9035254323131467\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"@type\": \"Recall_macro\",\n",
|
||||
" \"value\": 0.8021249029415483\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"@type\": \"F1_macro\",\n",
|
||||
" \"value\": 0.8320673712021136\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"@type\": \"F1_weighted\",\n",
|
||||
" \"value\": 0.8631351567604358\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"@type\": \"F1_micro\",\n",
|
||||
" \"value\": 0.872173058013766\n",
|
||||
" },\n",
|
||||
" {\n",
|
||||
" \"@type\": \"F1_macro\",\n",
|
||||
" \"value\": 0.8320673712021136\n",
|
||||
" }\n",
|
||||
" ]\n",
|
||||
" }]"
|
||||
]
|
||||
},
|
||||
"execution_count": 37,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"s140.evaluate(['sts', ])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"anaconda-cloud": {},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.3"
|
||||
},
|
||||
"toc": {
|
||||
"colors": {
|
||||
"hover_highlight": "#DAA520",
|
||||
"running_highlight": "#FF0000",
|
||||
"selected_highlight": "#FFD700"
|
||||
},
|
||||
"moveMenuLeft": true,
|
||||
"nav_menu": {
|
||||
"height": "68px",
|
||||
"width": "252px"
|
||||
},
|
||||
"navigate_menu": true,
|
||||
"number_sections": true,
|
||||
"sideBar": true,
|
||||
"threshold": 4,
|
||||
"toc_cell": false,
|
||||
"toc_section_display": "block",
|
||||
"toc_window_display": false
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 1
|
||||
}
|
@ -24,6 +24,7 @@ I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
|
||||
help:
|
||||
@echo "Please use \`make <target>' where <target> is one of"
|
||||
@echo " html to make standalone HTML files"
|
||||
@echo " entr to watch for changes and continuously make HTML files"
|
||||
@echo " dirhtml to make HTML files named index.html in directories"
|
||||
@echo " singlehtml to make a single large HTML file"
|
||||
@echo " pickle to make pickle files"
|
||||
@ -49,6 +50,9 @@ help:
|
||||
clean:
|
||||
rm -rf $(BUILDDIR)/*
|
||||
|
||||
entr:
|
||||
while true; do ag -g rst | entr -d make html; done
|
||||
|
||||
html:
|
||||
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
||||
@echo
|
||||
|
1717
docs/Quickstart.ipynb
Normal file
BIN
docs/_static/header.png
vendored
Normal file
After Width: | Height: | Size: 208 KiB |
252
docs/api.rst
@ -1,5 +1,5 @@
|
||||
NIF API
|
||||
=======
|
||||
-------
|
||||
.. http:get:: /api
|
||||
|
||||
Basic endpoint for sentiment/emotion analysis.
|
||||
@ -22,38 +22,32 @@ NIF API
|
||||
Content-Type: text/javascript
|
||||
|
||||
{
|
||||
"@context": [
|
||||
"http://127.0.0.1/static/context.jsonld",
|
||||
],
|
||||
"analysis": [
|
||||
{
|
||||
"@id": "SentimentAnalysisExample",
|
||||
"@type": "marl:SentimentAnalysis",
|
||||
"dc:language": "en",
|
||||
"marl:maxPolarityValue": 10.0,
|
||||
"marl:minPolarityValue": 0.0
|
||||
}
|
||||
],
|
||||
"domain": "wndomains:electronics",
|
||||
"entries": [
|
||||
{
|
||||
"opinions": [
|
||||
{
|
||||
"prov:generatedBy": "SentimentAnalysisExample",
|
||||
"marl:polarityValue": 7.8,
|
||||
"marl:hasPolarity": "marl:Positive",
|
||||
"marl:describesObject": "http://www.gsi.dit.upm.es",
|
||||
}
|
||||
],
|
||||
"nif:isString": "I love GSI",
|
||||
"strings": [
|
||||
{
|
||||
"nif:anchorOf": "GSI",
|
||||
"nif:taIdentRef": "http://www.gsi.dit.upm.es"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
"@context":"http://127.0.0.1/api/contexts/Results.jsonld",
|
||||
"@id":"_:Results_11241245.22",
|
||||
"@type":"results"
|
||||
"activities": [
|
||||
"plugins/sentiment-140_0.1"
|
||||
],
|
||||
"entries": [
|
||||
{
|
||||
"@id": "_:Entry_11241245.22"
|
||||
"@type":"entry",
|
||||
"emotions": [],
|
||||
"entities": [],
|
||||
"sentiments": [
|
||||
{
|
||||
"@id": "Sentiment0",
|
||||
"@type": "sentiment",
|
||||
"marl:hasPolarity": "marl:Negative",
|
||||
"marl:polarityValue": 0,
|
||||
"prefix": ""
|
||||
}
|
||||
],
|
||||
"suggestions": [],
|
||||
"text": "This text makes me sad.\nwhilst this text makes me happy and surprised at the same time.\nI cannot believe it!",
|
||||
"topics": []
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
:query i input: No default. Depends on informat and intype
|
||||
@ -79,7 +73,7 @@ NIF API
|
||||
.. http:get:: /api/plugins
|
||||
|
||||
Returns a list of installed plugins.
|
||||
**Example request**:
|
||||
**Example request and response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
@ -88,104 +82,126 @@ NIF API
|
||||
Accept: application/json, text/javascript
|
||||
|
||||
|
||||
**Example response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
{
|
||||
"@context": {
|
||||
...
|
||||
},
|
||||
"@type": "plugins",
|
||||
"plugins": [
|
||||
{
|
||||
"name": "sentiment140",
|
||||
"is_activated": true,
|
||||
"version": "0.1",
|
||||
"extra_params": {
|
||||
"@id": "extra_params_sentiment140_0.1",
|
||||
"language": {
|
||||
"required": false,
|
||||
"@id": "lang_sentiment140",
|
||||
"options": [
|
||||
"es",
|
||||
"en",
|
||||
"auto"
|
||||
],
|
||||
"aliases": [
|
||||
"language",
|
||||
"l"
|
||||
]
|
||||
}
|
||||
},
|
||||
"@id": "sentiment140_0.1"
|
||||
}, {
|
||||
"name": "rand",
|
||||
"is_activated": true,
|
||||
"version": "0.1",
|
||||
"extra_params": {
|
||||
"@id": "extra_params_rand_0.1",
|
||||
"language": {
|
||||
"required": false,
|
||||
"@id": "lang_rand",
|
||||
"options": [
|
||||
"es",
|
||||
"en",
|
||||
"auto"
|
||||
],
|
||||
"aliases": [
|
||||
"language",
|
||||
"l"
|
||||
]
|
||||
}
|
||||
},
|
||||
"@id": "rand_0.1"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
{
|
||||
"@id": "plugins/sentiment-140_0.1",
|
||||
"@type": "sentimentPlugin",
|
||||
"author": "@balkian",
|
||||
"description": "Sentiment classifier using rule-based classification for English and Spanish. This plugin uses sentiment140 data to perform classification. For more information: http://help.sentiment140.com/for-students/",
|
||||
"extra_params": {
|
||||
"language": {
|
||||
"@id": "lang_sentiment140",
|
||||
"aliases": [
|
||||
"language",
|
||||
"l"
|
||||
],
|
||||
"options": [
|
||||
"es",
|
||||
"en",
|
||||
"auto"
|
||||
],
|
||||
"required": false
|
||||
}
|
||||
},
|
||||
"is_activated": true,
|
||||
"maxPolarityValue": 1.0,
|
||||
"minPolarityValue": 0.0,
|
||||
"module": "sentiment-140",
|
||||
"name": "sentiment-140",
|
||||
"requirements": {},
|
||||
"version": "0.1"
|
||||
},
|
||||
{
|
||||
"@id": "plugins/ExamplePlugin_0.1",
|
||||
"@type": "sentimentPlugin",
|
||||
"author": "@balkian",
|
||||
"custom_attribute": "42",
|
||||
"description": "I am just an example",
|
||||
"extra_params": {
|
||||
"parameter": {
|
||||
"@id": "parameter",
|
||||
"aliases": [
|
||||
"parameter",
|
||||
"param"
|
||||
],
|
||||
"default": 42,
|
||||
"required": true
|
||||
}
|
||||
},
|
||||
"is_activated": true,
|
||||
"maxPolarityValue": 1.0,
|
||||
"minPolarityValue": 0.0,
|
||||
"module": "example",
|
||||
"name": "ExamplePlugin",
|
||||
"requirements": "noop",
|
||||
"version": "0.1"
|
||||
}
|
||||
|
||||
.. http:get:: /api/plugins/<pluginname>
|
||||
|
||||
Returns the information of a specific plugin.
|
||||
**Example request**:
|
||||
**Example request and response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
GET /api/plugins/rand/ HTTP/1.1
|
||||
GET /api/plugins/sentiment-random/ HTTP/1.1
|
||||
Host: localhost
|
||||
Accept: application/json, text/javascript
|
||||
|
||||
|
||||
**Example response**:
|
||||
|
||||
.. sourcecode:: http
|
||||
|
||||
{
|
||||
"@id": "rand_0.1",
|
||||
"@type": "sentimentPlugin",
|
||||
"extra_params": {
|
||||
"@id": "extra_params_rand_0.1",
|
||||
"language": {
|
||||
"@id": "lang_rand",
|
||||
"aliases": [
|
||||
"language",
|
||||
"l"
|
||||
],
|
||||
"options": [
|
||||
"es",
|
||||
"en",
|
||||
"auto"
|
||||
],
|
||||
"required": false
|
||||
}
|
||||
},
|
||||
"is_activated": true,
|
||||
"name": "rand",
|
||||
"version": "0.1"
|
||||
"@context": "http://127.0.0.1/api/contexts/ExamplePlugin.jsonld",
|
||||
"@id": "plugins/ExamplePlugin_0.1",
|
||||
"@type": "sentimentPlugin",
|
||||
"author": "@balkian",
|
||||
"custom_attribute": "42",
|
||||
"description": "I am just an example",
|
||||
"extra_params": {
|
||||
"parameter": {
|
||||
"@id": "parameter",
|
||||
"aliases": [
|
||||
"parameter",
|
||||
"param"
|
||||
],
|
||||
"default": 42,
|
||||
"required": true
|
||||
}
|
||||
},
|
||||
"is_activated": true,
|
||||
"maxPolarityValue": 1.0,
|
||||
"minPolarityValue": 0.0,
|
||||
"module": "example",
|
||||
"name": "ExamplePlugin",
|
||||
"requirements": "noop",
|
||||
"version": "0.1"
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
.. http:get:: /api/plugins/default
|
||||
|
||||
Return the information about the default plugin.
|
||||
|
8
docs/apischema.rst
Normal file
@ -0,0 +1,8 @@
|
||||
API and vocabularies
|
||||
####################
|
||||
|
||||
.. toctree::
|
||||
|
||||
vocabularies.rst
|
||||
api.rst
|
||||
examples.rst
|
@ -1,4 +1,4 @@
|
||||
{
|
||||
"plugins": [
|
||||
]
|
||||
"@type": "plugins",
|
||||
"plugins": {}
|
||||
}
|
||||
|
77
docs/bad-examples/results/example-analysis-as-id-FAIL.json
Normal file
@ -0,0 +1,77 @@
|
||||
{
|
||||
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
||||
"@id": "me:Result1",
|
||||
"@type": "results",
|
||||
"activities": [
|
||||
"me:SAnalysis1",
|
||||
"me:SgAnalysis1",
|
||||
"me:EmotionAnalysis1",
|
||||
"me:NER1",
|
||||
{
|
||||
"description": "missing @id and @type"
|
||||
}
|
||||
],
|
||||
"entries": [
|
||||
{
|
||||
"@id": "http://micro.blog/status1",
|
||||
"@type": [
|
||||
"nif:RFC5147String",
|
||||
"nif:Context"
|
||||
],
|
||||
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
||||
"entities": [
|
||||
{
|
||||
"@id": "http://micro.blog/status1#char=5,13",
|
||||
"nif:beginIndex": 5,
|
||||
"nif:endIndex": 13,
|
||||
"nif:anchorOf": "Microsoft",
|
||||
"me:references": "http://dbpedia.org/page/Microsoft",
|
||||
"prov:wasGeneratedBy": "me:NER1"
|
||||
},
|
||||
{
|
||||
"@id": "http://micro.blog/status1#char=25,37",
|
||||
"nif:beginIndex": 25,
|
||||
"nif:endIndex": 37,
|
||||
"nif:anchorOf": "Windows Phone",
|
||||
"me:references": "http://dbpedia.org/page/Windows_Phone",
|
||||
"prov:wasGeneratedBy": "me:NER1"
|
||||
}
|
||||
],
|
||||
"suggestions": [
|
||||
{
|
||||
"@id": "http://micro.blog/status1#char=16,77",
|
||||
"nif:beginIndex": 16,
|
||||
"nif:endIndex": 77,
|
||||
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
|
||||
"prov:wasGeneratedBy": "me:SgAnalysis1"
|
||||
}
|
||||
],
|
||||
"sentiments": [
|
||||
{
|
||||
"@id": "http://micro.blog/status1#char=80,97",
|
||||
"nif:beginIndex": 80,
|
||||
"nif:endIndex": 97,
|
||||
"nif:anchorOf": "You'll be awesome.",
|
||||
"marl:hasPolarity": "marl:Positive",
|
||||
"marl:polarityValue": 0.9,
|
||||
"prov:wasGeneratedBy": "me:SAnalysis1"
|
||||
}
|
||||
],
|
||||
"emotions": [
|
||||
{
|
||||
"@id": "http://micro.blog/status1#char=0,109",
|
||||
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
||||
"prov:wasGeneratedBy": "me:EAnalysis1",
|
||||
"onyx:hasEmotion": [
|
||||
{
|
||||
"onyx:hasEmotionCategory": "wna:liking"
|
||||
},
|
||||
{
|
||||
"onyx:hasEmotionCategory": "wna:excitement"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
@ -2,17 +2,13 @@
|
||||
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
||||
"@id": "http://example.com#NIFExample",
|
||||
"@type": "results",
|
||||
"analysis": [
|
||||
"activities": [
|
||||
],
|
||||
"entries": [
|
||||
{
|
||||
"@type": [
|
||||
"nif:RFC5147String",
|
||||
"nif:Context"
|
||||
],
|
||||
"nif:beginIndex": 0,
|
||||
"nif:endIndex": 40,
|
||||
"nif:isString": "My favourite actress is Natalie Portman"
|
||||
"text": "An entry should have a nif:isString key"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
56
docs/conf.py
@ -37,6 +37,9 @@ extensions = [
|
||||
'sphinx.ext.todo',
|
||||
'sphinxcontrib.httpdomain',
|
||||
'sphinx.ext.coverage',
|
||||
'sphinx.ext.autosectionlabel',
|
||||
'nbsphinx',
|
||||
'sphinx.ext.mathjax',
|
||||
]
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
@ -53,21 +56,22 @@ master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'Senpy'
|
||||
copyright = u'2016, J. Fernando Sánchez'
|
||||
copyright = u'2019, J. Fernando Sánchez'
|
||||
description = u'A framework for sentiment and emotion analysis services'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
with open('../senpy/VERSION') as f:
|
||||
version = f.read().strip()
|
||||
# with open('../senpy/VERSION') as f:
|
||||
# version = f.read().strip()
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = version
|
||||
# release = version
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
#language = None
|
||||
language = None
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to some
|
||||
# non-false value, then it is used:
|
||||
@ -77,7 +81,9 @@ release = version
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
exclude_patterns = ['_build']
|
||||
exclude_patterns = ['_build', '**.ipynb_checkpoints']
|
||||
|
||||
|
||||
|
||||
# The reST default role (used for this markup: `text`) to use for all
|
||||
# documents.
|
||||
@ -104,14 +110,14 @@ pygments_style = 'sphinx'
|
||||
#keep_warnings = False
|
||||
|
||||
|
||||
html_theme = 'alabaster'
|
||||
# -- Options for HTML output ----------------------------------------------
|
||||
if not on_rtd: # only import and set the theme if we're building docs locally
|
||||
import sphinx_rtd_theme
|
||||
html_theme = 'sphinx_rtd_theme'
|
||||
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
||||
# if not on_rtd: # only import and set the theme if we're building docs locally
|
||||
# import sphinx_rtd_theme
|
||||
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
||||
|
||||
else:
|
||||
html_theme = 'default'
|
||||
# else:
|
||||
# html_theme = 'default'
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
@ -119,7 +125,14 @@ else:
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
#html_theme_options = {}
|
||||
html_theme_options = {
|
||||
'logo': 'header.png',
|
||||
'github_user': 'gsi-upm',
|
||||
'github_repo': 'senpy',
|
||||
'github_banner': True,
|
||||
'sidebar_collapse': True,
|
||||
}
|
||||
|
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory.
|
||||
#html_theme_path = []
|
||||
@ -159,7 +172,13 @@ html_static_path = ['_static']
|
||||
#html_use_smartypants = True
|
||||
|
||||
# Custom sidebar templates, maps document names to template names.
|
||||
#html_sidebars = {}
|
||||
html_sidebars = {
|
||||
'**': [
|
||||
'about.html',
|
||||
'navigation.html',
|
||||
'searchbox.html',
|
||||
]
|
||||
}
|
||||
|
||||
# Additional templates that should be rendered to pages, maps page names to
|
||||
# template names.
|
||||
@ -272,3 +291,12 @@ texinfo_documents = [
|
||||
|
||||
# If true, do not generate a @detailmenu in the "Top" node's menu.
|
||||
#texinfo_no_detailmenu = False
|
||||
|
||||
nbsphinx_prolog = """
|
||||
.. note:: This is an `auto-generated <https://nbsphinx.readthedocs.io>`_ static view of a Jupyter notebook.
|
||||
|
||||
To run the code examples in your computer, you may download the original notebook from the repository: https://github.com/gsi-upm/senpy/tree/master/docs/{{ env.doc2path(env.docname, base=None) }}
|
||||
|
||||
|
||||
----
|
||||
"""
|
||||
|
@ -1,93 +1,152 @@
|
||||
Introduction
|
||||
------------
|
||||
Automatic Model Conversion
|
||||
--------------------------
|
||||
|
||||
Senpy includes experimental support for emotion/sentiment conversion plugins.
|
||||
Senpy includes support for emotion and sentiment conversion.
|
||||
When a user requests a specific model, senpy will choose a strategy to convert the model that the service usually outputs and the model requested by the user.
|
||||
|
||||
Out of the box, senpy can convert from the `emotionml:pad` (pleasure-arousal-dominance) dimensional model to `emoml:big6` (Ekman's big-6) categories, and vice versa.
|
||||
This specific conversion uses a series of dimensional centroids (`emotionml:pad`) for each emotion category (`emotionml:big6`).
|
||||
A dimensional value is converted to a category by looking for the nearest centroid.
|
||||
The centroids are calculated according to this article:
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
Kim, S. M., Valitutti, A., & Calvo, R. A. (2010, June).
|
||||
Evaluation of unsupervised emotion models to textual affect recognition.
|
||||
In Proceedings of the NAACL HLT 2010 Workshop on Computational Approaches to Analysis and Generation of Emotion in Text (pp. 62-70).
|
||||
Association for Computational Linguistics.
|
||||
|
||||
|
||||
|
||||
It is possible to add new conversion strategies by `Developing a conversion plugin`_.
|
||||
|
||||
|
||||
Use
|
||||
---
|
||||
===
|
||||
|
||||
Consider the original query: `http://127.0.0.1:5000/api/?i=hello&algo=emoRand`_
|
||||
Consider the following query to an emotion service: http://senpy.gsi.upm.es/api/emotion-anew?i=good
|
||||
|
||||
The requested plugin (emoRand) returns emotions using Ekman's model (or big6 in EmotionML):
|
||||
The requested plugin (emotion-random) returns emotions using the VAD space (FSRE dimensions in EmotionML):
|
||||
|
||||
.. code:: json
|
||||
|
||||
|
||||
... rest of the document ...
|
||||
{
|
||||
"@type": "emotionSet",
|
||||
"onyx:hasEmotion": {
|
||||
"@type": "emotion",
|
||||
"onyx:hasEmotionCategory": "emoml:big6anger"
|
||||
},
|
||||
"prov:wasGeneratedBy": "plugins/emoRand_0.1"
|
||||
}
|
||||
[
|
||||
{
|
||||
"@type": "EmotionSet",
|
||||
"onyx:hasEmotion": [
|
||||
{
|
||||
"@type": "Emotion",
|
||||
"emoml:pad-dimensions_arousal": 5.43,
|
||||
"emoml:pad-dimensions_dominance": 6.41,
|
||||
"emoml:pad-dimensions_pleasure": 7.47,
|
||||
"prov:wasGeneratedBy": "prefix:Analysis_1562744784.8789825"
|
||||
}
|
||||
],
|
||||
"prov:wasGeneratedBy": "prefix:Analysis_1562744784.8789825"
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
|
||||
|
||||
To get these emotions in VAD space (FSRE dimensions in EmotionML), we'd do this:
|
||||
To get the equivalent of these emotions in Ekman's categories (i.e., Ekman's Big 6 in EmotionML), we'd do this:
|
||||
|
||||
`http://127.0.0.1:5000/api/?i=hello&algo=emoRand&emotionModel=emoml:fsre-dimensions`_
|
||||
http://senpy.gsi.upm.es/api/emotion-anew?i=good&emotion-model=emoml:big6
|
||||
|
||||
This call, provided there is a valid conversion plugin from Ekman's to VAD, would return something like this:
|
||||
|
||||
.. code:: json
|
||||
|
||||
|
||||
... rest of the document ...
|
||||
[
|
||||
{
|
||||
"@type": "EmotionSet",
|
||||
"onyx:hasEmotion": [
|
||||
{
|
||||
"@type": "emotionSet",
|
||||
"onyx:hasEmotion": {
|
||||
"@type": "emotion",
|
||||
"onyx:hasEmotionCategory": "emoml:big6anger"
|
||||
},
|
||||
"prov:wasGeneratedBy": "plugins/emoRand_0.1"
|
||||
}, {
|
||||
"@type": "emotionSet",
|
||||
"onyx:hasEmotion": {
|
||||
"@type": "emotion",
|
||||
"A": 7.22,
|
||||
"D": 6.28,
|
||||
"V": 8.6
|
||||
},
|
||||
"prov:wasGeneratedBy": "plugins/Ekman2VAD_0.1"
|
||||
|
||||
"@type": "Emotion",
|
||||
"onyx:algorithmConfidence": 4.4979,
|
||||
"onyx:hasEmotionCategory": "emoml:big6happiness"
|
||||
}
|
||||
],
|
||||
"prov:wasDerivedFrom": {
|
||||
"@id": "Emotions0",
|
||||
"@type": "EmotionSet",
|
||||
"onyx:hasEmotion": [
|
||||
{
|
||||
"@id": "Emotion0",
|
||||
"@type": "Emotion",
|
||||
"emoml:pad-dimensions_arousal": 5.43,
|
||||
"emoml:pad-dimensions_dominance": 6.41,
|
||||
"emoml:pad-dimensions_pleasure": 7.47,
|
||||
"prov:wasGeneratedBy": "prefix:Analysis_1562745220.1553965"
|
||||
}
|
||||
],
|
||||
"prov:wasGeneratedBy": "prefix:Analysis_1562745220.1553965"
|
||||
},
|
||||
"prov:wasGeneratedBy": "prefix:Analysis_1562745220.1570725"
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
That is called a *full* response, as it simply adds the converted emotion alongside.
|
||||
It is also possible to get the original emotion nested within the new converted emotion, using the `conversion=nested` parameter:
|
||||
|
||||
http://senpy.gsi.upm.es/api/emotion-anew?i=good&emotion-model=emoml:big6&conversion=nested
|
||||
|
||||
.. code:: json
|
||||
|
||||
[
|
||||
{
|
||||
"@type": "EmotionSet",
|
||||
"onyx:hasEmotion": [
|
||||
{
|
||||
"@type": "Emotion",
|
||||
"onyx:algorithmConfidence": 4.4979,
|
||||
"onyx:hasEmotionCategory": "emoml:big6happiness"
|
||||
}
|
||||
],
|
||||
"prov:wasDerivedFrom": {
|
||||
"@id": "Emotions0",
|
||||
"@type": "EmotionSet",
|
||||
"onyx:hasEmotion": [
|
||||
{
|
||||
"@id": "Emotion0",
|
||||
"@type": "Emotion",
|
||||
"emoml:pad-dimensions_arousal": 5.43,
|
||||
"emoml:pad-dimensions_dominance": 6.41,
|
||||
"emoml:pad-dimensions_pleasure": 7.47,
|
||||
"prov:wasGeneratedBy": "prefix:Analysis_1562744962.896306"
|
||||
}
|
||||
],
|
||||
"prov:wasGeneratedBy": "prefix:Analysis_1562744962.896306"
|
||||
},
|
||||
"prov:wasGeneratedBy": "prefix:Analysis_1562744962.8978968"
|
||||
}
|
||||
]
|
||||
|
||||
... rest of the document ...
|
||||
{
|
||||
"@type": "emotionSet",
|
||||
"onyx:hasEmotion": {
|
||||
"@type": "emotion",
|
||||
"onyx:hasEmotionCategory": "emoml:big6anger"
|
||||
},
|
||||
"prov:wasGeneratedBy": "plugins/emoRand_0.1"
|
||||
"onyx:wasDerivedFrom": {
|
||||
"@type": "emotionSet",
|
||||
"onyx:hasEmotion": {
|
||||
"@type": "emotion",
|
||||
"A": 7.22,
|
||||
"D": 6.28,
|
||||
"V": 8.6
|
||||
},
|
||||
"prov:wasGeneratedBy": "plugins/Ekman2VAD_0.1"
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
Lastly, `conversion=filtered` would only return the converted emotions.
|
||||
|
||||
|
||||
.. code:: json
|
||||
|
||||
[
|
||||
{
|
||||
"@type": "EmotionSet",
|
||||
"onyx:hasEmotion": [
|
||||
{
|
||||
"@type": "Emotion",
|
||||
"onyx:algorithmConfidence": 4.4979,
|
||||
"onyx:hasEmotionCategory": "emoml:big6happiness"
|
||||
}
|
||||
],
|
||||
"prov:wasGeneratedBy": "prefix:Analysis_1562744925.7322266"
|
||||
}
|
||||
]
|
||||
|
||||
Developing a conversion plugin
|
||||
------------------------------
|
||||
==============================
|
||||
|
||||
Conversion plugins are discovered by the server just like any other plugin.
|
||||
The difference is the slightly different API, and the need to specify the `source` and `target` of the conversion.
|
||||
@ -106,7 +165,6 @@ For instance, an emotion conversion plugin needs the following:
|
||||
|
||||
|
||||
|
||||
|
||||
.. code:: python
|
||||
|
||||
|
||||
@ -114,3 +172,6 @@ For instance, an emotion conversion plugin needs the following:
|
||||
|
||||
def convert(self, emotionSet, fromModel, toModel, params):
|
||||
pass
|
||||
|
||||
|
||||
More implementation details are shown in the `centroids plugin <https://github.com/gsi-upm/senpy/blob/master/senpy/plugins/postprocessing/emotion/centroids.py>`_.
|
||||
|
@ -1,75 +1,13 @@
|
||||
Demo
|
||||
----
|
||||
|
||||
There is a demo available on http://senpy.demos.gsi.dit.upm.es/, where you can a serie of different plugins. You can use them in the playground or make a directly requests to the service.
|
||||
There is a demo available on http://senpy.gsi.upm.es/, where you can test a live instance of Senpy, with several open source plugins.
|
||||
You can use the playground (a web interface) or the HTTP API.
|
||||
|
||||
.. image:: senpy-playground.png
|
||||
:height: 400px
|
||||
.. image:: playground-0.20.png
|
||||
:target: http://senpy.gsi.upm.es
|
||||
:width: 800px
|
||||
:scale: 100 %
|
||||
:align: center
|
||||
|
||||
Plugins Demo
|
||||
============
|
||||
|
||||
The next plugins are available at the demo:
|
||||
|
||||
* emoTextAnew extracts the VAD (valence-arousal-dominance) of a sentence by matching words from the ANEW dictionary.
|
||||
* emoTextWordnetAffect uses the hierarchy of WordnetAffect to calculate the emotion of the sentence.
|
||||
* vaderSentiment utilizes the software from vaderSentiment to calculate the sentiment of a sentence.
|
||||
* sentiText is software developed during the TASS 2015 competition; it has been adapted for English and Spanish.
|
||||
|
||||
emoTextANEW plugin
|
||||
******************
|
||||
|
||||
This plugin uses the ANEW lexicon dictionary to calculate the VAD (valence-arousal-dominance) of the sentence, and then determines which emotion is closest to this value.
|
||||
|
||||
Each emotion has a centroid, which has been approximated using the formula described in this article:
|
||||
|
||||
http://www.aclweb.org/anthology/W10-0208
|
||||
|
||||
The plugin is going to look for the words in the sentence that appear in the ANEW dictionary and calculate the average VAD score for the sentence. Once this score is calculated, it is going to seek the emotion that is closest to this value.
|
||||
|
||||
emoTextWAF plugin
|
||||
*****************
|
||||
|
||||
This plugin uses WordNet-Affect (http://wndomains.fbk.eu/wnaffect.html) to calculate the percentage of each emotion. The emotions that are going to be used are: anger, fear, disgust, joy and sadness. An emotion mapping has been used to enlarge the set of emotions:
|
||||
|
||||
* anger : general-dislike
|
||||
* fear : negative-fear
|
||||
* disgust : shame
|
||||
* joy : gratitude, affective, enthusiasm, love, joy, liking
|
||||
* sadness : ingratitude, daze, humility, compassion, despair, anxiety, sadness
|
||||
|
||||
sentiText plugin
|
||||
****************
|
||||
|
||||
This plugin is based on the classifier developed for the TASS 2015 competition. It has been developed for Spanish and English. These are the different phases this plugin goes through when it is activated:
|
||||
|
||||
* Train both classifiers (English and Spanish).
|
||||
* Initialize resources (dictionaries,stopwords,etc.).
|
||||
* Extract bag of words,lemmas and chars.
|
||||
|
||||
Once the plugin is activated, the features that are going to be extracted for the classifiers are:
|
||||
|
||||
* Matches with the bag of words extracted from the train corpus.
|
||||
* Sentiment score of the sentences extracted from the dictionaries (lexicons and emoticons).
|
||||
* Identify negations and intensifiers in the sentences.
|
||||
* Complementary features such as exclamation and interrogation marks, elongated and capitalized words, hashtags, etc.
|
||||
|
||||
The plugin has a preprocessor, focused on Twitter corpora, that is used to clean the text and simplify the feature extraction.
|
||||
|
||||
There is more information available in the following article:
|
||||
|
||||
Aspect based Sentiment Analysis of Spanish Tweets, Oscar Araque and Ignacio Corcuera-Platas and Constantino Román-Gómez and Carlos A. Iglesias and J. Fernando Sánchez-Rada. http://gsi.dit.upm.es/es/investigacion/publicaciones?view=publication&task=show&id=37
|
||||
|
||||
vaderSentiment plugin
|
||||
*********************
|
||||
|
||||
This plugin was developed using the vaderSentiment module, which is described in the paper: VADER: A Parsimonious Rule-based Model for Sentiment Analysis of Social Media Text. C.J. Hutto and Eric Gilbert. Eighth International Conference on Weblogs and Social Media (ICWSM-14). Ann Arbor, MI, June 2014.
|
||||
|
||||
If you use this plugin in your research, please cite the above paper.
|
||||
|
||||
For more information about the functionality, check the official repository
|
||||
|
||||
https://github.com/cjhutto/vaderSentiment
|
||||
The source code and description of the plugins used in the demo are available here: https://github.com/gsi-upm/senpy-plugins-community/.
|
||||
|
25
docs/development.rst
Normal file
@ -0,0 +1,25 @@
|
||||
Developing new services
|
||||
-----------------------
|
||||
|
||||
Developing web services can be hard.
|
||||
A text analysis service must implement all the typical features, such as: extraction of parameters, validation, format conversion, visualization...
|
||||
|
||||
Senpy implements all the common blocks, so developers can focus on what really matters: great analysis algorithms that solve real problems.
|
||||
Among other things, Senpy takes care of these tasks:
|
||||
|
||||
* Interfacing with the user: parameter validation, error handling.
|
||||
* Formatting: JSON-LD, Turtle/n-triples input and output, or simple text input
|
||||
* Linked Data: senpy results are semantically annotated, using a series of well established vocabularies, and sane default URIs.
|
||||
* User interface: a web UI where users can explore your service and test different settings
|
||||
* A client to interact with the service. Currently only available in Python.
|
||||
|
||||
You only need to provide the algorithm to turn a piece of text into an annotation
|
||||
Sharing your sentiment analysis with the world has never been easier!
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
server-cli
|
||||
plugins-quickstart
|
||||
plugins-faq
|
||||
plugins-definition
|
BIN
docs/eval_table.png
Normal file
After Width: | Height: | Size: 77 KiB |
BIN
docs/evaluation-results.png
Normal file
After Width: | Height: | Size: 76 KiB |
45
docs/examples.rst
Normal file
@ -0,0 +1,45 @@
|
||||
Examples
|
||||
--------
|
||||
|
||||
All the examples in this page use the :download:`the main schema <_static/schemas/definitions.json>`.
|
||||
|
||||
Simple NIF annotation
|
||||
.....................
|
||||
Description
|
||||
,,,,,,,,,,,
|
||||
This example covers the basic example in the NIF documentation: `<http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core/nif-core.html>`_.
|
||||
|
||||
Representation
|
||||
,,,,,,,,,,,,,,
|
||||
.. literalinclude:: examples/results/example-basic.json
|
||||
:language: json-ld
|
||||
|
||||
Sentiment Analysis
|
||||
.....................
|
||||
Description
|
||||
,,,,,,,,,,,
|
||||
|
||||
This annotation corresponds to the sentiment analysis of an input. The example shows the sentiment represented according to Marl format.
|
||||
The sentiments detected are contained in the Sentiments array with their related part of the text.
|
||||
|
||||
Representation
|
||||
,,,,,,,,,,,,,,
|
||||
|
||||
.. literalinclude:: examples/results/example-sentiment.json
|
||||
:emphasize-lines: 5-11,20-30
|
||||
:language: json-ld
|
||||
|
||||
Emotion Analysis
|
||||
................
|
||||
Description
|
||||
,,,,,,,,,,,
|
||||
This annotation represents the emotion analysis of an input to Senpy. The emotions are contained in the emotions section, together with the text they refer to, following the Onyx format and the emotion model defined beforehand.
|
||||
|
||||
Representation
|
||||
,,,,,,,,,,,,,,
|
||||
|
||||
.. literalinclude:: examples/results/example-emotion.json
|
||||
:language: json-ld
|
||||
:emphasize-lines: 5-11,22-36
|
||||
|
||||
|
85
docs/examples/results/example-analysis-as-id.json
Normal file
@ -0,0 +1,85 @@
|
||||
{
|
||||
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
||||
"@id": "me:Result1",
|
||||
"@type": "results",
|
||||
"activities": [
|
||||
{
|
||||
"@id": "_:SAnalysis1_Activity",
|
||||
"@type": "marl:SentimentAnalysis",
|
||||
"prov:wasAssociatedWith": "me:SAnalysis1"
|
||||
},
|
||||
{
|
||||
"@id": "_:EmotionAnalysis1_Activity",
|
||||
"@type": "onyx:EmotionAnalysis",
|
||||
"prov:wasAssociatedWith": "me:EmotionAnalysis1"
|
||||
},
|
||||
{
|
||||
"@id": "_:NER1_Activity",
|
||||
"@type": "me:NER",
|
||||
"prov:wasAssociatedWith": "me:NER1"
|
||||
}
|
||||
],
|
||||
"entries": [
|
||||
{
|
||||
"@id": "http://micro.blog/status1",
|
||||
"@type": [
|
||||
"nif:RFC5147String",
|
||||
"nif:Context"
|
||||
],
|
||||
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
||||
"entities": [
|
||||
{
|
||||
"@id": "http://micro.blog/status1#char=5,13",
|
||||
"nif:beginIndex": 5,
|
||||
"nif:endIndex": 13,
|
||||
"nif:anchorOf": "Microsoft",
|
||||
"me:references": "http://dbpedia.org/page/Microsoft",
|
||||
"prov:wasGeneratedBy": "_:NER1_Activity"
|
||||
},
|
||||
{
|
||||
"@id": "http://micro.blog/status1#char=25,37",
|
||||
"nif:beginIndex": 25,
|
||||
"nif:endIndex": 37,
|
||||
"nif:anchorOf": "Windows Phone",
|
||||
"me:references": "http://dbpedia.org/page/Windows_Phone",
|
||||
"prov:wasGeneratedBy": "_:NER1_Activity"
|
||||
}
|
||||
],
|
||||
"suggestions": [
|
||||
{
|
||||
"@id": "http://micro.blog/status1#char=16,77",
|
||||
"nif:beginIndex": 16,
|
||||
"nif:endIndex": 77,
|
||||
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
|
||||
"prov:wasGeneratedBy": "_:SgAnalysis1_Activity"
|
||||
}
|
||||
],
|
||||
"sentiments": [
|
||||
{
|
||||
"@id": "http://micro.blog/status1#char=80,97",
|
||||
"nif:beginIndex": 80,
|
||||
"nif:endIndex": 97,
|
||||
"nif:anchorOf": "You'll be awesome.",
|
||||
"marl:hasPolarity": "marl:Positive",
|
||||
"marl:polarityValue": 0.9,
|
||||
"prov:wasGeneratedBy": "_:SgAnalysis1_Activity"
|
||||
}
|
||||
],
|
||||
"emotions": [
|
||||
{
|
||||
"@id": "http://micro.blog/status1#char=0,109",
|
||||
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
||||
"prov:wasGeneratedBy": "_:EmotionAnalysis1_Activity",
|
||||
"onyx:hasEmotion": [
|
||||
{
|
||||
"onyx:hasEmotionCategory": "wna:liking"
|
||||
},
|
||||
{
|
||||
"onyx:hasEmotionCategory": "wna:excitement"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
@ -1,19 +1,18 @@
|
||||
{
|
||||
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
||||
"@id": "http://example.com#NIFExample",
|
||||
"@type": "results",
|
||||
"analysis": [
|
||||
],
|
||||
"entries": [
|
||||
{
|
||||
"@id": "http://example.org#char=0,40",
|
||||
"@type": [
|
||||
"nif:RFC5147String",
|
||||
"nif:Context"
|
||||
],
|
||||
"nif:beginIndex": 0,
|
||||
"nif:endIndex": 40,
|
||||
"nif:isString": "My favourite actress is Natalie Portman"
|
||||
}
|
||||
]
|
||||
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
||||
"@id": "me:Result1",
|
||||
"@type": "results",
|
||||
"activities": [ ],
|
||||
"entries": [
|
||||
{
|
||||
"@id": "http://example.org#char=0,40",
|
||||
"@type": [
|
||||
"nif:RFC5147String",
|
||||
"nif:Context"
|
||||
],
|
||||
"nif:beginIndex": 0,
|
||||
"nif:endIndex": 40,
|
||||
"nif:isString": "My favourite actress is Natalie Portman"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
@ -1,88 +1,100 @@
|
||||
{
|
||||
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
||||
"@id": "me:Result1",
|
||||
"@type": "results",
|
||||
"analysis": [
|
||||
{
|
||||
"@id": "me:SAnalysis1",
|
||||
"@type": "marl:SentimentAnalysis",
|
||||
"marl:maxPolarityValue": 1,
|
||||
"marl:minPolarityValue": 0
|
||||
},
|
||||
{
|
||||
"@id": "me:SgAnalysis1",
|
||||
"@type": "me:SuggestionAnalysis"
|
||||
},
|
||||
{
|
||||
"@id": "me:EmotionAnalysis1",
|
||||
"@type": "me:EmotionAnalysis"
|
||||
},
|
||||
{
|
||||
"@id": "me:NER1",
|
||||
"@type": "me:NER"
|
||||
}
|
||||
],
|
||||
"entries": [
|
||||
{
|
||||
"@id": "http://micro.blog/status1",
|
||||
"@type": [
|
||||
"nif:RFC5147String",
|
||||
"nif:Context"
|
||||
],
|
||||
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
||||
"entities": [
|
||||
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
||||
"@id": "me:Result1",
|
||||
"@type": "results",
|
||||
"activities": [
|
||||
{
|
||||
"@id": "http://micro.blog/status1#char=5,13",
|
||||
"nif:beginIndex": 5,
|
||||
"nif:endIndex": 13,
|
||||
"nif:anchorOf": "Microsoft",
|
||||
"me:references": "http://dbpedia.org/page/Microsoft",
|
||||
"prov:wasGeneratedBy": "me:NER1"
|
||||
"@id": "_:SAnalysis1_Activity",
|
||||
"@type": "marl:SentimentAnalysis",
|
||||
"prov:wasAssociatedWith": "me:SentimentAnalysis",
|
||||
"prov:used": [
|
||||
{
|
||||
"name": "marl:maxPolarityValue",
|
||||
"prov:value": "1"
|
||||
},
|
||||
{
|
||||
"name": "marl:minPolarityValue",
|
||||
"prov:value": "0"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@id": "http://micro.blog/status1#char=25,37",
|
||||
"nif:beginIndex": 25,
|
||||
"nif:endIndex": 37,
|
||||
"nif:anchorOf": "Windows Phone",
|
||||
"me:references": "http://dbpedia.org/page/Windows_Phone",
|
||||
"prov:wasGeneratedBy": "me:NER1"
|
||||
}
|
||||
],
|
||||
"suggestions": [
|
||||
"@id": "_:SgAnalysis1_Activity",
|
||||
"prov:wasAssociatedWith": "me:SgAnalysis1",
|
||||
"@type": "me:SuggestionAnalysis"
|
||||
},
|
||||
{
|
||||
"@id": "http://micro.blog/status1#char=16,77",
|
||||
"nif:beginIndex": 16,
|
||||
"nif:endIndex": 77,
|
||||
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
|
||||
"prov:wasGeneratedBy": "me:SgAnalysis1"
|
||||
}
|
||||
],
|
||||
"sentiments": [
|
||||
"@id": "_:EmotionAnalysis1_Activity",
|
||||
"@type": "me:EmotionAnalysis",
|
||||
"prov:wasAssociatedWith": "me:EmotionAnalysis1"
|
||||
},
|
||||
{
|
||||
"@id": "http://micro.blog/status1#char=80,97",
|
||||
"nif:beginIndex": 80,
|
||||
"nif:endIndex": 97,
|
||||
"nif:anchorOf": "You'll be awesome.",
|
||||
"marl:hasPolarity": "marl:Positive",
|
||||
"marl:polarityValue": 0.9,
|
||||
"prov:wasGeneratedBy": "me:SAnalysis1"
|
||||
"@id": "_:NER1_Activity",
|
||||
"@type": "me:NER",
|
||||
"prov:wasAssociatedWith": "me:EmotionNER1"
|
||||
}
|
||||
],
|
||||
"emotions": [
|
||||
],
|
||||
"entries": [
|
||||
{
|
||||
"@id": "http://micro.blog/status1#char=0,109",
|
||||
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
||||
"prov:wasGeneratedBy": "me:EAnalysis1",
|
||||
"onyx:hasEmotion": [
|
||||
{
|
||||
"onyx:hasEmotionCategory": "wna:liking"
|
||||
},
|
||||
{
|
||||
"onyx:hasEmotionCategory": "wna:excitement"
|
||||
}
|
||||
]
|
||||
"@id": "http://micro.blog/status1",
|
||||
"@type": [
|
||||
"nif:RFC5147String",
|
||||
"nif:Context"
|
||||
],
|
||||
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
||||
"entities": [
|
||||
{
|
||||
"@id": "http://micro.blog/status1#char=5,13",
|
||||
"nif:beginIndex": 5,
|
||||
"nif:endIndex": 13,
|
||||
"nif:anchorOf": "Microsoft",
|
||||
"me:references": "http://dbpedia.org/page/Microsoft",
|
||||
"prov:wasGeneratedBy": "me:NER1"
|
||||
},
|
||||
{
|
||||
"@id": "http://micro.blog/status1#char=25,37",
|
||||
"nif:beginIndex": 25,
|
||||
"nif:endIndex": 37,
|
||||
"nif:anchorOf": "Windows Phone",
|
||||
"me:references": "http://dbpedia.org/page/Windows_Phone",
|
||||
"prov:wasGeneratedBy": "me:NER1"
|
||||
}
|
||||
],
|
||||
"suggestions": [
|
||||
{
|
||||
"@id": "http://micro.blog/status1#char=16,77",
|
||||
"nif:beginIndex": 16,
|
||||
"nif:endIndex": 77,
|
||||
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
|
||||
"prov:wasGeneratedBy": "me:SgAnalysis1"
|
||||
}
|
||||
],
|
||||
"sentiments": [
|
||||
{
|
||||
"@id": "http://micro.blog/status1#char=80,97",
|
||||
"nif:beginIndex": 80,
|
||||
"nif:endIndex": 97,
|
||||
"nif:anchorOf": "You'll be awesome.",
|
||||
"marl:hasPolarity": "marl:Positive",
|
||||
"marl:polarityValue": 0.9,
|
||||
"prov:wasGeneratedBy": "me:SAnalysis1"
|
||||
}
|
||||
],
|
||||
"emotions": [
|
||||
{
|
||||
"@id": "http://micro.blog/status1#char=0,109",
|
||||
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
||||
"prov:wasGeneratedBy": "me:EAnalysis1",
|
||||
"onyx:hasEmotion": [
|
||||
{
|
||||
"onyx:hasEmotionCategory": "wna:liking"
|
||||
},
|
||||
{
|
||||
"onyx:hasEmotionCategory": "wna:excitement"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
]
|
||||
}
|
||||
|
@ -2,10 +2,11 @@
|
||||
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
||||
"@id": "me:Result1",
|
||||
"@type": "results",
|
||||
"analysis": [
|
||||
"activities": [
|
||||
{
|
||||
"@id": "me:EmotionAnalysis1",
|
||||
"@type": "onyx:EmotionAnalysis"
|
||||
"@id": "me:EmotionAnalysis1_Activity",
|
||||
"@type": "me:EmotionAnalysis1",
|
||||
"prov:wasAssociatedWith": "me:EmotionAnalysis1"
|
||||
}
|
||||
],
|
||||
"entries": [
|
||||
@ -16,17 +17,13 @@
|
||||
"nif:Context"
|
||||
],
|
||||
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
||||
"entities": [
|
||||
],
|
||||
"suggestions": [
|
||||
],
|
||||
"sentiments": [
|
||||
],
|
||||
"emotions": [
|
||||
{
|
||||
"@id": "http://micro.blog/status1#char=0,109",
|
||||
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
||||
"prov:wasGeneratedBy": "me:EmotionAnalysis1",
|
||||
"prov:wasGeneratedBy": "_:EmotionAnalysis1_Activity",
|
||||
"onyx:hasEmotion": [
|
||||
{
|
||||
"onyx:hasEmotionCategory": "wna:liking"
|
||||
|
@ -2,10 +2,11 @@
|
||||
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
||||
"@id": "me:Result1",
|
||||
"@type": "results",
|
||||
"analysis": [
|
||||
"activities": [
|
||||
{
|
||||
"@id": "me:NER1",
|
||||
"@type": "me:NERAnalysis"
|
||||
"@id": "_:NER1_Activity",
|
||||
"@type": "me:NERAnalysis",
|
||||
"prov:wasAssociatedWith": "me:NER1"
|
||||
}
|
||||
],
|
||||
"entries": [
|
||||
|
@ -2,16 +2,22 @@
|
||||
"@context": [
|
||||
"http://mixedemotions-project.eu/ns/context.jsonld",
|
||||
{
|
||||
"emovoc": "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/emotionml/ns#"
|
||||
"emovoc": "http://www.gsi.upm.es/ontologies/onyx/vocabularies/emotionml/ns#"
|
||||
}
|
||||
],
|
||||
"@id": "me:Result1",
|
||||
"@type": "results",
|
||||
"analysis": [
|
||||
"activities": [
|
||||
{
|
||||
"@id": "me:HesamsAnalysis",
|
||||
"@id": "me:HesamsAnalysis_Activity",
|
||||
"@type": "onyx:EmotionAnalysis",
|
||||
"onyx:usesEmotionModel": "emovoc:pad-dimensions"
|
||||
"prov:wasAssociatedWith": "me:HesamsAnalysis",
|
||||
"prov:used": [
|
||||
{
|
||||
"name": "emotion-model",
|
||||
"prov:value": "emovoc:pad-dimensions"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"entries": [
|
||||
@ -32,7 +38,7 @@
|
||||
{
|
||||
"@id": "Entry1#char=0,21",
|
||||
"nif:anchorOf": "This is a test string",
|
||||
"prov:wasGeneratedBy": "me:HesamAnalysis",
|
||||
"prov:wasGeneratedBy": "_:HesamAnalysis_Activity",
|
||||
"onyx:hasEmotion": [
|
||||
{
|
||||
"emovoc:pleasure": 0.5,
|
||||
|
@ -2,12 +2,11 @@
|
||||
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
||||
"@id": "me:Result1",
|
||||
"@type": "results",
|
||||
"analysis": [
|
||||
"activities": [
|
||||
{
|
||||
"@id": "me:SAnalysis1",
|
||||
"@id": "_:SAnalysis1_Activity",
|
||||
"@type": "marl:SentimentAnalysis",
|
||||
"marl:maxPolarityValue": 1,
|
||||
"marl:minPolarityValue": 0
|
||||
"prov:wasAssociatedWith": "me:SAnalysis1"
|
||||
}
|
||||
],
|
||||
"entries": [
|
||||
@ -18,10 +17,6 @@
|
||||
"nif:Context"
|
||||
],
|
||||
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
||||
"entities": [
|
||||
],
|
||||
"suggestions": [
|
||||
],
|
||||
"sentiments": [
|
||||
{
|
||||
"@id": "http://micro.blog/status1#char=80,97",
|
||||
@ -30,10 +25,10 @@
|
||||
"nif:anchorOf": "You'll be awesome.",
|
||||
"marl:hasPolarity": "marl:Positive",
|
||||
"marl:polarityValue": 0.9,
|
||||
"prov:wasGeneratedBy": "me:SAnalysis1"
|
||||
"prov:wasGeneratedBy": "_:SAnalysis1_Activity"
|
||||
}
|
||||
],
|
||||
"emotionSets": [
|
||||
"emotions": [
|
||||
]
|
||||
}
|
||||
]
|
||||
|
@ -2,10 +2,11 @@
|
||||
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
|
||||
"@id": "me:Result1",
|
||||
"@type": "results",
|
||||
"analysis": [
|
||||
"activities": [
|
||||
{
|
||||
"@id": "me:SgAnalysis1",
|
||||
"@type": "me:SuggestionAnalysis"
|
||||
"@id": "_:SgAnalysis1_Activity",
|
||||
"@type": "me:SuggestionAnalysis",
|
||||
"prov:wasAssociatedWith": "me:SgAnalysis1"
|
||||
}
|
||||
],
|
||||
"entries": [
|
||||
@ -15,7 +16,6 @@
|
||||
"nif:RFC5147String",
|
||||
"nif:Context"
|
||||
],
|
||||
"prov:wasGeneratedBy": "me:SAnalysis1",
|
||||
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
|
||||
"entities": [
|
||||
],
|
||||
@ -25,7 +25,7 @@
|
||||
"nif:beginIndex": 16,
|
||||
"nif:endIndex": 77,
|
||||
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
|
||||
"prov:wasGeneratedBy": "me:SgAnalysis1"
|
||||
"prov:wasGeneratedBy": "_:SgAnalysis1_Activity"
|
||||
}
|
||||
],
|
||||
"sentiments": [
|
||||
|
115
docs/index.rst
@ -1,19 +1,106 @@
|
||||
.. Senpy documentation master file, created by
|
||||
sphinx-quickstart on Tue Feb 24 08:57:32 2015.
|
||||
You can adapt this file completely to your liking, but it should at least
|
||||
contain the root `toctree` directive.
|
||||
|
||||
Welcome to Senpy's documentation!
|
||||
=================================
|
||||
|
||||
Contents:
|
||||
.. image:: https://readthedocs.org/projects/senpy/badge/?version=latest
|
||||
:target: http://senpy.readthedocs.io/en/latest/
|
||||
.. image:: https://badge.fury.io/py/senpy.svg
|
||||
:target: https://badge.fury.io/py/senpy
|
||||
.. image:: https://travis-ci.org/gsi-upm/senpy.svg
|
||||
:target: https://github.com/gsi-upm/senpy/senpy/tree/master
|
||||
.. image:: https://img.shields.io/pypi/l/requests.svg
|
||||
:target: https://lab.gsi.upm.es/senpy/senpy/
|
||||
|
||||
Senpy is a framework to build sentiment and emotion analysis services.
|
||||
It provides functionalities for:
|
||||
|
||||
- developing sentiment and emotion classifiers and exposing them as an HTTP service
|
||||
- requesting sentiment and emotion analysis from different providers (i.e. Vader, Sentiment140, ...) using the same interface (:doc:`apischema`). In this way, applications do not depend on the API offered by these services.
|
||||
- combining services that use different sentiment models (e.g. polarity between [-1, 1] or [0, 1]) or emotion models (e.g. Ekman or VAD)
|
||||
- evaluating sentiment algorithms with well known datasets
|
||||
|
||||
|
||||
Using senpy services is as simple as sending an HTTP request with your favourite tool or library.
|
||||
Let's analyze the sentiment of the text "Senpy is awesome".
|
||||
|
||||
We can call the `Sentiment140 <http://www.sentiment140.com/>`_ service with an HTTP request using curl:
|
||||
|
||||
|
||||
.. code:: shell
|
||||
:emphasize-lines: 14,18
|
||||
|
||||
$ curl "http://senpy.gsi.upm.es/api/sentiment140" \
|
||||
--data-urlencode "input=Senpy is awesome"
|
||||
|
||||
{
|
||||
"@context": "http://senpy.gsi.upm.es/api/contexts/YXBpL3NlbnRpbWVudDE0MD8j",
|
||||
"@type": "Results",
|
||||
"entries": [
|
||||
{
|
||||
"@id": "prefix:",
|
||||
"@type": "Entry",
|
||||
"marl:hasOpinion": [
|
||||
{
|
||||
"@type": "Sentiment",
|
||||
"marl:hasPolarity": "marl:Positive",
|
||||
"prov:wasGeneratedBy": "prefix:Analysis_1554389334.6431913"
|
||||
}
|
||||
],
|
||||
"nif:isString": "Senpy is awesome",
|
||||
"onyx:hasEmotionSet": []
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
Congratulations, you’ve used your first senpy service!
|
||||
You can observe the result: the polarity is positive (marl:Positive). The reason of this prefix is that Senpy follows a linked data approach.
|
||||
|
||||
You can analyze the same sentence using a different sentiment service (e.g. Vader) and requesting a different format (e.g. turtle):
|
||||
|
||||
|
||||
|
||||
.. code:: shell
|
||||
|
||||
$ curl "http://senpy.gsi.upm.es/api/sentiment-vader" \
|
||||
--data-urlencode "input=Senpy is awesome" \
|
||||
--data-urlencode "outformat=turtle"
|
||||
|
||||
@prefix : <http://www.gsi.upm.es/onto/senpy/ns#> .
|
||||
@prefix endpoint: <http://senpy.gsi.upm.es/api/> .
|
||||
@prefix marl: <http://www.gsi.upm.es/ontologies/marl/ns#> .
|
||||
@prefix nif: <http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core#> .
|
||||
@prefix prefix: <http://senpy.invalid/> .
|
||||
@prefix prov: <http://www.w3.org/ns/prov#> .
|
||||
@prefix senpy: <http://www.gsi.upm.es/onto/senpy/ns#> .
|
||||
|
||||
prefix: a senpy:Entry ;
|
||||
nif:isString "Senpy is awesome" ;
|
||||
marl:hasOpinion [ a senpy:Sentiment ;
|
||||
marl:hasPolarity "marl:Positive" ;
|
||||
marl:polarityValue 6.72e-01 ;
|
||||
prov:wasGeneratedBy prefix:Analysis_1562668175.9808676 ] .
|
||||
|
||||
[] a senpy:Results ;
|
||||
prov:used prefix: .
|
||||
|
||||
As you see, Vader returns also the polarity value (0.67) in addition to the category (positive).
|
||||
|
||||
If you are interested in consuming Senpy services, read :doc:`Quickstart`.
|
||||
To get familiar with the concepts behind Senpy, and what it can offer for service developers, check out :doc:`development`.
|
||||
:doc:`apischema` contains information about the semantic models and vocabularies used by Senpy.
|
||||
|
||||
.. toctree::
|
||||
senpy
|
||||
installation
|
||||
usage
|
||||
api
|
||||
schema
|
||||
plugins
|
||||
demo
|
||||
:maxdepth: 2
|
||||
:caption: Learn more about senpy:
|
||||
:maxdepth: 2
|
||||
:hidden:
|
||||
|
||||
senpy
|
||||
demo
|
||||
Quickstart.ipynb
|
||||
installation
|
||||
conversion
|
||||
Evaluation.ipynb
|
||||
apischema
|
||||
development
|
||||
publications
|
||||
projects
|
||||
|
@ -1,27 +1,69 @@
|
||||
Installation
|
||||
------------
|
||||
The stable version can be installed in three ways.
|
||||
The stable version can be used in two ways: as a system/user library through pip, or from a docker image.
|
||||
|
||||
Using docker is recommended because the image is self-contained, reproducible and isolated from the system, which means:
|
||||
|
||||
* It can be downloaded and run with just one simple command
|
||||
* All dependencies are included
|
||||
* It is OS-independent (MacOS, Windows, GNU/Linux)
|
||||
* Several versions may coexist in the same machine without additional virtual environments
|
||||
|
||||
Additionally, you may create your own docker image with your custom plugins, ready to be used by others.
|
||||
|
||||
|
||||
Through PIP
|
||||
***********
|
||||
|
||||
.. code:: bash
|
||||
|
||||
pip install senpy
|
||||
|
||||
# Or with --user if you get permission errors:
|
||||
|
||||
pip install --user senpy
|
||||
|
||||
|
||||
Alternatively, you can use the development version:
|
||||
|
||||
.. code:: bash
|
||||
..
|
||||
Alternatively, you can use the development version:
|
||||
|
||||
git clone git@github.com:gsi-upm/senpy
|
||||
cd senpy
|
||||
pip install --user .
|
||||
.. code:: bash
|
||||
|
||||
git clone git@github.com:gsi-upm/senpy
|
||||
cd senpy
|
||||
pip install --user .
|
||||
|
||||
Each version is automatically tested on GNU/Linux, macOS and Windows 10.
|
||||
If you have trouble with the installation, please file an `issue on GitHub <https://github.com/gsi-upm/senpy/issues>`_.
|
||||
|
||||
If you want to install senpy globally, use sudo instead of the ``--user`` flag.
|
||||
|
||||
Docker Image
|
||||
************
|
||||
Build the image or use the pre-built one: ``docker run -ti -p 5000:5000 gsiupm/senpy --host 0.0.0.0 --default-plugins``.
|
||||
|
||||
To add custom plugins, add a volume and tell senpy where to find the plugins: ``docker run -ti -p 5000:5000 -v <PATH OF PLUGINS>:/plugins gsiupm/senpy --host 0.0.0.0 --default-plugins -f /plugins``
|
||||
The base image of senpy comes with some built-in plugins that you can use:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
docker run -ti -p 5000:5000 gsiupm/senpy --host 0.0.0.0
|
||||
|
||||
To use your custom plugins, you can add volume to the container:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
docker run -ti -p 5000:5000 -v <PATH OF PLUGINS>:/plugins gsiupm/senpy --host 0.0.0.0 --plugins-folder /plugins
|
||||
|
||||
|
||||
Alias
|
||||
.....
|
||||
|
||||
If you are using the docker approach regularly, it is advisable to use a script or an alias to simplify your executions:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
alias senpy='docker run --rm -ti -p 5000:5000 -v $PWD:/senpy-plugins gsiupm/senpy'
|
||||
|
||||
|
||||
Now, you may run senpy from any folder in your computer like so:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
senpy --version
|
||||
|
BIN
docs/playground-0.20.png
Normal file
After Width: | Height: | Size: 68 KiB |
112
docs/plugins-definition.rst
Normal file
@ -0,0 +1,112 @@
|
||||
Advanced plugin definition
|
||||
--------------------------
|
||||
In addition to finding plugins defined in source code files, senpy can also load a special type of definition file (`.senpy` files).
|
||||
This used to be the only mechanism for loading in earlier versions of senpy.
|
||||
|
||||
The definition file contains basic information
|
||||
|
||||
Lastly, it is also possible to add new plugins programmatically.
|
||||
|
||||
.. contents:: :local:
|
||||
|
||||
..
|
||||
What is a plugin?
|
||||
=================
|
||||
|
||||
A plugin is a program that, given a text, will add annotations to it.
|
||||
In practice, a plugin consists of at least two files:
|
||||
|
||||
- Definition file: a `.senpy` file that describes the plugin (e.g. what input parameters it accepts, what emotion model it uses).
|
||||
- Python module: the actual code that will add annotations to each input.
|
||||
|
||||
This separation allows us to deploy plugins that use the same code but employ different parameters.
|
||||
For instance, one could use the same classifier and processing in several plugins, but train with different datasets.
|
||||
This scenario is particularly useful for evaluation purposes.
|
||||
|
||||
The only limitation is that the name of each plugin needs to be unique.
|
||||
|
||||
Definition files
|
||||
================
|
||||
|
||||
The definition file complements and overrides the attributes provided by the plugin.
|
||||
It can be written in YAML or JSON.
|
||||
The most important attributes are:
|
||||
|
||||
* **name**: unique name that senpy will use internally to identify the plugin.
|
||||
* **module**: indicates the module that contains the plugin code, which will be automatically loaded by senpy.
|
||||
* **version**
|
||||
* extra_params: to add parameters to the senpy API when this plugin is requested. Those parameters may be required, and have aliased names. For instance:
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
extra_params:
|
||||
hello_param:
|
||||
aliases: # required
|
||||
- hello_param
|
||||
- hello
|
||||
required: true
|
||||
default: Hi you
|
||||
values:
|
||||
- Hi you
|
||||
- Hello y'all
|
||||
- Howdy
|
||||
|
||||
A complete example:
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
name: <Name of the plugin>
|
||||
module: <Python file>
|
||||
version: 0.1
|
||||
|
||||
And the json equivalent:
|
||||
|
||||
.. code:: json
|
||||
|
||||
{
|
||||
"name": "<Name of the plugin>",
|
||||
"module": "<Python file>",
|
||||
"version": "0.1"
|
||||
}
|
||||
|
||||
|
||||
Example plugin with a definition file
|
||||
=====================================
|
||||
|
||||
In this section, we will implement a basic sentiment analysis plugin.
|
||||
To determine the polarity of each entry, the plugin will compare the length of the string to a threshold.
|
||||
This threshold will be included in the definition file.
|
||||
|
||||
The definition file would look like this:
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
name: helloworld
|
||||
module: helloworld
|
||||
version: 0.0
|
||||
threshold: 10
|
||||
description: Hello World
|
||||
|
||||
Now, in a file named ``helloworld.py``:
|
||||
|
||||
.. code:: python
|
||||
|
||||
#!/bin/env python
|
||||
#helloworld.py
|
||||
|
||||
from senpy import AnalysisPlugin
|
||||
from senpy import Sentiment
|
||||
|
||||
|
||||
class HelloWorld(AnalysisPlugin):
|
||||
|
||||
def analyse_entry(entry, params):
|
||||
'''Basically do nothing with each entry'''
|
||||
|
||||
sentiment = Sentiment()
|
||||
if len(entry.text) < self.threshold:
|
||||
sentiment['marl:hasPolarity'] = 'marl:Positive'
|
||||
else:
|
||||
sentiment['marl:hasPolarity'] = 'marl:Negative'
|
||||
entry.sentiments.append(sentiment)
|
||||
yield entry
|
258
docs/plugins-faq.rst
Normal file
@ -0,0 +1,258 @@
|
||||
F.A.Q.
|
||||
======
|
||||
|
||||
.. contents:: :local:
|
||||
|
||||
|
||||
|
||||
What are annotations?
|
||||
#####################
|
||||
They are objects just like entries.
|
||||
Senpy ships with several default annotations, including ``Sentiment`` and ``Emotion``.
|
||||
|
||||
|
||||
What's a plugin made of?
|
||||
########################
|
||||
|
||||
When receiving a query, senpy selects what plugin or plugins should process each entry, and in what order.
|
||||
It also makes sure that every entry and the parameters provided by the user meet the plugin requirements.
|
||||
|
||||
Hence, two parts are necessary: 1) the code that will process the entry, and 2) some attributes and metadata that will tell senpy how to interact with the plugin.
|
||||
|
||||
In practice, this is what a plugin looks like, tests included:
|
||||
|
||||
|
||||
.. literalinclude:: ../example-plugins/rand_plugin.py
|
||||
:emphasize-lines: 21-28
|
||||
:language: python
|
||||
|
||||
|
||||
The lines highlighted contain some information about the plugin.
|
||||
In particular, the following information is mandatory:
|
||||
|
||||
* A unique name for the class. In our example, sentiment-random.
|
||||
* The subclass/type of plugin. This is typically either `SentimentPlugin` or `EmotionPlugin`. However, new types of plugin can be created for different annotations. The only requirement is that these new types inherit from `senpy.Analysis`
|
||||
* A description of the plugin. This can be done simply by adding a doc to the class.
|
||||
* A version, which should get updated.
|
||||
* An author name.
|
||||
|
||||
|
||||
How does senpy find modules?
|
||||
############################
|
||||
|
||||
Senpy looks for files of two types:
|
||||
|
||||
* Python files of the form `senpy_<NAME>.py` or `<NAME>_plugin.py`. In these files, it will look for: 1) Instances that inherit from `senpy.Plugin`, or subclasses of `senpy.Plugin` that can be initialized without a configuration file. i.e. classes that contain all the required attributes for a plugin.
|
||||
* Plugin definition files (see :doc:`plugins-definition`)
|
||||
|
||||
How can I define additional parameters for my plugin?
|
||||
#####################################################
|
||||
|
||||
Your plugin may ask for additional parameters from users by using the attribute ``extra_params`` in your plugin definition.
|
||||
It takes a dictionary, where the keys are the name of the argument/parameter, and the value has the following fields:
|
||||
|
||||
* aliases: the different names which can be used in the request to use the parameter.
|
||||
* required: if set to true, users need to provide this parameter unless a default is set.
|
||||
* options: the different acceptable values of the parameter (i.e. an enum). If set, the value provided must match one of the options.
|
||||
* default: the default value of the parameter, if none is provided in the request.
|
||||
|
||||
.. code:: python
|
||||
|
||||
"extra_params":{
|
||||
"language": {
|
||||
"aliases": ["language", "lang", "l"],
|
||||
"required": True,
|
||||
"options": ["es", "en"],
|
||||
"default": "es"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
How should I load external data and files
|
||||
#########################################
|
||||
|
||||
Most plugins will need access to files (dictionaries, lexicons, etc.).
|
||||
These files are usually heavy or under a license that does not allow redistribution.
|
||||
For this reason, senpy has a `data_folder` that is separated from the source files.
|
||||
The location of this folder is controlled programmatically or by setting the `SENPY_DATA` environment variable.
|
||||
You can use the `self.path(filepath)` function to get the path of a given `filepath` within the data folder.
|
||||
|
||||
Plugins have a convenience function `self.open` which will automatically look for the file if it exists, or open a new one if it doesn't:
|
||||
|
||||
|
||||
.. code:: python
|
||||
|
||||
import os
|
||||
|
||||
|
||||
class PluginWithResources(AnalysisPlugin):
|
||||
file_in_data = <FILE PATH>
|
||||
file_in_sources = <FILE PATH>
|
||||
|
||||
def activate(self):
|
||||
with self.open(self.file_in_data) as f:
|
||||
self._classifier = train_from_file(f)
|
||||
file_in_source = os.path.join(self.get_folder(), self.file_in_sources)
|
||||
with self.open(file_in_source) as f:
|
||||
pass
|
||||
|
||||
|
||||
It is good practice to specify the paths of these files in the plugin configuration, so the same code can be reused with different resources.
|
||||
|
||||
|
||||
Can I build a docker image for my plugin?
|
||||
#########################################
|
||||
|
||||
Add the following dockerfile to your project to generate a docker image with your plugin:
|
||||
|
||||
.. code:: dockerfile
|
||||
|
||||
FROM gsiupm/senpy
|
||||
|
||||
Once you make sure your plugin works with a specific version of senpy, modify that file to make sure your build will work even if senpy gets updated.
|
||||
e.g.:
|
||||
|
||||
|
||||
.. code:: dockerfile
|
||||
|
||||
FROM gsiupm/senpy:1.0.1
|
||||
|
||||
|
||||
This will copy your source folder to the image, and install all dependencies.
|
||||
Now, to build an image:
|
||||
|
||||
.. code:: shell
|
||||
|
||||
docker build . -t gsiupm/exampleplugin
|
||||
|
||||
And you can run it with:
|
||||
|
||||
.. code:: shell
|
||||
|
||||
docker run -p 5000:5000 gsiupm/exampleplugin
|
||||
|
||||
|
||||
If the plugin uses non-source files (:ref:`How should I load external data and files`), the recommended way is to use `SENPY_DATA` folder.
|
||||
Data can then be mounted in the container or added to the image.
|
||||
The former is recommended for open source plugins with licensed resources, whereas the latter is the most convenient and can be used for private images.
|
||||
|
||||
Mounting data:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
docker run -v $PWD/data:/data gsiupm/exampleplugin
|
||||
|
||||
Adding data to the image:
|
||||
|
||||
.. code:: dockerfile
|
||||
|
||||
FROM gsiupm/senpy:1.0.1
|
||||
COPY data /
|
||||
|
||||
What annotations can I use?
|
||||
###########################
|
||||
|
||||
You can add almost any annotation to an entry.
|
||||
The most common use cases are covered in the :doc:`apischema`.
|
||||
|
||||
|
||||
Why does the analyse function yield instead of return?
|
||||
######################################################
|
||||
|
||||
This is so that plugins may add new entries to the response or filter some of them.
|
||||
For instance, a chunker may split one entry into several.
|
||||
On the other hand, a conversion plugin may leave out those entries that do not contain relevant information.
|
||||
|
||||
|
||||
If I'm using a classifier, where should I train it?
|
||||
###################################################
|
||||
|
||||
Training a classifier can be time consuming. To avoid running the training unnecessarily, you can use ShelfMixin to store the classifier. For instance:
|
||||
|
||||
.. code:: python
|
||||
|
||||
from senpy.plugins import ShelfMixin, AnalysisPlugin
|
||||
|
||||
class MyPlugin(ShelfMixin, AnalysisPlugin):
|
||||
def train(self):
|
||||
''' Code to train the classifier
|
||||
'''
|
||||
# Here goes the code
|
||||
# ...
|
||||
return classifier
|
||||
|
||||
def activate(self):
|
||||
if 'classifier' not in self.sh:
|
||||
classifier = self.train()
|
||||
self.sh['classifier'] = classifier
|
||||
self.classifier = self.sh['classifier']
|
||||
|
||||
def deactivate(self):
|
||||
self.close()
|
||||
|
||||
|
||||
By default the ShelfMixin creates a file based on the plugin name and stores it in that plugin's folder.
|
||||
However, you can manually specify a 'shelf_file' in your .senpy file.
|
||||
|
||||
Shelves may get corrupted if the plugin exits unexpectedly.
|
||||
A corrupt shelf prevents the plugin from loading.
|
||||
If you do not care about the data in the shelf, you can force your plugin to remove the corrupted file and load anyway by setting 'force_shelf' to True in your plugin and starting it again.
|
||||
|
||||
How can I turn an external service into a plugin?
|
||||
#################################################
|
||||
|
||||
This example illustrates how to implement a plugin that accesses the Sentiment140 service.
|
||||
|
||||
.. code:: python
|
||||
|
||||
class Sentiment140Plugin(SentimentPlugin):
|
||||
def analyse_entry(self, entry, params):
|
||||
text = entry.text
|
||||
lang = params.get("language", "auto")
|
||||
res = requests.post("http://www.sentiment140.com/api/bulkClassifyJson",
|
||||
json.dumps({"language": lang,
|
||||
"data": [{"text": text}]
|
||||
}
|
||||
)
|
||||
)
|
||||
|
||||
p = params.get("prefix", None)
|
||||
polarity_value = self.maxPolarityValue*int(res.json()["data"][0]
|
||||
["polarity"]) * 0.25
|
||||
polarity = "marl:Neutral"
|
||||
neutral_value = self.maxPolarityValue / 2.0
|
||||
if polarity_value > neutral_value:
|
||||
polarity = "marl:Positive"
|
||||
elif polarity_value < neutral_value:
|
||||
polarity = "marl:Negative"
|
||||
|
||||
sentiment = Sentiment(id="Sentiment0",
|
||||
prefix=p,
|
||||
marl__hasPolarity=polarity,
|
||||
marl__polarityValue=polarity_value)
|
||||
sentiment.prov(self)
|
||||
entry.sentiments.append(sentiment)
|
||||
yield entry
|
||||
|
||||
|
||||
How can I activate a DEBUG mode for my plugin?
|
||||
###############################################
|
||||
|
||||
You can activate the DEBUG mode by the command-line tool using the option -d.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
senpy -d
|
||||
|
||||
|
||||
Additionally, with the ``--pdb`` option you will be dropped into a pdb post mortem shell if an exception is raised.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
python -m pdb yourplugin.py
|
||||
|
||||
Where can I find more code examples?
|
||||
####################################
|
||||
|
||||
See: `<http://github.com/gsi-upm/senpy-plugins-community>`_.
|
87
docs/plugins-quickstart.rst
Normal file
@ -0,0 +1,87 @@
|
||||
Quickstart for service developers
|
||||
=================================
|
||||
|
||||
This document contains the minimum to get you started with developing new services using Senpy.
|
||||
|
||||
For an example of conversion plugins, see :doc:`conversion`.
|
||||
For a description of definition files, see :doc:`plugins-definition`.
|
||||
|
||||
A more step-by-step tutorial with slides is available `here <https://lab.gsi.upm.es/senpy/senpy-tutorial>`__
|
||||
|
||||
.. contents:: :local:
|
||||
|
||||
Installation
|
||||
############
|
||||
|
||||
First of all, you need to install the package.
|
||||
See :doc:`installation` for instructions.
|
||||
Once installed, the `senpy` command should be available.
|
||||
|
||||
Architecture
|
||||
############
|
||||
|
||||
The main component of a sentiment analysis service is the algorithm itself. However, for the algorithm to work, it needs to get the appropriate parameters from the user, format the results according to the defined API, interact with the user when errors occur or more information is needed, etc.
|
||||
|
||||
Senpy proposes a modular and dynamic architecture that allows:
|
||||
|
||||
* Implementing different algorithms in an extensible way, yet offering a common interface.
|
||||
* Offering common services that facilitate development, so developers can focus on implementing new and better algorithms.
|
||||
|
||||
The framework consists of two main modules: Senpy core, which is the building block of the service, and Senpy plugins, which consist of the analysis algorithm. The next figure depicts a simplified version of the processes involved in an analysis with the Senpy framework.
|
||||
|
||||
.. image:: senpy-architecture.png
|
||||
:width: 100%
|
||||
:align: center
|
||||
|
||||
|
||||
What is a plugin?
|
||||
#################
|
||||
|
||||
A plugin is a python object that can process entries.
|
||||
Given an entry, it will modify it, add annotations to it, or generate new entries.
|
||||
|
||||
|
||||
What is an entry?
|
||||
#################
|
||||
|
||||
Entries are objects that can be annotated.
|
||||
In general, they will be a piece of text.
|
||||
By default, entries are `NIF contexts <http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core/nif-core.html>`_ represented in JSON-LD format.
|
||||
It is a dictionary/JSON object that looks like this:
|
||||
|
||||
.. code:: python
|
||||
|
||||
{
|
||||
"@id": "<unique identifier or blank node name>",
|
||||
"nif:isString": "input text",
|
||||
"sentiments": [ {
|
||||
...
|
||||
}
|
||||
],
|
||||
...
|
||||
}
|
||||
|
||||
Annotations are added to the object like this:
|
||||
|
||||
.. code:: python
|
||||
|
||||
entry = Entry()
|
||||
entry.vocabulary__annotationName = 'myvalue'
|
||||
entry['vocabulary:annotationName'] = 'myvalue'
|
||||
entry['annotationNameURI'] = 'myvalue'
|
||||
|
||||
Where vocabulary is one of the prefixes defined in the default senpy context, and annotationURI is a full URI.
|
||||
The value may be any valid JSON-LD dictionary.
|
||||
For simplicity, senpy includes a series of models by default in the ``senpy.models`` module.
|
||||
|
||||
Plugins Code
|
||||
############
|
||||
|
||||
The basic methods in a plugin are:
|
||||
|
||||
* analyse_entry: called for every user request. It takes two parameters: ``Entry``, the entry object, and ``params``, the parameters supplied by the user. It should yield one or more ``Entry`` objects.
|
||||
* activate: used to load memory-hungry resources. For instance, to train a classifier.
|
||||
* deactivate: used to free up resources when the plugin is no longer needed.
|
||||
|
||||
Plugins are loaded asynchronously, so don't worry if the activate method takes too long. The plugin will be marked as activated once it is finished executing the method.
|
||||
|
248
docs/plugins.rst
@ -1,248 +0,0 @@
|
||||
Developing new plugins
|
||||
----------------------
|
||||
This document describes how to develop a new analysis plugin. For an example of conversion plugins, see :doc:`conversion`.
|
||||
|
||||
Each plugin represents a different analysis process.There are two types of files that are needed by senpy for loading a plugin:
|
||||
|
||||
- Definition file, has the ".senpy" extension.
|
||||
- Code file, is a python file.
|
||||
|
||||
This separation will allow us to deploy plugins that use the same code but employ different parameters.
|
||||
For instance, one could use the same classifier and processing in several plugins, but train with different datasets.
|
||||
This scenario is particularly useful for evaluation purposes.
|
||||
|
||||
The only limitation is that the name of each plugin needs to be unique.
|
||||
|
||||
Plugins Definitions
|
||||
===================
|
||||
|
||||
The definition file contains all the attributes of the plugin, and can be written in YAML or JSON.
|
||||
The most important attributes are:
|
||||
|
||||
* **name**: unique name that senpy will use internally to identify the plugin.
|
||||
* **module**: indicates the module that contains the plugin code, which will be automatically loaded by senpy.
|
||||
* **version**
|
||||
* extra_params: used to specify parameters that the plugin accepts that are not already part of the senpy API. Those parameters may be required, and have aliased names. For instance:
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
extra_params:
|
||||
hello_param:
|
||||
aliases: # required
|
||||
- hello_param
|
||||
- hello
|
||||
required: true
|
||||
default: Hi you
|
||||
values:
|
||||
- Hi you
|
||||
- Hello y'all
|
||||
- Howdy
|
||||
|
||||
Parameter validation will fail if a required parameter without a default has not been provided, or if the definition includes a set of values and the provided one does not match one of them.
|
||||
|
||||
|
||||
A complete example:
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
name: <Name of the plugin>
|
||||
module: <Python file>
|
||||
version: 0.1
|
||||
|
||||
And the json equivalent:
|
||||
|
||||
.. code:: json
|
||||
|
||||
{
|
||||
"name": "<Name of the plugin>",
|
||||
"module": "<Python file>",
|
||||
"version": "0.1"
|
||||
}
|
||||
|
||||
|
||||
Plugins Code
|
||||
============
|
||||
|
||||
The basic methods in a plugin are:
|
||||
|
||||
* __init__
|
||||
* activate: used to load memory-hungry resources
|
||||
* deactivate: used to free up resources
|
||||
* analyse_entry: called in every user requests. It takes in the parameters supplied by a user and should yield one or more ``Entry`` objects.
|
||||
|
||||
Plugins are loaded asynchronously, so don't worry if the activate method takes too long. The plugin will be marked as activated once it is finished executing the method.
|
||||
|
||||
|
||||
Example plugin
|
||||
==============
|
||||
|
||||
In this section, we will implement a basic sentiment analysis plugin.
|
||||
To determine the polarity of each entry, the plugin will compare the length of the string to a threshold.
|
||||
This threshold will be included in the definition file.
|
||||
|
||||
The definition file would look like this:
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
name: helloworld
|
||||
module: helloworld
|
||||
version: 0.0
|
||||
threshold: 10
|
||||
|
||||
|
||||
Now, in a file named ``helloworld.py``:
|
||||
|
||||
.. code:: python
|
||||
|
||||
#!/bin/env python
|
||||
#helloworld.py
|
||||
|
||||
from senpy.plugins import SenpyPlugin
|
||||
from senpy.models import Sentiment
|
||||
|
||||
|
||||
class HelloWorld(SenpyPlugin):
|
||||
|
||||
def analyse_entry(entry, params):
|
||||
'''Basically do nothing with each entry'''
|
||||
|
||||
sentiment = Sentiment()
|
||||
if len(entry.text) < self.threshold:
|
||||
sentiment['marl:hasPolarity'] = 'marl:Positive'
|
||||
else:
|
||||
sentiment['marl:hasPolarity'] = 'marl:Negative'
|
||||
entry.sentiments.append(sentiment)
|
||||
yield entry
|
||||
|
||||
|
||||
F.A.Q.
|
||||
======
|
||||
Why does the analyse function yield instead of return?
|
||||
??????????????????????????????????????????????????????
|
||||
|
||||
This is so that plugins may add new entries to the response or filter some of them.
|
||||
For instance, a `context detection` plugin may add a new entry for each context in the original entry.
|
||||
On the other hand, a conveersion plugin may leave out those entries that do not contain relevant information.
|
||||
|
||||
|
||||
If I'm using a classifier, where should I train it?
|
||||
???????????????????????????????????????????????????
|
||||
|
||||
Training a classifier can be time time consuming. To avoid running the training unnecessarily, you can use ShelfMixin to store the classifier. For instance:
|
||||
|
||||
.. code:: python
|
||||
|
||||
from senpy.plugins import ShelfMixin, SenpyPlugin
|
||||
|
||||
class MyPlugin(ShelfMixin, SenpyPlugin):
|
||||
def train(self):
|
||||
''' Code to train the classifier
|
||||
'''
|
||||
# Here goes the code
|
||||
# ...
|
||||
return classifier
|
||||
|
||||
def activate(self):
|
||||
if 'classifier' not in self.sh:
|
||||
classifier = self.train()
|
||||
self.sh['classifier'] = classifier
|
||||
self.classifier = self.sh['classifier']
|
||||
|
||||
def deactivate(self):
|
||||
self.close()
|
||||
|
||||
You can speficy a 'shelf_file' in your .senpy file. By default the ShelfMixin creates a file based on the plugin name and stores it in that plugin's folder.
|
||||
|
||||
I want to implement my service as a plugin, How i can do it?
|
||||
????????????????????????????????????????????????????????????
|
||||
|
||||
This example ilustrate how to implement the Sentiment140 service as a plugin in senpy
|
||||
|
||||
.. code:: python
|
||||
|
||||
class Sentiment140Plugin(SentimentPlugin):
|
||||
def analyse_entry(self, entry, params):
|
||||
text = entry.text
|
||||
lang = params.get("language", "auto")
|
||||
res = requests.post("http://www.sentiment140.com/api/bulkClassifyJson",
|
||||
json.dumps({"language": lang,
|
||||
"data": [{"text": text}]
|
||||
}
|
||||
)
|
||||
)
|
||||
|
||||
p = params.get("prefix", None)
|
||||
polarity_value = self.maxPolarityValue*int(res.json()["data"][0]
|
||||
["polarity"]) * 0.25
|
||||
polarity = "marl:Neutral"
|
||||
neutral_value = self.maxPolarityValue / 2.0
|
||||
if polarity_value > neutral_value:
|
||||
polarity = "marl:Positive"
|
||||
elif polarity_value < neutral_value:
|
||||
polarity = "marl:Negative"
|
||||
|
||||
sentiment = Sentiment(id="Sentiment0",
|
||||
prefix=p,
|
||||
marl__hasPolarity=polarity,
|
||||
marl__polarityValue=polarity_value)
|
||||
sentiment.prov__wasGeneratedBy = self.id
|
||||
entry.sentiments.append(sentiment)
|
||||
yield entry
|
||||
|
||||
|
||||
Where can I define extra parameters to be introduced in the request to my plugin?
|
||||
?????????????????????????????????????????????????????????????????????????????????
|
||||
|
||||
You can add these parameters in the definition file under the attribute "extra_params" : "{param_name}". The name of the parameter has new attributes-value pairs. The basic attributes are:
|
||||
|
||||
* aliases: the different names which can be used in the request to use the parameter.
|
||||
* required: this option is a boolean and indicates if the parameters is binding in operation plugin.
|
||||
* options: the different values of the paremeter.
|
||||
* default: the default value of the parameter, this is useful in case the paremeter is required and you want to have a default value.
|
||||
|
||||
.. code:: python
|
||||
|
||||
"extra_params": {
|
||||
"language": {
|
||||
"aliases": ["language", "l"],
|
||||
"required": true,
|
||||
"options": ["es","en"],
|
||||
"default": "es"
|
||||
}
|
||||
}
|
||||
|
||||
This example shows how to introduce a parameter associated with language.
|
||||
The extraction of this paremeter is used in the analyse method of the Plugin interface.
|
||||
|
||||
.. code:: python
|
||||
|
||||
lang = params.get("language")
|
||||
|
||||
Where can I set up variables for using them in my plugin?
|
||||
?????????????????????????????????????????????????????????
|
||||
|
||||
You can add these variables in the definition file with the structure of attribute-value pairs.
|
||||
|
||||
Every field added to the definition file is available to the plugin instance.
|
||||
|
||||
Can I activate a DEBUG mode for my plugin?
|
||||
???????????????????????????????????????????
|
||||
|
||||
You can activate the DEBUG mode by the command-line tool using the option -d.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
senpy -d
|
||||
|
||||
|
||||
Additionally, with the ``--pdb`` option you will be dropped into a pdb post mortem shell if an exception is raised.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
senpy --pdb
|
||||
|
||||
|
||||
Where can I find more code examples?
|
||||
????????????????????????????????????
|
||||
|
||||
See: `<http://github.com/gsi-upm/senpy-plugins-community>`_.
|
49
docs/projects.rst
Normal file
@ -0,0 +1,49 @@
|
||||
Projects using Senpy
|
||||
--------------------
|
||||
|
||||
Are you using Senpy in your work? We would love to hear from you!
|
||||
Here is a list of on-going and past projects that have benefited from senpy:
|
||||
|
||||
|
||||
MixedEmotions
|
||||
,,,,,,,,,,,,,
|
||||
|
||||
`MixedEmotions <https://mixedemotions-project.eu/>`_ develops innovative multilingual multi-modal Big Data analytics applications.
|
||||
The analytics relies on a common toolbox for multi-modal sentiment and emotion analysis.
|
||||
The NLP parts of the toolbox are based on senpy and its API.
|
||||
|
||||
The toolbox is featured in this publication:
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
Buitelaar, P., Wood, I. D., Arcan, M., McCrae, J. P., Abele, A., Robin, C., … Tummarello, G. (2018).
|
||||
MixedEmotions: An Open-Source Toolbox for Multi-Modal Emotion Analysis.
|
||||
IEEE Transactions on Multimedia.
|
||||
|
||||
EuroSentiment
|
||||
,,,,,,,,,,,,,
|
||||
|
||||
The aim of the EUROSENTIMENT project was to create a pool for multilingual language resources and services for Sentiment Analysis.
|
||||
|
||||
The EuroSentiment project was the main motivation behind the development of Senpy, and some early versions were used:
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
Sánchez-Rada, J. F., Vulcu, G., Iglesias, C. A., & Buitelaar, P. (2014).
|
||||
EUROSENTIMENT: Linked Data Sentiment Analysis.
|
||||
Proceedings of the ISWC 2014 Posters & Demonstrations Track
|
||||
13th International Semantic Web Conference (ISWC 2014) (Vol. 1272, pp. 145–148).
|
||||
|
||||
|
||||
SoMeDi
|
||||
,,,,,,
|
||||
`SoMeDi <https://itea3.org/project/somedi.html>`_ is an ITEA3 project to research machine learning and artificial intelligence techniques that can be used to turn digital interaction data into Digital Interaction Intelligence and approaches that can be used to effectively enter and act in social media, and to automate this process.
|
||||
SoMeDi exploits senpy's interoperability of services in their customizable data enrichment and NLP workflows.
|
||||
|
||||
TRIVALENT
|
||||
,,,,,,,,,
|
||||
|
||||
`TRIVALENT <https://trivalent-project.eu/>`_ is an EU funded project which aims at a better understanding of the root causes of the phenomenon of violent radicalisation in Europe in order to develop appropriate countermeasures, ranging from early detection methodologies to techniques of counter-narrative.
|
||||
|
||||
In addition to sentiment and emotion analysis services, trivalent provides other types of senpy services such as radicalism and writing style analysis.
|
||||
|
36
docs/publications.rst
Normal file
@ -0,0 +1,36 @@
|
||||
Publications
|
||||
============
|
||||
|
||||
|
||||
And if you use Senpy in your research, please cite `Senpy: A Pragmatic Linked Sentiment Analysis Framework <http://gsi.upm.es/index.php/es/investigacion/publicaciones?view=publication&task=show&id=417>`__ (`BibTex <http://gsi.upm.es/index.php/es/investigacion/publicaciones?controller=publications&task=export&format=bibtex&id=417>`__):
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
Sánchez-Rada, J. F., Iglesias, C. A., Corcuera, I., & Araque, Ó. (2016, October).
|
||||
Senpy: A Pragmatic Linked Sentiment Analysis Framework.
|
||||
In Data Science and Advanced Analytics (DSAA),
|
||||
2016 IEEE International Conference on (pp. 735-742). IEEE.
|
||||
|
||||
|
||||
Senpy uses Onyx for emotion representation, first introduced in:
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
Sánchez-Rada, J. F., & Iglesias, C. A. (2016).
|
||||
Onyx: A linked data approach to emotion representation.
|
||||
Information Processing & Management, 52(1), 99-114.
|
||||
|
||||
Senpy uses Marl for sentiment representation, which was presented in:
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
Westerski, A., Iglesias Fernandez, C. A., & Tapia Rico, F. (2011).
|
||||
Linked opinions: Describing sentiments on the structured web of data.
|
||||
|
||||
The representation models, formats and challenges are partially covered in a chapter of the book Sentiment Analysis in Social Networks:
|
||||
|
||||
.. code-block:: text
|
||||
|
||||
Iglesias, C. A., Sánchez-Rada, J. F., Vulcu, G., & Buitelaar, P. (2017).
|
||||
Linked Data Models for Sentiment and Emotion Analysis in Social Networks.
|
||||
In Sentiment Analysis in Social Networks (pp. 49-69).
|
@ -1 +1,3 @@
|
||||
sphinxcontrib-httpdomain>=1.4
|
||||
ipykernel
|
||||
nbsphinx
|
||||
|
@ -1,74 +0,0 @@
|
||||
Schema Examples
|
||||
===============
|
||||
All the examples on this page use :download:`the main schema <_static/schemas/definitions.json>`.
|
||||
|
||||
Simple NIF annotation
|
||||
---------------------
|
||||
Description
|
||||
...........
|
||||
This example covers the basic example in the NIF documentation: `<http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core/nif-core.html>`_.
|
||||
|
||||
Representation
|
||||
..............
|
||||
.. literalinclude:: examples/example-basic.json
|
||||
:language: json-ld
|
||||
|
||||
Sentiment Analysis
|
||||
---------------------
|
||||
Description
|
||||
...........
|
||||
|
||||
Representation
|
||||
..............
|
||||
|
||||
.. literalinclude:: examples/example-sentiment.json
|
||||
:emphasize-lines: 5-10,25-33
|
||||
:language: json-ld
|
||||
|
||||
Suggestion Mining
|
||||
-----------------
|
||||
Description
|
||||
...........
|
||||
|
||||
Representation
|
||||
..............
|
||||
|
||||
.. literalinclude:: examples/example-suggestion.json
|
||||
:emphasize-lines: 5-8,22-27
|
||||
:language: json-ld
|
||||
|
||||
Emotion Analysis
|
||||
----------------
|
||||
Description
|
||||
...........
|
||||
|
||||
Representation
|
||||
..............
|
||||
|
||||
.. literalinclude:: examples/example-emotion.json
|
||||
:language: json-ld
|
||||
:emphasize-lines: 5-8,25-37
|
||||
|
||||
Named Entity Recognition
|
||||
------------------------
|
||||
Description
|
||||
...........
|
||||
|
||||
Representation
|
||||
..............
|
||||
|
||||
.. literalinclude:: examples/example-ner.json
|
||||
:emphasize-lines: 5-8,19-34
|
||||
:language: json-ld
|
||||
|
||||
Complete example
|
||||
----------------
|
||||
Description
|
||||
...........
|
||||
This example covers all of the above cases, integrating all the annotations in the same document.
|
||||
|
||||
Representation
|
||||
..............
|
||||
|
||||
.. literalinclude:: examples/example-complete.json
|
||||
:language: json-ld
|
Before Width: | Height: | Size: 65 KiB After Width: | Height: | Size: 122 KiB |
BIN
docs/senpy-framework.png
Normal file
After Width: | Height: | Size: 48 KiB |
Before Width: | Height: | Size: 79 KiB After Width: | Height: | Size: 49 KiB |
@ -1,35 +1,27 @@
|
||||
What is Senpy?
|
||||
--------------
|
||||
|
||||
Senpy is an open source reference implementation of a linked data model for sentiment and emotion analysis services based on the vocabularies NIF, Marl and Onyx.
|
||||
|
||||
The overall goal of the reference implementation Senpy is easing the adoption of the proposed linked data model for sentiment and emotion analysis services, so that services from different providers become interoperable. With this aim, the design of the reference implementation has focused on its extensibility and reusability.
|
||||
|
||||
A modular approach allows organizations to replace individual components with custom ones developed in-house. Furthermore, organizations can benefit from reusing prepackaged modules that provide advanced functionalities, such as algorithms for sentiment and emotion analysis, linked data publication or emotion and sentiment mapping between different providers.
|
||||
|
||||
Specifications
|
||||
==============
|
||||
|
||||
The model used in Senpy is based on the following specifications:
|
||||
|
||||
* Marl, a vocabulary designed to annotate and describe subjective opinions expressed on the web or in information systems.
|
||||
* Onyx, which is built on the same principles as Marl to annotate and describe emotions, and provides interoperability with Emotion Markup Language.
|
||||
* NIF 2.0, which defines a semantic format and API for improving interoperability among natural language processing services.
|
||||
|
||||
Architecture
|
||||
============
|
||||
|
||||
The main component of a sentiment analysis service is the algorithm itself. However, for the algorithm to work, it needs to get the appropriate parameters from the user, format the results according to the defined API, interact with the user when errors occur or more information is needed, etc.
|
||||
|
||||
Senpy proposes a modular and dynamic architecture that allows:
|
||||
|
||||
* Implementing different algorithms in an extensible way, yet offering a common interface.
|
||||
* Offering common services that facilitate development, so developers can focus on implementing new and better algorithms.
|
||||
|
||||
The framework consists of two main modules: Senpy core, which is the building block of the service, and Senpy plugins, which consist of the analysis algorithm. The next figure depicts a simplified version of the processes involved in an analysis with the Senpy framework.
|
||||
Senpy is a framework for sentiment and emotion analysis services.
|
||||
Its goal is to produce analysis services that are interchangeable and fully interoperable.
|
||||
|
||||
.. image:: senpy-architecture.png
|
||||
:height: 400px
|
||||
:width: 800px
|
||||
:scale: 100 %
|
||||
:width: 100%
|
||||
:align: center
|
||||
|
||||
All services built using senpy share a common interface.
|
||||
This allows users to use them (almost) interchangeably, with the same API and tools, simply by pointing to a different URL or changing a parameter.
|
||||
The common schema also makes it easier to evaluate the performance of different algorithms and services.
|
||||
In fact, Senpy has a built-in evaluation API you can use to compare results with different algorithms.
|
||||
|
||||
Services can also use the common interface to communicate with each other.
|
||||
And higher level features can be built on top of these services, such as automatic fusion of results, emotion model conversion, and service discovery.
|
||||
|
||||
These benefits are not limited to new services.
|
||||
The community has developed wrappers for some proprietary and commercial services (such as sentiment140 and Meaning Cloud), so you can use them as well.
|
||||
Senpy comes with a built-in client in the client package.
|
||||
|
||||
|
||||
To achieve this goal, Senpy uses a Linked Data principled approach, based on the NIF (NLP Interchange Format) specification, and open vocabularies such as Marl and Onyx.
|
||||
You can learn more about this in :doc:`vocabularies`.
|
||||
|
||||
Check out :doc:`development` if you have developed an analysis algorithm (e.g. sentiment analysis) and you want to publish it as a service.
|
||||
|
85
docs/server-cli.rst
Normal file
@ -0,0 +1,85 @@
|
||||
Command line tool
|
||||
=================
|
||||
|
||||
Basic usage
|
||||
-----------
|
||||
|
||||
The senpy server is launched via the `senpy` command:
|
||||
|
||||
.. code:: text
|
||||
|
||||
usage: senpy [-h] [--level logging_level] [--log-format log_format] [--debug]
|
||||
[--no-default-plugins] [--host HOST] [--port PORT]
|
||||
[--plugins-folder PLUGINS_FOLDER] [--install]
|
||||
[--test] [--no-run] [--data-folder DATA_FOLDER]
|
||||
[--no-threaded] [--no-deps] [--version] [--allow-fail]
|
||||
|
||||
Run a Senpy server
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
--level logging_level, -l logging_level
|
||||
Logging level
|
||||
--log-format log_format
|
||||
Logging format
|
||||
--debug, -d Run the application in debug mode
|
||||
--no-default-plugins Do not load the default plugins
|
||||
--host HOST Use 0.0.0.0 to accept requests from any host.
|
||||
--port PORT, -p PORT Port to listen on.
|
||||
--plugins-folder PLUGINS_FOLDER, -f PLUGINS_FOLDER
|
||||
Where to look for plugins.
|
||||
--install, -i Install plugin dependencies before launching the server.
|
||||
--test, -t Test all plugins before launching the server
|
||||
--no-run Do not launch the server
|
||||
--data-folder DATA_FOLDER, --data DATA_FOLDER
|
||||
Where to look for data. It be set with the SENPY_DATA
|
||||
environment variable as well.
|
||||
--no-threaded Run the server without threading
|
||||
--no-deps, -n Skip installing dependencies
|
||||
--version, -v Output the senpy version and exit
|
||||
--allow-fail, --fail Do not exit if some plugins fail to activate
|
||||
|
||||
|
||||
When launched, the server will recursively look for plugins in the specified plugins folder (the current working directory by default).
|
||||
For every plugin found, it will download its dependencies, and try to activate it.
|
||||
The default server includes a playground and an endpoint with all plugins found.
|
||||
|
||||
Let's run senpy with the default plugins:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
senpy -f .
|
||||
|
||||
Now open your browser and go to `http://localhost:5000 <http://localhost:5000>`_, where you should be greeted by the senpy playground:
|
||||
|
||||
.. image:: senpy-playground.png
|
||||
:width: 100%
|
||||
:alt: Playground
|
||||
|
||||
The playground is a user-friendly way to test your plugins, but you can always use the service directly: `http://localhost:5000/api?input=hello <http://localhost:5000/api?input=hello>`_.
|
||||
|
||||
|
||||
By default, senpy will listen only on `127.0.0.1`.
|
||||
That means you can only access the API from your PC (i.e. localhost).
|
||||
You can listen on a different address using the `--host` flag (e.g., 0.0.0.0, to allow any computer to access it).
|
||||
The default port is 5000.
|
||||
You can change it with the `--port` flag.
|
||||
|
||||
For instance, to accept connections on port 6000 on any interface:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
senpy --host 0.0.0.0 --port 6000
|
||||
|
||||
For more options, see the `--help` page.
|
||||
|
||||
Sentiment analysis in the command line
|
||||
--------------------------------------
|
||||
|
||||
Although the main use of senpy is to publish services, the tool can also be used locally to analyze text in the command line.
|
||||
This is a short video demonstration:
|
||||
|
||||
.. image:: https://asciinema.org/a/9uwef1ghkjk062cw2t4mhzpyk.png
|
||||
:width: 100%
|
||||
:target: https://asciinema.org/a/9uwef1ghkjk062cw2t4mhzpyk
|
||||
:alt: CLI demo
|
@ -1,76 +0,0 @@
|
||||
Usage
|
||||
-----
|
||||
|
||||
The easiest and recommended way is to just use the command-line tool to load your plugins and launch the server.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
senpy
|
||||
|
||||
Or, alternatively:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
python -m senpy
|
||||
|
||||
|
||||
This will create a server with any modules found in the current path.
|
||||
|
||||
Useful command-line options
|
||||
===========================
|
||||
|
||||
In case you want to load modules, which are located in different folders under the root folder, use the next option.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
python -m senpy -f .
|
||||
|
||||
The default port used by senpy is 5000, but you can change it using the option `--port`.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
python -m senpy --port 8080
|
||||
|
||||
Also, the host can be changed where senpy is deployed. The default value is `127.0.0.1`.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
python -m senpy --host 0.0.0.0
|
||||
|
||||
For more options, see the `--help` page.
|
||||
|
||||
Alternatively, you can use the modules included in senpy to build your own application.
|
||||
|
||||
Senpy server
|
||||
============
|
||||
|
||||
Once the server is launched, there is a basic endpoint in the server, which provides a playground to use the plugins that have been loaded.
|
||||
|
||||
In case you want to know the different endpoints of the server, there is more information available in the NIF API section_.
|
||||
|
||||
CLI
|
||||
===
|
||||
|
||||
This video shows how to use senpy through command-line tool.
|
||||
|
||||
https://asciinema.org/a/9uwef1ghkjk062cw2t4mhzpyk
|
||||
|
||||
Request example in python
|
||||
=========================
|
||||
|
||||
This example shows how to make a request to the default plugin:
|
||||
|
||||
.. code:: python
|
||||
|
||||
from senpy.client import Client
|
||||
|
||||
c = Client('http://127.0.0.1:5000/api/')
|
||||
r = c.analyse('hello world')
|
||||
|
||||
for entry in r.entries:
|
||||
print('{} -> {}'.format(entry.text, entry.emotions))
|
||||
|
||||
|
||||
|
||||
.. _section: http://senpy.readthedocs.org/en/latest/api.html
|
||||
|
24
docs/vocabularies.rst
Normal file
@ -0,0 +1,24 @@
|
||||
Vocabularies and model
|
||||
======================
|
||||
|
||||
The model used in Senpy is based on NIF 2.0 [1], which defines a semantic format and API for improving interoperability among natural language processing services.
|
||||
|
||||
Senpy has been applied to sentiment and emotion analysis services using the following vocabularies:
|
||||
|
||||
* Marl [2,6], a vocabulary designed to annotate and describe subjective opinions expressed on the web or in information systems.
|
||||
* Onyx [3,5], which is built on the same principles as Marl to annotate and describe emotions, and provides interoperability with Emotion Markup Language.
|
||||
|
||||
An overview of the vocabularies and their use can be found in [4].
|
||||
|
||||
|
||||
[1] Guidelines for developing NIF-based NLP services, Final Community Group Report 22 December 2015 Available at: https://www.w3.org/2015/09/bpmlod-reports/nif-based-nlp-webservices/
|
||||
|
||||
[2] Marl Ontology Specification, available at http://www.gsi.upm.es/ontologies/marl/
|
||||
|
||||
[3] Onyx Ontology Specification, available at http://www.gsi.upm.es/ontologies/onyx/
|
||||
|
||||
[4] Iglesias, C. A., Sánchez-Rada, J. F., Vulcu, G., & Buitelaar, P. (2017). Linked Data Models for Sentiment and Emotion Analysis in Social Networks. In Sentiment Analysis in Social Networks (pp. 49-69).
|
||||
|
||||
[5] Sánchez-Rada, J. F., & Iglesias, C. A. (2016). Onyx: A linked data approach to emotion representation. Information Processing & Management, 52(1), 99-114.
|
||||
|
||||
[6] Westerski, A., Iglesias Fernandez, C. A., & Tapia Rico, F. (2011). Linked opinions: Describing sentiments on the structured web of data.
|
23
example-plugins/README.md
Normal file
@ -0,0 +1,23 @@
|
||||
This is a collection of plugins that exemplify certain aspects of plugin development with senpy.
|
||||
|
||||
The first series of plugins are the `basic` ones.
|
||||
Their starting point is a classification function defined in `basic.py`.
|
||||
They all include tests, and running them as a script will run all of those tests.
|
||||
In ascending order of customization, the plugins are:
|
||||
|
||||
* Basic is the simplest plugin of all. It leverages the `SentimentBox` Plugin class to create a plugin out of a classification method, and `MappingMixin` to convert the labels from (`pos`, `neg`) to (`marl:Positive`, `marl:Negative`).
|
||||
* Basic_box is just like the previous one, but replaces the mixin with a custom function.
|
||||
* Basic_configurable is a version of `basic` with a configurable map of emojis for each sentiment.
|
||||
* Basic_parameterized is like `basic_info`, but users set the map in each query (via `extra_parameters`).
|
||||
* Basic_analyse\_entry uses the more general `analyse_entry` method and adds the annotations individually.
|
||||
|
||||
|
||||
The rest of the plugins illustrate more advanced topics:
|
||||
|
||||
* mynoop: shows how to add a definition file with external requirements for a plugin. Doing this with a python-only module would require moving all imports of the requirements to their functions, which is considered bad practice.
|
||||
* Async: a barebones example of training a plugin and analyzing data in parallel.
|
||||
|
||||
All of the plugins in this folder include a set of test cases and they are periodically tested with the latest version of senpy.
|
||||
|
||||
Additionally, for an example of a stand-alone plugin that can be tested and deployed with docker, take a look at: lab.gsi.upm.es/senpy/plugin-example
|
||||
bbm
|
53
example-plugins/async_plugin.py
Normal file
@ -0,0 +1,53 @@
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from senpy import AnalysisPlugin
|
||||
|
||||
import multiprocessing
|
||||
|
||||
|
||||
def _train(process_number):
    """Dummy training job run inside the Async plugin's process pool.

    It simply echoes back the worker number it was given, which makes
    the parallel results easy to verify in the plugin's test cases.
    """
    result = process_number
    return result
|
||||
|
||||
|
||||
class Async(AnalysisPlugin):
    '''An example of an asynchronous module'''
    author = '@balkian'
    version = '0.2'
    sync = False  # marks this plugin as asynchronous for senpy

    def _do_async(self, num_processes):
        """Run the dummy ``_train`` job on ``num_processes`` workers and
        return their results, sorted.

        The pool is used as a context manager so the worker processes
        are always terminated, even if ``map`` raises; the previous
        version never closed the pool, leaking child processes.
        """
        with multiprocessing.Pool(processes=num_processes) as pool:
            values = sorted(pool.map(_train, range(num_processes)))

        return values

    def activate(self):
        # Simulate an expensive (parallel) set-up step at load time.
        self.value = self._do_async(4)

    def analyse_entry(self, entry, params):
        # Annotate the entry with the result of a parallel computation.
        values = self._do_async(2)
        entry.async_values = values
        yield entry

    test_cases = [
        {
            'input': 'any',
            'expected': {
                'async_values': [0, 1]
            }
        }
    ]
|
41
example-plugins/basic.py
Normal file
@ -0,0 +1,41 @@
|
||||
#!/usr/local/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
emoticons = {
    'pos': [':)', ':]', '=)', ':D'],
    'neg': [':(', ':[', '=(']
}

emojis = {
    'pos': [u'😁', u'😂', u'😃', u'😄', u'😆', u'😅', u'😄', u'😍'],
    'neg': [u'😢', u'😡', u'😠', u'😞', u'😖', u'😔', u'😓', u'😒']
}


def get_polarity(text, dictionaries=[emoticons, emojis]):
    """Return the sentiment label whose symbols appear in *text*.

    Each dictionary maps a label (e.g. ``'pos'``/``'neg'``) to a list of
    symbols (emoticons or emojis).  Dictionaries and labels are scanned
    in order, so a match found later overrides an earlier one.  When
    nothing matches, the default ``'marl:Neutral'`` is returned.
    """
    result = 'marl:Neutral'
    print('Input for get_polarity', text)
    for lexicon in dictionaries:
        for label, symbols in lexicon.items():
            if any(symbol and symbol in text for symbol in symbols):
                result = label
    print('Polarity', result)
    return result
|
62
example-plugins/basic_analyse_entry_plugin.py
Normal file
@ -0,0 +1,62 @@
|
||||
#!/usr/local/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from senpy import easy_test, models, plugins
|
||||
|
||||
import basic
|
||||
|
||||
|
||||
class BasicAnalyseEntry(plugins.SentimentPlugin):
    '''Equivalent to Basic, implementing the analyse_entry method'''

    author = '@balkian'
    version = '0.1'

    # Translation table from basic.get_polarity labels to marl terms.
    mappings = {
        'pos': 'marl:Positive',
        'neg': 'marl:Negative',
        'default': 'marl:Neutral'
    }

    def analyse_entry(self, entry, activity):
        # Classify the raw text, then translate the label to marl,
        # falling back to the 'default' entry for anything unrecognized.
        raw_label = basic.get_polarity(entry.text)
        marl_label = self.mappings.get(raw_label, self.mappings['default'])

        annotation = models.Sentiment(marl__hasPolarity=marl_label)
        annotation.prov(activity)
        entry.sentiments.append(annotation)
        yield entry

    test_cases = [
        {'input': 'Hello :)', 'polarity': 'marl:Positive'},
        {'input': 'So sad :(', 'polarity': 'marl:Negative'},
        {'input': 'Yay! Emojis 😁', 'polarity': 'marl:Positive'},
        {'input': 'But no emoticons 😢', 'polarity': 'marl:Negative'},
    ]
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
easy_test()
|
54
example-plugins/basic_box_plugin.py
Normal file
@ -0,0 +1,54 @@
|
||||
#!/usr/local/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from senpy import easy_test, SentimentBox
|
||||
|
||||
import basic
|
||||
|
||||
|
||||
class BasicBox(SentimentBox):
    ''' A modified version of Basic that also does converts annotations manually'''

    author = '@balkian'
    version = '0.1'

    def predict_one(self, features, **kwargs):
        # SentimentBox expects a one-hot [positive, neutral, negative]
        # vector; anything that is neither 'pos' nor 'neg' is neutral.
        label = basic.get_polarity(features[0])
        one_hot = {
            'pos': [1, 0, 0],
            'neg': [0, 0, 1],
        }
        return one_hot.get(label, [0, 1, 0])

    test_cases = [
        {'input': 'Hello :)', 'polarity': 'marl:Positive'},
        {'input': 'So sad :(', 'polarity': 'marl:Negative'},
        {'input': 'Yay! Emojis 😁', 'polarity': 'marl:Positive'},
        {'input': 'But no emoticons 😢', 'polarity': 'marl:Negative'},
    ]
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
easy_test()
|
55
example-plugins/basic_plugin.py
Normal file
@ -0,0 +1,55 @@
|
||||
#!/usr/local/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
from senpy import easy_test, SentimentBox
|
||||
|
||||
import basic
|
||||
|
||||
|
||||
class Basic(SentimentBox):
    '''Provides sentiment annotation using a lexicon'''

    author = '@balkian'
    version = '0.1'

    def predict_one(self, features, **kwargs):
        '''Return a one-hot [positive, neutral, negative] vector.

        ``basic.get_polarity`` returns ``'pos'``, ``'neg'`` or its
        default ``'marl:Neutral'`` -- it never returns ``'neu'``.  The
        previous check for ``'neu'`` therefore sent every neutral text
        down the fall-through branch and labelled it negative, so we
        branch on the labels that are actually produced (as BasicBox
        already does).
        '''
        output = basic.get_polarity(features[0])
        if output == 'pos':
            return [1, 0, 0]
        if output == 'neg':
            return [0, 0, 1]
        # Anything else (the 'marl:Neutral' default) is neutral.
        return [0, 1, 0]

    test_cases = [{
        'input': u'Hello :)',
        'polarity': 'marl:Positive'
    }, {
        'input': u'So sad :(',
        'polarity': 'marl:Negative'
    }, {
        'input': u'Yay! Emojis 😁',
        'polarity': 'marl:Positive'
    }, {
        'input': u'But no emoticons 😢',
        'polarity': 'marl:Negative'
    }]
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
easy_test()
|
121
example-plugins/configurable_plugin.py
Normal file
@ -0,0 +1,121 @@
|
||||
#!/usr/local/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
from senpy import easy_test, models, plugins
|
||||
|
||||
import basic
|
||||
|
||||
|
||||
class Dictionary(plugins.SentimentPlugin):
    '''Sentiment annotation using a configurable lexicon'''

    author = '@balkian'
    version = '0.2'

    # Lexicons scanned by basic.get_polarity; subclasses override this.
    dictionaries = [basic.emojis, basic.emoticons]

    # Lexicon labels that need translating to marl terms.
    mappings = {'pos': 'marl:Positive', 'neg': 'marl:Negative'}

    def analyse_entry(self, entry, *args, **kwargs):
        label = basic.get_polarity(entry.text, self.dictionaries)
        # Labels already in marl form (see Salutes) pass through as-is.
        label = self.mappings.get(label, label)

        annotation = models.Sentiment(marl__hasPolarity=label)
        annotation.prov(self)
        entry.sentiments.append(annotation)
        yield entry

    test_cases = [
        {'input': 'Hello :)', 'polarity': 'marl:Positive'},
        {'input': 'So sad :(', 'polarity': 'marl:Negative'},
        {'input': 'Yay! Emojis 😁', 'polarity': 'marl:Positive'},
        {'input': 'But no emoticons 😢', 'polarity': 'marl:Negative'},
    ]
|
||||
|
||||
|
||||
class EmojiOnly(Dictionary):
    '''Sentiment annotation with a basic lexicon of emojis'''
    # Only emojis are considered, so plain text emoticons stay neutral.
    dictionaries = [basic.emojis]

    test_cases = [
        {'input': 'Hello :)', 'polarity': 'marl:Neutral'},
        {'input': 'So sad :(', 'polarity': 'marl:Neutral'},
        {'input': 'Yay! Emojis 😁', 'polarity': 'marl:Positive'},
        {'input': 'But no emoticons 😢', 'polarity': 'marl:Negative'},
    ]
|
||||
|
||||
|
||||
class EmoticonsOnly(Dictionary):
    '''Sentiment annotation with a basic lexicon of emoticons'''
    # Only text emoticons are considered, so emojis stay neutral.
    dictionaries = [basic.emoticons]

    test_cases = [
        {'input': 'Hello :)', 'polarity': 'marl:Positive'},
        {'input': 'So sad :(', 'polarity': 'marl:Negative'},
        {'input': 'Yay! Emojis 😁', 'polarity': 'marl:Neutral'},
        {'input': 'But no emoticons 😢', 'polarity': 'marl:Neutral'},
    ]
|
||||
|
||||
|
||||
class Salutes(Dictionary):
    '''Sentiment annotation with a custom lexicon, for illustration purposes'''
    # These labels are already marl terms, so Dictionary's mapping step
    # leaves them untouched.
    dictionaries = [{
        'marl:Positive': ['Hello', '!'],
        'marl:Negative': ['Good bye', ]
    }]

    test_cases = [
        {'input': 'Hello :)', 'polarity': 'marl:Positive'},
        {'input': 'Good bye :(', 'polarity': 'marl:Negative'},
        {'input': 'Yay! Emojis 😁', 'polarity': 'marl:Positive'},
        {'input': 'But no emoticons 😢', 'polarity': 'marl:Neutral'},
    ]
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
easy_test()
|
41
example-plugins/dummy_plugin.py
Normal file
@ -0,0 +1,41 @@
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from senpy import AnalysisPlugin, easy
|
||||
|
||||
|
||||
class Dummy(AnalysisPlugin):
    '''This is a dummy self-contained plugin'''
    author = '@balkian'
    version = '0.1'

    def analyse_entry(self, entry, params):
        # Reverse the text in place and count how many times the plugin
        # has been applied to the entry (applying it twice restores it).
        flipped = entry['nif:isString'][::-1]
        entry['nif:isString'] = flipped
        entry.reversed = entry.get('reversed', 0) + 1
        yield entry

    test_cases = [{
        'entry': {
            'nif:isString': 'Hello',
        },
        'expected': {
            'nif:isString': 'olleH'
        }
    }]
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
easy()
|
56
example-plugins/dummy_required_plugin.py
Normal file
@ -0,0 +1,56 @@
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from senpy import AnalysisPlugin, easy
|
||||
|
||||
|
||||
class DummyRequired(AnalysisPlugin):
    '''This is a dummy self-contained plugin'''
    author = '@balkian'
    version = '0.1'
    # Declares a mandatory parameter: senpy rejects requests lacking it,
    # which is what the first test case checks.
    extra_params = {
        'example': {
            'description': 'An example parameter',
            'required': True,
            'options': ['a', 'b']
        }
    }

    def analyse_entry(self, entry, params):
        # Same behaviour as Dummy: reverse the text and keep a counter.
        flipped = entry['nif:isString'][::-1]
        entry['nif:isString'] = flipped
        entry.reversed = entry.get('reversed', 0) + 1
        yield entry

    test_cases = [{
        'entry': {
            'nif:isString': 'Hello',
        },
        'should_fail': True
    }, {
        'entry': {
            'nif:isString': 'Hello',
        },
        'params': {
            'example': 'a'
        },
        'expected': {
            'nif:isString': 'olleH'
        }
    }]
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
easy()
|
49
example-plugins/emorand_plugin.py
Normal file
@ -0,0 +1,49 @@
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import random
|
||||
|
||||
from senpy.plugins import EmotionPlugin
|
||||
from senpy.models import EmotionSet, Emotion, Entry
|
||||
|
||||
|
||||
class EmoRand(EmotionPlugin):
    '''A sample plugin that returns a random emotion annotation'''
    name = 'emotion-random'
    author = '@balkian'
    version = '0.1'
    url = "https://github.com/gsi-upm/senpy-plugins-community"
    # Emotion model declared for the annotations produced below.
    onyx__usesEmotionModel = "emoml:big6"

    def analyse_entry(self, entry, activity):
        '''Attach one random emotion (happiness or anger) to ``entry``.'''
        category = "emoml:big6happiness"
        # Gaussian draw clamped to [-1, 1]; a positive draw switches to anger.
        number = max(-1, min(1, random.gauss(0, 0.5)))
        if number > 0:
            category = "emoml:big6anger"
        emotionSet = EmotionSet()
        emotion = Emotion({"onyx:hasEmotionCategory": category})
        emotionSet.onyx__hasEmotion.append(emotion)
        # Record which analysis activity produced this annotation.
        emotionSet.prov(activity)
        entry.emotions.append(emotionSet)
        yield entry

    def test(self):
        # Run the plugin many times and validate every produced entry.
        params = dict()
        results = list()
        for i in range(100):
            res = next(self.analyse_entry(Entry(nif__isString="Hello"), self.activity(params)))
            res.validate()
            results.append(res.emotions[0]['onyx:hasEmotion'][0]['onyx:hasEmotionCategory'])
|
40
example-plugins/mynoop.py
Normal file
@ -0,0 +1,40 @@
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import noop
|
||||
from senpy.plugins import SentimentPlugin
|
||||
|
||||
|
||||
class NoOp(SentimentPlugin):
    '''This plugin does nothing. Literally nothing.'''

    version = 0

    def analyse_entry(self, entry, *args, **kwargs):
        # Return the entry untouched.
        yield entry

    def test(self):
        # Show the contents of the `noop` dependency, then run the
        # declarative test_cases via the base-class test machinery.
        print(dir(noop))
        super(NoOp, self).test()

    test_cases = [{
        'entry': {
            'nif:isString': 'hello'
        },
        'expected': {
            'nif:isString': 'hello'
        }
    }]
|
4
example-plugins/mynoop.senpy
Normal file
@ -0,0 +1,4 @@
|
||||
# Senpy plugin descriptor: load the plugin implemented in the `mynoop` module.
module: mynoop
# Failure to load this plugin is tolerated (it is optional).
optional: true
# Python packages this plugin needs installed.
requirements:
- noop
|
80
example-plugins/parameterized_plugin.py
Normal file
@ -0,0 +1,80 @@
|
||||
#!/usr/local/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
from senpy import easy_test, models, plugins
|
||||
|
||||
import basic
|
||||
|
||||
|
||||
class ParameterizedDictionary(plugins.SentimentPlugin):
    '''This is a basic self-contained plugin'''

    author = '@balkian'
    version = '0.2'

    extra_params = {
        'positive-words': {
            'description': 'Comma-separated list of words that are considered positive',
            'aliases': ['positive'],
            'required': True
        },
        'negative-words': {
            'description': 'Comma-separated list of words that are considered negative',
            'aliases': ['negative'],
            'required': False
        }
    }

    def analyse_entry(self, entry, activity):
        '''Annotate ``entry`` with a sentiment computed from user-supplied
        positive/negative word lists (passed as request parameters).'''
        params = activity.params
        positive_words = params['positive-words'].split(',')
        # BUGFIX: 'negative-words' is declared optional (required: False)
        # and has no default, so indexing params directly raised KeyError
        # whenever the caller omitted it. Fall back to an empty word list.
        negative_raw = params.get('negative-words', '')
        negative_words = negative_raw.split(',') if negative_raw else []
        dictionary = {
            'marl:Positive': positive_words,
            'marl:Negative': negative_words,
        }
        polarity = basic.get_polarity(entry.text, [dictionary])

        s = models.Sentiment(marl__hasPolarity=polarity)
        s.prov(activity)
        entry.sentiments.append(s)
        yield entry

    test_cases = [
        {
            'input': 'Hello :)',
            'polarity': 'marl:Positive',
            'parameters': {
                'positive': "Hello,:)",
                'negative': "sad,:()"
            }
        },
        {
            'input': 'Hello :)',
            'polarity': 'marl:Negative',
            'parameters': {
                'positive': "",
                'negative': "Hello"
            }
        }
    ]
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Run the declarative test_cases for this plugin module.
    easy_test()
|
54
example-plugins/rand_plugin.py
Normal file
@ -0,0 +1,54 @@
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import random
|
||||
from senpy import SentimentPlugin, Sentiment, Entry
|
||||
|
||||
|
||||
class RandSent(SentimentPlugin):
    '''A sample plugin that returns a random sentiment annotation'''
    name = 'sentiment-random'
    author = "@balkian"
    version = '0.1'
    url = "https://github.com/gsi-upm/senpy-plugins-community"
    marl__maxPolarityValue = '1'
    marl__minPolarityValue = "-1"

    def analyse_entry(self, entry, activity):
        '''Attach one randomly drawn sentiment annotation to ``entry``.'''
        # Gaussian draw (mean 0.2, sd 0.2) clamped to the [-1, 1] range.
        polarity_value = max(-1, min(1, random.gauss(0.2, 0.2)))
        if polarity_value > 0:
            polarity = "marl:Positive"
        elif polarity_value < 0:
            polarity = "marl:Negative"
        else:
            polarity = "marl:Neutral"
        sentiment = Sentiment(marl__hasPolarity=polarity,
                              marl__polarityValue=polarity_value)
        sentiment.prov(activity)
        entry.sentiments.append(sentiment)
        yield entry

    def test(self):
        '''Run several random analyses.'''
        params = dict()
        results = list()
        for _ in range(50):
            activity = self.activity(params)
            res = next(self.analyse_entry(Entry(nif__isString="Hello"),
                                          activity))
            res.validate()
            results.append(res.sentiments[0]['marl:hasPolarity'])
        assert 'marl:Positive' in results
        assert 'marl:Negative' in results
|
49
example-plugins/sklearn/mydata.py
Normal file
@ -0,0 +1,49 @@
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
'''
Create a dummy dataset.
Messages with a happy emoticon are labelled positive
Messages with a sad emoticon are labelled negative
'''
import random

vocabulary = ['hello', 'world', 'senpy', 'cool', 'goodbye', 'random', 'text']

emojimap = {
    1: [':)', ],
    -1: [':(', ]
}

dataset = []

# 1000 synthetic messages per label: three random words followed by an
# emoticon that matches the label.
for tag, values in emojimap.items():
    for _ in range(1000):
        msg = ''
        for _ in range(3):
            msg += random.choice(vocabulary) + " "
        msg += random.choice(values)
        dataset.append([msg, tag])

# Split the dataset into parallel text/label lists for model training.
text = [sample for sample, _ in dataset]
labels = [label for _, label in dataset]
|
46
example-plugins/sklearn/mypipeline.py
Normal file
@ -0,0 +1,46 @@
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split

from mydata import text, labels

# Hold out 12% of the synthetic dataset; the fixed random_state makes the
# split reproducible across runs.
X_train, X_test, y_train, y_test = train_test_split(text, labels, test_size=0.12, random_state=42)

from sklearn.naive_bayes import MultinomialNB


# Bag-of-words over whitespace tokens, feeding a multinomial Naive Bayes
# classifier.
count_vec = CountVectorizer(tokenizer=lambda x: x.split())
clf3 = MultinomialNB()
pipeline = Pipeline([('cv', count_vec),
                     ('clf', clf3)])

# Train at import time so `pipeline` is ready for modules that import it
# (e.g. pipeline_plugin).
pipeline.fit(X_train, y_train)
print('Feature names: {}'.format(count_vec.get_feature_names_out()))
print('Class count: {}'.format(clf3.class_count_))


if __name__ == '__main__':
    # Quick smoke test of the trained pipeline on two hand-written samples.
    print('--Results--')
    tests = [
        (['The sentiment for senpy should be positive :)', ], 1),
        (['The sentiment for anything else should be negative :()', ], -1)
    ]
    for features, expected in tests:
        result = pipeline.predict(features)
        print('Input: {}\nExpected: {}\nGot: {}'.format(features[0], expected, result))
|
48
example-plugins/sklearn/pipeline_plugin.py
Normal file
@ -0,0 +1,48 @@
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from senpy import SentimentBox, easy_test
|
||||
|
||||
from mypipeline import pipeline
|
||||
|
||||
|
||||
class PipelineSentiment(SentimentBox):
    '''This is a pipeline plugin that wraps a classifier defined in another module
    (mypipeline).'''
    author = '@balkian'
    version = 0.1
    maxPolarityValue = 1
    minPolarityValue = -1

    def predict_one(self, features, **kwargs):
        # Map the classifier's numeric output to a one-hot vector
        # (presumably [positive, neutral, negative] per the SentimentBox
        # contract — confirm against the base class).
        if pipeline.predict(features) > 0:
            return [1, 0, 0]
        return [0, 0, 1]

    test_cases = [
        {
            'input': 'The sentiment for senpy should be positive :)',
            'polarity': 'marl:Positive'
        },
        {
            'input': 'The sentiment for senpy should be negative :(',
            'polarity': 'marl:Negative'
        }
    ]
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Run the declarative test_cases above.
    easy_test()
|
43
example-plugins/sleep_plugin.py
Normal file
@ -0,0 +1,43 @@
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from senpy.plugins import AnalysisPlugin
|
||||
from time import sleep
|
||||
|
||||
|
||||
class Sleep(AnalysisPlugin):
    '''Dummy plugin to test async'''
    author = "@balkian"
    version = "0.2"
    # Default delay (seconds) used during activation and as a fallback
    # when the request does not set a timeout.
    timeout = 0.05
    extra_params = {
        "timeout": {
            "@id": "timeout_sleep",
            "aliases": ["timeout", "to"],
            "required": False,
            "default": 0
        }
    }

    def activate(self, *args, **kwargs):
        # Simulate a slow start-up.
        sleep(self.timeout)

    def analyse_entry(self, entry, params):
        # Sleep for the requested duration, then pass the entry through.
        delay = float(params.get("timeout", self.timeout))
        sleep(delay)
        yield entry

    def test(self):
        pass
|
6
extra-requirements.txt
Normal file
@ -0,0 +1,6 @@
|
||||
gsitk>0.1.9.1
|
||||
flask_cors==3.0.10
|
||||
Pattern==3.6
|
||||
lxml==4.9.3
|
||||
pandas==2.1.1
|
||||
textblob==0.17.1
|
7
k8s/README.md
Normal file
@ -0,0 +1,7 @@
|
||||
Deploy senpy to a kubernetes cluster.
|
||||
|
||||
Usage:
|
||||
|
||||
```
|
||||
kubectl apply -f . -n senpy
|
||||
```
|
36
k8s/senpy-deployment.yaml.tmpl
Normal file
@ -0,0 +1,36 @@
|
||||
---
# Deployment running the latest senpy image. This is a template file:
# $IMAGEWTAG is substituted with the concrete image reference at deploy time.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: senpy-latest
spec:
  replicas: 1
  selector:
    matchLabels:
      app: senpy-latest
  template:
    metadata:
      labels:
        app: senpy-latest
        role: senpy-latest
    spec:
      containers:
        - name: senpy-latest
          image: $IMAGEWTAG
          imagePullPolicy: Always
          args: ["--enable-cors"]
          resources:
            limits:
              memory: "512Mi"
              cpu: "1000m"
          ports:
            # Port the senpy server listens on.
            - name: web
              containerPort: 5000
          volumeMounts:
            # Persistent data directory mounted into the container.
            - name: senpy-data
              mountPath: /senpy-data
              subPath: data
      volumes:
        - name: senpy-data
          persistentVolumeClaim:
            claimName: pvc-senpy
|
29
k8s/senpy-ingress.yaml
Normal file
@ -0,0 +1,29 @@
|
||||
---
# Ingress exposing the senpy-latest Service on two hostnames.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: senpy-ingress
  labels:
    app: senpy-latest
spec:
  rules:
    - host: senpy-latest.gsi.upm.es
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: senpy-latest
                port:
                  number: 5000
    - host: latest.senpy.gsi.upm.es
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: senpy-latest
                port:
                  number: 5000
|
14
k8s/senpy-svc.yaml
Normal file
@ -0,0 +1,14 @@
|
||||
---
# Cluster-internal Service fronting the senpy-latest Deployment pods.
apiVersion: v1
kind: Service
metadata:
  name: senpy-latest
  labels:
    app: senpy-latest
spec:
  type: ClusterIP
  ports:
    # Forwards to the container's web port.
    - port: 5000
      protocol: TCP
  selector:
    app: senpy-latest
|
@ -1,11 +1,15 @@
|
||||
Flask>=0.10.1
|
||||
requests>=2.4.1
|
||||
gevent>=1.1rc4
|
||||
tornado>=4.4.3
|
||||
PyLD>=0.6.5
|
||||
six
|
||||
nltk
|
||||
future
|
||||
jsonschema
|
||||
jsonref
|
||||
PyYAML
|
||||
rdflib
|
||||
rdflib-jsonld
|
||||
rdflib==6.1.1
|
||||
numpy
|
||||
scipy
|
||||
scikit-learn>=0.20
|
||||
responses
|
||||
jmespath
|
||||
|
@ -1,7 +1,7 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014 J. Fernando Sánchez Rada - Grupo de Sistemas Inteligentes
|
||||
# DIT, UPM
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@ -14,10 +14,10 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
"""
|
||||
Sentiment analysis server in Python
|
||||
"""
|
||||
from __future__ import print_function
|
||||
from .version import __version__
|
||||
|
||||
import logging
|
||||
@ -26,4 +26,10 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
logger.info('Using senpy version: {}'.format(__version__))
|
||||
|
||||
from .utils import easy, easy_load, easy_test # noqa: F401
|
||||
|
||||
from .models import * # noqa: F401,F403
|
||||
from .plugins import * # noqa: F401,F403
|
||||
from .extensions import * # noqa: F401,F403
|
||||
|
||||
__all__ = ['api', 'blueprints', 'cli', 'extensions', 'models', 'plugins']
|
||||
|
@ -1,7 +1,6 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014 J. Fernando Sánchez Rada - Grupo de Sistemas Inteligentes
|
||||
# DIT, UPM
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@ -22,35 +21,19 @@ the server.
|
||||
|
||||
from flask import Flask
|
||||
from senpy.extensions import Senpy
|
||||
from gevent.wsgi import WSGIServer
|
||||
from gevent.monkey import patch_all
|
||||
from senpy.utils import easy_test
|
||||
from senpy.plugins import list_dependencies
|
||||
from senpy import config
|
||||
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import argparse
|
||||
import senpy
|
||||
|
||||
patch_all(thread=False)
|
||||
|
||||
SERVER_PORT = os.environ.get("PORT", 5000)
|
||||
|
||||
|
||||
def info(type, value, tb):
|
||||
if hasattr(sys, 'ps1') or not sys.stderr.isatty():
|
||||
# we are in interactive mode or we don't have a tty-like
|
||||
# device, so we call the default hook
|
||||
sys.__excepthook__(type, value, tb)
|
||||
else:
|
||||
import traceback
|
||||
import pdb
|
||||
# we are NOT in interactive mode, print the exception...
|
||||
traceback.print_exception(type, value, tb)
|
||||
print
|
||||
# ...then start the debugger in post-mortem mode.
|
||||
# pdb.pm() # deprecated
|
||||
pdb.post_mortem(tb) # more "modern"
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description='Run a Senpy server')
|
||||
parser.add_argument(
|
||||
@ -60,6 +43,17 @@ def main():
|
||||
type=str,
|
||||
default="INFO",
|
||||
help='Logging level')
|
||||
parser.add_argument(
|
||||
'--no-proxy-fix',
|
||||
action='store_true',
|
||||
default=False,
|
||||
help='Do not assume senpy will be running behind a proxy (e.g., nginx)')
|
||||
parser.add_argument(
|
||||
'--log-format',
|
||||
metavar='log_format',
|
||||
type=str,
|
||||
default='%(asctime)s %(levelname)-10s %(name)-30s \t %(message)s',
|
||||
help='Logging format')
|
||||
parser.add_argument(
|
||||
'--debug',
|
||||
'-d',
|
||||
@ -67,10 +61,10 @@ def main():
|
||||
default=False,
|
||||
help='Run the application in debug mode')
|
||||
parser.add_argument(
|
||||
'--default-plugins',
|
||||
'--no-default-plugins',
|
||||
action='store_true',
|
||||
default=False,
|
||||
help='Load the default plugins')
|
||||
help='Do not load the default plugins')
|
||||
parser.add_argument(
|
||||
'--host',
|
||||
type=str,
|
||||
@ -86,36 +80,164 @@ def main():
|
||||
'--plugins-folder',
|
||||
'-f',
|
||||
type=str,
|
||||
default='plugins',
|
||||
action='append',
|
||||
help='Where to look for plugins.')
|
||||
parser.add_argument(
|
||||
'--only-install',
|
||||
'--install',
|
||||
'-i',
|
||||
action='store_true',
|
||||
default=False,
|
||||
help='Do not run a server, only install plugin dependencies')
|
||||
help='Install plugin dependencies before running.')
|
||||
parser.add_argument(
|
||||
'--dependencies',
|
||||
action='store_true',
|
||||
default=False,
|
||||
help='List plugin dependencies')
|
||||
parser.add_argument(
|
||||
'--strict',
|
||||
action='store_true',
|
||||
default=config.strict,
|
||||
help='Fail if optional plugins cannot be loaded.')
|
||||
parser.add_argument(
|
||||
'--test',
|
||||
'-t',
|
||||
action='store_true',
|
||||
default=False,
|
||||
help='Test all plugins before launching the server')
|
||||
parser.add_argument(
|
||||
'--no-run',
|
||||
action='store_true',
|
||||
default=False,
|
||||
help='Do not launch the server.')
|
||||
parser.add_argument(
|
||||
'--data-folder',
|
||||
'--data',
|
||||
type=str,
|
||||
default=None,
|
||||
help='Where to look for data. It be set with the SENPY_DATA environment variable as well.')
|
||||
parser.add_argument(
|
||||
'--no-threaded',
|
||||
action='store_true',
|
||||
default=False,
|
||||
help='Run a single-threaded server')
|
||||
parser.add_argument(
|
||||
'--no-deps',
|
||||
'-n',
|
||||
action='store_true',
|
||||
default=False,
|
||||
help='Skip installing dependencies')
|
||||
parser.add_argument(
|
||||
'--version',
|
||||
'-v',
|
||||
action='store_true',
|
||||
default=False,
|
||||
help='Output the senpy version and exit')
|
||||
parser.add_argument(
|
||||
'--allow-fail',
|
||||
'--fail',
|
||||
action='store_true',
|
||||
default=False,
|
||||
help='Do not exit if some plugins fail to activate')
|
||||
parser.add_argument(
|
||||
'--enable-cors',
|
||||
'--cors',
|
||||
action='store_true',
|
||||
default=False,
|
||||
help='Enable CORS for all domains (requires flask-cors to be installed)')
|
||||
args = parser.parse_args()
|
||||
logging.basicConfig()
|
||||
print('Senpy version {}'.format(senpy.__version__))
|
||||
print(sys.version)
|
||||
if args.version:
|
||||
exit(1)
|
||||
rl = logging.getLogger()
|
||||
rl.setLevel(getattr(logging, args.level))
|
||||
logger_handler = rl.handlers[0]
|
||||
|
||||
# First, generic formatter:
|
||||
logger_handler.setFormatter(logging.Formatter(args.log_format))
|
||||
|
||||
app = Flask(__name__)
|
||||
app.debug = args.debug
|
||||
if args.debug:
|
||||
sys.excepthook = info
|
||||
sp = Senpy(app, args.plugins_folder, default_plugins=args.default_plugins)
|
||||
if args.only_install:
|
||||
|
||||
sp = Senpy(app,
|
||||
plugin_folder=None,
|
||||
default_plugins=not args.no_default_plugins,
|
||||
install=args.install,
|
||||
strict=args.strict,
|
||||
data_folder=args.data_folder)
|
||||
folders = list(args.plugins_folder) if args.plugins_folder else []
|
||||
if not folders:
|
||||
folders.append(".")
|
||||
for p in folders:
|
||||
sp.add_folder(p)
|
||||
|
||||
plugins = sp.plugins(plugin_type=None, is_activated=False)
|
||||
maxname = max(len(x.name) for x in plugins)
|
||||
maxversion = max(len(str(x.version)) for x in plugins)
|
||||
print('Found {} plugins:'.format(len(plugins)))
|
||||
for plugin in plugins:
|
||||
import inspect
|
||||
fpath = inspect.getfile(plugin.__class__)
|
||||
print('\t{: <{maxname}} @ {: <{maxversion}} -> {}'.format(plugin.name,
|
||||
plugin.version,
|
||||
fpath,
|
||||
maxname=maxname,
|
||||
maxversion=maxversion))
|
||||
if args.dependencies:
|
||||
print('Listing dependencies')
|
||||
missing = []
|
||||
installed = []
|
||||
for plug in sp.plugins(is_activated=False):
|
||||
inst, miss, nltkres = list_dependencies(plug)
|
||||
if not any([inst, miss, nltkres]):
|
||||
continue
|
||||
print(f'Plugin: {plug.id}')
|
||||
for m in miss:
|
||||
missing.append(f'{m} # {plug.id}')
|
||||
for i in inst:
|
||||
installed.append(f'{i} # {plug.id}')
|
||||
if installed:
|
||||
print('Installed packages:')
|
||||
for i in installed:
|
||||
print(f'\t{i}')
|
||||
if missing:
|
||||
print('Missing packages:')
|
||||
for m in missing:
|
||||
print(f'\t{m}')
|
||||
|
||||
if args.install:
|
||||
sp.install_deps()
|
||||
|
||||
if args.test:
|
||||
sp.activate_all(sync=True)
|
||||
easy_test(sp.plugins(is_activated=True), debug=args.debug)
|
||||
|
||||
if args.no_run:
|
||||
return
|
||||
sp.activate_all()
|
||||
http_server = WSGIServer((args.host, args.port), app)
|
||||
|
||||
sp.activate_all(sync=True)
|
||||
if sp.strict:
|
||||
inactive = sp.plugins(is_activated=False)
|
||||
assert not inactive
|
||||
|
||||
print('Senpy version {}'.format(senpy.__version__))
|
||||
print('Server running on port %s:%d. Ctrl+C to quit' % (args.host,
|
||||
args.port))
|
||||
if args.enable_cors:
|
||||
from flask_cors import CORS
|
||||
CORS(app)
|
||||
|
||||
if not args.no_proxy_fix:
|
||||
from werkzeug.middleware.proxy_fix import ProxyFix
|
||||
app.wsgi_app = ProxyFix(app.wsgi_app)
|
||||
|
||||
try:
|
||||
print('Senpy version {}'.format(senpy.__version__))
|
||||
print('Server running on port %s:%d. Ctrl+C to quit' % (args.host,
|
||||
args.port))
|
||||
http_server.serve_forever()
|
||||
app.run(args.host,
|
||||
args.port,
|
||||
threaded=not args.no_threaded,
|
||||
debug=app.debug)
|
||||
except KeyboardInterrupt:
|
||||
print('Bye!')
|
||||
http_server.stop()
|
||||
sp.deactivate_all()
|
||||
|
||||
|
||||
|
399
senpy/api.py
@ -1,51 +1,208 @@
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from future.utils import iteritems
|
||||
from .models import Error
|
||||
from .models import Error, Results, Entry, from_string
|
||||
import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
boolean = [True, False]
|
||||
|
||||
processors = {
|
||||
'string_to_tuple': lambda p: p if isinstance(p, (tuple, list)) else tuple(p.split(','))
|
||||
}
|
||||
|
||||
API_PARAMS = {
|
||||
"algorithm": {
|
||||
"aliases": ["algorithm", "a", "algo"],
|
||||
"required": False,
|
||||
},
|
||||
"outformat": {
|
||||
"@id": "outformat",
|
||||
"aliases": ["outformat", "o"],
|
||||
"default": "json-ld",
|
||||
"aliases": ["algorithms", "a", "algo"],
|
||||
"required": True,
|
||||
"options": ["json-ld", "turtle"],
|
||||
"default": 'default',
|
||||
"processor": 'string_to_tuple',
|
||||
"description": ("Algorithms that will be used to process the request."
|
||||
"It may be a list of comma-separated names."),
|
||||
},
|
||||
"expanded-jsonld": {
|
||||
"@id": "expanded-jsonld",
|
||||
"aliases": ["expanded", "expanded-jsonld"],
|
||||
"description": "use JSON-LD expansion to get full URIs",
|
||||
"aliases": ["expanded", "expanded_jsonld"],
|
||||
"options": boolean,
|
||||
"required": True,
|
||||
"default": 0
|
||||
"default": False
|
||||
},
|
||||
"emotionModel": {
|
||||
"with-parameters": {
|
||||
"aliases": ['withparameters',
|
||||
'with_parameters'],
|
||||
"description": "include initial parameters in the response",
|
||||
"options": boolean,
|
||||
"default": False,
|
||||
"required": True
|
||||
},
|
||||
"outformat": {
|
||||
"@id": "outformat",
|
||||
"aliases": ["o"],
|
||||
"default": "json-ld",
|
||||
"description": """The data can be semantically formatted (JSON-LD, turtle or n-triples),
|
||||
given as a list of comma-separated fields (see the fields option) or constructed from a Jinja2
|
||||
template (see the template option).""",
|
||||
"required": True,
|
||||
"options": ["json-ld", "turtle", "ntriples"],
|
||||
},
|
||||
"template": {
|
||||
"@id": "template",
|
||||
"required": False,
|
||||
"description": """Jinja2 template for the result. The input data for the template will
|
||||
be the results as a dictionary.
|
||||
For example:
|
||||
|
||||
Consider the results before templating:
|
||||
|
||||
```
|
||||
[{
|
||||
"@type": "entry",
|
||||
"onyx:hasEmotionSet": [],
|
||||
"nif:isString": "testing the template",
|
||||
"marl:hasOpinion": [
|
||||
{
|
||||
"@type": "sentiment",
|
||||
"marl:hasPolarity": "marl:Positive"
|
||||
}
|
||||
]
|
||||
}]
|
||||
```
|
||||
|
||||
|
||||
And the template:
|
||||
|
||||
```
|
||||
{% for entry in entries %}
|
||||
{{ entry["nif:isString"] | upper }},{{entry.sentiments[0]["marl:hasPolarity"].split(":")[1]}}
|
||||
{% endfor %}
|
||||
```
|
||||
|
||||
The final result would be:
|
||||
|
||||
```
|
||||
TESTING THE TEMPLATE,Positive
|
||||
```
|
||||
"""
|
||||
|
||||
},
|
||||
"fields": {
|
||||
"@id": "fields",
|
||||
"required": False,
|
||||
"description": """A jmespath selector, that can be used to extract a new dictionary, array or value
|
||||
from the results.
|
||||
jmespath is a powerful query language for json and/or dictionaries.
|
||||
It allows you to change the structure (and data) of your objects through queries.
|
||||
|
||||
e.g., the following expression gets a list of `[emotion label, intensity]` for each entry:
|
||||
`entries[]."onyx:hasEmotionSet"[]."onyx:hasEmotion"[]["onyx:hasEmotionCategory","onyx:hasEmotionIntensity"]`
|
||||
|
||||
For more information, see: https://jmespath.org
|
||||
|
||||
"""
|
||||
},
|
||||
"help": {
|
||||
"@id": "help",
|
||||
"description": "Show additional help to know more about the possible parameters",
|
||||
"aliases": ["h"],
|
||||
"required": True,
|
||||
"options": boolean,
|
||||
"default": False
|
||||
},
|
||||
"verbose": {
|
||||
"@id": "verbose",
|
||||
"description": "Show all properties in the result",
|
||||
"aliases": ["v"],
|
||||
"required": True,
|
||||
"options": boolean,
|
||||
"default": False
|
||||
},
|
||||
"aliases": {
|
||||
"@id": "aliases",
|
||||
"description": "Replace JSON properties with their aliases",
|
||||
"aliases": [],
|
||||
"required": True,
|
||||
"options": boolean,
|
||||
"default": False
|
||||
},
|
||||
"emotion-model": {
|
||||
"@id": "emotionModel",
|
||||
"aliases": ["emotionModel", "emoModel"],
|
||||
"description": """Emotion model to use in the response.
|
||||
Senpy will try to convert the output to this model automatically.
|
||||
|
||||
Examples: `wna:liking` and `emoml:big6`.
|
||||
""",
|
||||
"aliases": ["emoModel", "emotionModel"],
|
||||
"required": False
|
||||
},
|
||||
"conversion": {
|
||||
"@id": "conversion",
|
||||
"description": "How to show the elements that have (not) been converted",
|
||||
"description": """How to show the elements that have (not) been converted.
|
||||
|
||||
* full: converted and original elements will appear side-by-side
|
||||
* filtered: only converted elements will be shown
|
||||
* nested: converted elements will be shown, and they will include a link to the original element
|
||||
(using `prov:wasGeneratedBy`).
|
||||
""",
|
||||
"required": True,
|
||||
"options": ["filtered", "nested", "full"],
|
||||
"default": "full"
|
||||
}
|
||||
}
|
||||
|
||||
WEB_PARAMS = {
|
||||
"inHeaders": {
|
||||
"aliases": ["inHeaders", "headers"],
|
||||
EVAL_PARAMS = {
|
||||
"algorithm": {
|
||||
"aliases": ["plug", "p", "plugins", "algorithms", 'algo', 'a', 'plugin'],
|
||||
"description": "Plugins to evaluate",
|
||||
"required": True,
|
||||
"default": "0"
|
||||
"help": "See activated plugins in /plugins",
|
||||
"processor": API_PARAMS['algorithm']['processor']
|
||||
},
|
||||
"dataset": {
|
||||
"aliases": ["datasets", "data", "d"],
|
||||
"description": "Datasets to be evaluated",
|
||||
"required": True,
|
||||
"help": "See avalaible datasets in /datasets"
|
||||
}
|
||||
}
|
||||
|
||||
PLUGINS_PARAMS = {
|
||||
"plugin-type": {
|
||||
"@id": "pluginType",
|
||||
"description": 'What kind of plugins to list',
|
||||
"aliases": ["pluginType", "plugin_type"],
|
||||
"required": True,
|
||||
"default": 'analysisPlugin'
|
||||
}
|
||||
}
|
||||
|
||||
WEB_PARAMS = {
|
||||
"in-headers": {
|
||||
"aliases": ["headers", "inheaders", "inHeaders", "in-headers", "in_headers"],
|
||||
"description": "Only include the JSON-LD context in the headers",
|
||||
"required": True,
|
||||
"default": False,
|
||||
"options": boolean
|
||||
},
|
||||
}
|
||||
|
||||
CLI_PARAMS = {
|
||||
"plugin_folder": {
|
||||
"aliases": ["plugin_folder", "folder"],
|
||||
"plugin-folder": {
|
||||
"aliases": ["folder", "plugin_folder"],
|
||||
"required": True,
|
||||
"default": "."
|
||||
},
|
||||
@ -54,71 +211,215 @@ CLI_PARAMS = {
|
||||
NIF_PARAMS = {
|
||||
"input": {
|
||||
"@id": "input",
|
||||
"aliases": ["i", "input"],
|
||||
"aliases": ["i"],
|
||||
"required": True,
|
||||
"help": "Input text"
|
||||
},
|
||||
"informat": {
|
||||
"@id": "informat",
|
||||
"aliases": ["f", "informat"],
|
||||
"required": False,
|
||||
"default": "text",
|
||||
"options": ["turtle", "text"],
|
||||
},
|
||||
"intype": {
|
||||
"@id": "intype",
|
||||
"aliases": ["intype", "t"],
|
||||
"description": "input type",
|
||||
"aliases": ["t"],
|
||||
"required": False,
|
||||
"default": "direct",
|
||||
"options": ["direct", "url", "file"],
|
||||
},
|
||||
"informat": {
|
||||
"@id": "informat",
|
||||
"description": "input format",
|
||||
"aliases": ["f"],
|
||||
"required": False,
|
||||
"default": "text",
|
||||
"options": ["text", "json-ld"],
|
||||
},
|
||||
"language": {
|
||||
"@id": "language",
|
||||
"aliases": ["language", "l"],
|
||||
"description": "language of the input",
|
||||
"aliases": ["l"],
|
||||
"required": False,
|
||||
},
|
||||
"prefix": {
|
||||
"@id": "prefix",
|
||||
"aliases": ["prefix", "p"],
|
||||
"description": "prefix to use for new entities",
|
||||
"aliases": ["p"],
|
||||
"required": True,
|
||||
"default": "",
|
||||
},
|
||||
"urischeme": {
|
||||
"@id": "urischeme",
|
||||
"aliases": ["urischeme", "u"],
|
||||
"description": "scheme for NIF URIs",
|
||||
"aliases": ["u"],
|
||||
"required": False,
|
||||
"default": "RFC5147String",
|
||||
"options": "RFC5147String"
|
||||
},
|
||||
"options": ["RFC5147String", ]
|
||||
}
|
||||
}
|
||||
|
||||
# Aggregate view of every built-in parameter specification.
# Later specs override earlier ones if a name repeats.
BUILTIN_PARAMS = {}

for d in [
        NIF_PARAMS, CLI_PARAMS, WEB_PARAMS, PLUGINS_PARAMS, EVAL_PARAMS,
        API_PARAMS
]:
    # dict.update replaces the manual key-by-key copy loop.
    BUILTIN_PARAMS.update(d)
|
||||
|
||||
|
||||
def parse_params(indict, *specs):
|
||||
if not specs:
|
||||
specs = [NIF_PARAMS]
|
||||
logger.debug("Parsing: {}\n{}".format(indict, specs))
|
||||
outdict = indict.copy()
|
||||
wrong_params = {}
|
||||
for param, options in iteritems(spec):
|
||||
if param[0] != "@": # Exclude json-ld properties
|
||||
for spec in specs:
|
||||
for param, options in iteritems(spec):
|
||||
for alias in options.get("aliases", []):
|
||||
if alias in indict:
|
||||
# Replace each alias with the correct name of the parameter
|
||||
if alias in indict and alias != param:
|
||||
outdict[param] = indict[alias]
|
||||
del outdict[alias]
|
||||
break
|
||||
if param not in outdict:
|
||||
if options.get("required", False) and "default" not in options:
|
||||
if "default" in options:
|
||||
# We assume the default is correct
|
||||
outdict[param] = options["default"]
|
||||
elif options.get("required", False):
|
||||
wrong_params[param] = spec[param]
|
||||
else:
|
||||
if "default" in options:
|
||||
outdict[param] = options["default"]
|
||||
else:
|
||||
if "options" in spec[param] and \
|
||||
outdict[param] not in spec[param]["options"]:
|
||||
continue
|
||||
if 'processor' in options:
|
||||
outdict[param] = processors[options['processor']](outdict[param])
|
||||
if "options" in options:
|
||||
if options["options"] == boolean:
|
||||
outdict[param] = str(outdict[param]).lower() in ['true', '1', '']
|
||||
elif outdict[param] not in options["options"]:
|
||||
wrong_params[param] = spec[param]
|
||||
if wrong_params:
|
||||
logger.debug("Error parsing: %s", wrong_params)
|
||||
message = Error(
|
||||
status=400,
|
||||
message="Missing or invalid parameters",
|
||||
message='Missing or invalid parameters',
|
||||
parameters=outdict,
|
||||
errors={param: error
|
||||
for param, error in iteritems(wrong_params)})
|
||||
errors=wrong_params)
|
||||
raise message
|
||||
return outdict
|
||||
|
||||
|
||||
def get_all_params(plugins, *specs):
    '''Return a list of parameters for a given set of specifications and plugins.'''
    combined = {}
    for spec in specs:
        combined.update(spec)
    # Plugin-declared extra parameters take precedence over the base specs.
    combined.update(get_extra_params(plugins))
    return combined
|
||||
|
||||
|
||||
def get_extra_params(plugins):
    '''Get a list of possible parameters given a list of plugins'''
    merged = {}

    # Group every declared extra parameter by name, remembering which
    # plugin declared which specification.
    by_name = {}
    for plugin in plugins:
        for pname, pspec in plugin.get('extra_params', {}).items():
            by_name.setdefault(pname, {})[plugin.name] = pspec

    for pname, owners in by_name.items():  # Resolve conflicts
        if len(owners) == 1:  # No collision: take the single spec as-is
            merged[pname] = next(iter(owners.values()))
            continue

        required = False
        aliases = None
        options = None
        default = None
        incompatible_defaults = False  # Set when defaults are not compatible

        for owner, spec in owners.items():
            # Always expose the fully-qualified form, e.g. "plugin.param".
            merged['{}.{}'.format(owner, pname)] = spec
            required = required or spec.get('required', False)
            new_aliases = set(spec.get('aliases', []))
            aliases = new_aliases if aliases is None else aliases & new_aliases
            if 'options' in spec:
                new_options = set(spec['options'])
                options = new_options if options is None else options & new_options
            if 'default' in spec and spec['default']:
                if default is None and not incompatible_defaults:
                    default = spec['default']
                elif spec['default'] != default:
                    incompatible_defaults = True
                    default = None
        # Only expose the bare name when the option sets are compatible
        # (an empty intersection means the specs cannot be reconciled).
        if options != set():
            merged[pname] = {
                'default': default,
                'aliases': list(aliases),
                'required': required,
                'options': list(options)
            }
    return merged
|
||||
|
||||
|
||||
def parse_analyses(params, plugins):
    '''
    Parse the given parameters individually for each plugin, and get a list of the parameters that
    belong to each of the plugins. Each item can then be used in the plugin.analyse_entries method.
    '''
    analyses = []
    for idx, plugin in enumerate(plugins):
        if not plugin:  # Skip empty slots in the plugin list
            continue
        plugin_params = filter_params(params, plugin, idx)
        parsed = parse_params(plugin_params, plugin.get('extra_params', {}))
        analyses.append(plugin.activity(parsed))
    return analyses
|
||||
|
||||
|
||||
def filter_params(params, plugin, ith=-1):
    '''
    Get the values within params that apply to a plugin.
    More specific names override more general names, in this order:

    <index_order>.parameter > <plugin.name>.parameter > parameter


    Example:

    >>> filter_params({'0.hello': True, 'hello': False}, Plugin(), 0)
    {'hello': True}

    '''
    index_prefix = '{}.'.format(ith) if ith >= 0 else ''
    plugin_prefix = '{}.'.format(plugin.name)
    thisparams = {}
    # Apply the three levels from most general to most specific, so that
    # more specific assignments always win. The previous implementation
    # depended on the iteration order of `params` when both prefixed forms
    # were present, and its docstring example output was incorrect.
    for key, value in params.items():
        if not key.startswith(plugin_prefix) and \
           not (index_prefix and key.startswith(index_prefix)):
            thisparams[key] = value
    for key, value in params.items():
        if key.startswith(plugin_prefix):
            thisparams[key[len(plugin_prefix):]] = value
    if index_prefix:
        for key, value in params.items():
            if key.startswith(index_prefix):
                thisparams[key[len(index_prefix):]] = value
    return thisparams
|
||||
|
||||
|
||||
def parse_call(params):
    '''
    Return a results object based on the parameters used in a call/request.
    '''
    parsed = parse_params(params, NIF_PARAMS)
    informat = parsed['informat']
    if informat == 'text':
        results = Results()
        # id='prefix:' so the entry resolves against the @base of the context
        results.entries.append(Entry(nif__isString=parsed['input'], id='prefix:'))
    elif informat == 'json-ld':
        results = from_string(parsed['input'], cls=Results)
    else:  # pragma: no cover
        raise NotImplementedError('Informat {} is not implemented'.format(informat))
    results.parameters = parsed
    return results
|
||||
|
@ -1,37 +1,60 @@
|
||||
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014 J. Fernando Sánchez Rada - Grupo de Sistemas Inteligentes
|
||||
# DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
"""
|
||||
Blueprints for Senpy
|
||||
"""
|
||||
from flask import (Blueprint, request, current_app, render_template, url_for,
|
||||
jsonify)
|
||||
from .models import Error, Response, Plugins, read_schema
|
||||
from .api import WEB_PARAMS, API_PARAMS, parse_params
|
||||
jsonify, redirect)
|
||||
from .models import Error, Response, Help, Plugins, read_schema, dump_schema, Datasets
|
||||
from . import api
|
||||
from .version import __version__
|
||||
from functools import wraps
|
||||
|
||||
from .gsitk_compat import GSITK_AVAILABLE, datasets
|
||||
|
||||
import logging
|
||||
import json
|
||||
import base64
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
api_blueprint = Blueprint("api", __name__)
|
||||
demo_blueprint = Blueprint("demo", __name__)
|
||||
demo_blueprint = Blueprint("demo", __name__, template_folder='templates')
|
||||
ns_blueprint = Blueprint("ns", __name__)
|
||||
|
||||
# Reverse mapping: output format name -> MIME types that select it.
_mimetypes_r = {'json-ld': ['application/ld+json'],
                'turtle': ['text/turtle'],
                'ntriples': ['application/n-triples'],
                'text': ['text/plain']}

# Forward mapping: MIME type -> output format name, built from _mimetypes_r.
MIMETYPES = {}

for k, vs in _mimetypes_r.items():
    for v in vs:
        if v in MIMETYPES:
            # Fixed: the second placeholder previously got `v` (the MIME type)
            # instead of `k` (the current format), so the error message never
            # named the two conflicting formats.
            raise Exception('MIMETYPE {} specified for two formats: {} and {}'.format(v,
                                                                                     k,
                                                                                     MIMETYPES[v]))
        MIMETYPES[v] = k

DEFAULT_MIMETYPE = 'application/ld+json'
DEFAULT_FORMAT = 'json-ld'
|
||||
|
||||
|
||||
def get_params(req):
|
||||
if req.method == 'POST':
|
||||
@ -43,98 +66,213 @@ def get_params(req):
|
||||
return indict
|
||||
|
||||
|
||||
def encode_url(url=None):
    """Return a URL-safe base64 code for *url*.

    If *url* is not given, it is taken from the current request: the
    'prefix' parameter if present, otherwise the request path.
    The dead `code` local of the original (always '', making the
    `code or ...` expression a no-op) has been removed.
    """
    if not url:
        # NOTE(review): relies on an active flask request context.
        url = request.parameters.get('prefix', request.full_path[1:] + '#')
    return base64.urlsafe_b64encode(url.encode()).decode()
|
||||
|
||||
|
||||
def url_for_code(code, base=None):
    """Return a URI prefix for the given code.

    Deliberately returns a fixed placeholder instead of resolving the
    code through the decode endpoint (e.g. base + code, or
    url_for('api.decode', ...)): that produced unique yet very long
    URIs, which wasn't ideal for visualization.
    """
    placeholder = 'http://senpy.invalid/'
    return placeholder
|
||||
|
||||
|
||||
def decoded_url(code, base=None):
    """Decode a URL-safe base64 *code* back into a URL.

    Absolute URLs (anything starting with 'http') are returned as-is;
    relative paths are prefixed with *base*, or the current request root
    when no base is given.
    """
    path = base64.urlsafe_b64decode(code.encode()).decode()
    if path.startswith('http'):  # idiomatic form of path[:4] == 'http'
        return path
    # NOTE(review): falls back to flask's request context when base is None.
    base = base or request.url_root
    return base + path
|
||||
|
||||
|
||||
@demo_blueprint.route('/')
|
||||
def index():
|
||||
return render_template("index.html", version=__version__)
|
||||
# ev = str(get_params(request).get('evaluation', True))
|
||||
# evaluation_enabled = ev.lower() not in ['false', 'no', 'none']
|
||||
evaluation_enabled = GSITK_AVAILABLE
|
||||
|
||||
return render_template("index.html",
|
||||
evaluation=evaluation_enabled,
|
||||
version=__version__)
|
||||
|
||||
|
||||
@api_blueprint.route('/contexts/<entity>.jsonld')
|
||||
def context(entity="context"):
|
||||
@api_blueprint.route('/contexts/<code>')
def context(code=''):
    """Serve the JSON-LD context, with @base scoped to the given code."""
    # Work on a copy: mutating the shared class-level Response._context
    # would leak request-specific URLs across requests (the ns index view
    # already copies it for the same reason).
    context = Response._context.copy()
    context['@vocab'] = url_for('ns.index', _external=True)
    context['@base'] = url_for('api.decode', code=code, _external=True)
    context['endpoint'] = url_for('api.api_root', _external=True)
    return jsonify({"@context": context})
|
||||
|
||||
|
||||
@api_blueprint.route('/d/<code>')
def decode(code):
    """Decode *code* and redirect the client to the resulting URL."""
    try:
        # Any decoding failure (bad base64, bad unicode) maps to an error
        # response rather than a server crash.
        return redirect(decoded_url(code))
    except Exception:
        return Error('invalid URL').flask()
|
||||
|
||||
|
||||
@ns_blueprint.route('/') # noqa: F811
|
||||
def index():
|
||||
context = Response._context
|
||||
context['@vocab'] = url_for('.ns', _external=True)
|
||||
context = Response._context.copy()
|
||||
context['endpoint'] = url_for('api.api_root', _external=True)
|
||||
return jsonify({"@context": context})
|
||||
|
||||
|
||||
@api_blueprint.route('/schemas/<schema>')
|
||||
def schema(schema="definitions"):
|
||||
try:
|
||||
return jsonify(read_schema(schema))
|
||||
except Exception: # Should be FileNotFoundError, but it's missing from py2
|
||||
return Error(message="Schema not found", status=404).flask()
|
||||
return dump_schema(read_schema(schema))
|
||||
except Exception as ex: # Should be FileNotFoundError, but it's missing from py2
|
||||
return Error(message="Schema not found: {}".format(ex), status=404).flask()
|
||||
|
||||
|
||||
def basic_api(f):
|
||||
default_params = {
|
||||
'in-headers': False,
|
||||
'expanded-jsonld': False,
|
||||
'outformat': None,
|
||||
'with-parameters': True,
|
||||
}
|
||||
|
||||
@wraps(f)
|
||||
def decorated_function(*args, **kwargs):
|
||||
raw_params = get_params(request)
|
||||
headers = {'X-ORIGINAL-PARAMS': raw_params}
|
||||
# Get defaults
|
||||
web_params = parse_params({}, spec=WEB_PARAMS)
|
||||
api_params = parse_params({}, spec=API_PARAMS)
|
||||
# logger.info('Getting request: {}'.format(raw_params))
|
||||
logger.debug('Getting request. Params: {}'.format(raw_params))
|
||||
headers = {'X-ORIGINAL-PARAMS': json.dumps(raw_params)}
|
||||
params = default_params
|
||||
|
||||
mime = request.accept_mimetypes\
|
||||
.best_match(MIMETYPES.keys(),
|
||||
DEFAULT_MIMETYPE)
|
||||
|
||||
mimeformat = MIMETYPES.get(mime, DEFAULT_FORMAT)
|
||||
outformat = mimeformat
|
||||
|
||||
outformat = 'json-ld'
|
||||
try:
|
||||
print('Getting request:')
|
||||
print(request)
|
||||
web_params = parse_params(raw_params, spec=WEB_PARAMS)
|
||||
api_params = parse_params(raw_params, spec=API_PARAMS)
|
||||
if hasattr(request, 'params'):
|
||||
request.params.update(api_params)
|
||||
params = api.parse_params(raw_params, api.WEB_PARAMS, api.API_PARAMS)
|
||||
outformat = params.get('outformat', mimeformat)
|
||||
if hasattr(request, 'parameters'):
|
||||
request.parameters.update(params)
|
||||
else:
|
||||
request.params = api_params
|
||||
request.parameters = params
|
||||
response = f(*args, **kwargs)
|
||||
except Error as ex:
|
||||
|
||||
if 'parameters' in response and not params['with-parameters']:
|
||||
del response.parameters
|
||||
|
||||
logger.debug('Response: {}'.format(response))
|
||||
|
||||
prefix = params.get('prefix')
|
||||
code = encode_url(prefix)
|
||||
|
||||
return response.flask(
|
||||
in_headers=params['in-headers'],
|
||||
headers=headers,
|
||||
prefix=prefix or url_for_code(code),
|
||||
base=prefix,
|
||||
context_uri=url_for('api.context',
|
||||
code=code,
|
||||
_external=True),
|
||||
outformat=outformat,
|
||||
expanded=params['expanded-jsonld'],
|
||||
template=params.get('template'),
|
||||
verbose=params['verbose'],
|
||||
aliases=params['aliases'],
|
||||
fields=params.get('fields'))
|
||||
|
||||
except (Exception) as ex:
|
||||
if current_app.debug or current_app.config['TESTING']:
|
||||
raise
|
||||
if not isinstance(ex, Error):
|
||||
msg = "{}".format(ex)
|
||||
ex = Error(message=msg, status=500)
|
||||
response = ex
|
||||
|
||||
in_headers = web_params['inHeaders'] != "0"
|
||||
expanded = api_params['expanded-jsonld']
|
||||
outformat = api_params['outformat']
|
||||
|
||||
return response.flask(
|
||||
in_headers=in_headers,
|
||||
headers=headers,
|
||||
prefix=url_for('.api', _external=True),
|
||||
context_uri=url_for('api.context',
|
||||
entity=type(response).__name__,
|
||||
_external=True),
|
||||
outformat=outformat,
|
||||
expanded=expanded)
|
||||
response.parameters = raw_params
|
||||
logger.exception(ex)
|
||||
return response.flask(
|
||||
outformat=outformat,
|
||||
expanded=params['expanded-jsonld'],
|
||||
verbose=params.get('verbose', True),
|
||||
)
|
||||
|
||||
return decorated_function
|
||||
|
||||
|
||||
@api_blueprint.route('/', methods=['POST', 'GET'])
|
||||
@api_blueprint.route('/', defaults={'plugins': None}, methods=['POST', 'GET'], strict_slashes=False)
|
||||
@api_blueprint.route('/<path:plugins>', methods=['POST', 'GET'], strict_slashes=False)
|
||||
@basic_api
|
||||
def api():
|
||||
response = current_app.senpy.analyse(**request.params)
|
||||
return response
|
||||
def api_root(plugins):
    """Main analysis endpoint.

    Plugins may be selected either through the URL path or through the
    'algorithm' parameter, but not both at once. With 'help' set, returns
    the valid parameters instead of running an analysis.
    """
    if plugins:
        if request.parameters['algorithm'] != api.API_PARAMS['algorithm']['default']:
            raise Error('You cannot specify the algorithm with a parameter and a URL variable.'
                        ' Please, remove one of them')
        plugins = plugins.replace('+', ',').replace('/', ',')
        plugins = api.processors['string_to_tuple'](plugins)
    else:
        plugins = request.parameters['algorithm']

    # Fixed: this was a stray debug `print(plugins)` left in the handler.
    logger.debug('Selected plugins: %s', plugins)

    sp = current_app.senpy
    plugins = sp.get_plugins(plugins)

    if request.parameters['help']:
        apis = [api.WEB_PARAMS, api.API_PARAMS, api.NIF_PARAMS]
        # Verbose is set to False as default, but we want it to default to
        # True for help. This checks the original value, to make sure it
        # wasn't set by default.
        if not request.parameters['verbose'] and get_params(request).get('verbose'):
            apis = []
        if request.parameters['algorithm'] == ['default', ]:
            plugins = []
        allparameters = api.get_all_params(plugins, *apis)
        return Help(valid_parameters=allparameters)

    req = api.parse_call(request.parameters)
    analyses = api.parse_analyses(req.parameters, plugins)
    results = current_app.senpy.analyse(req, analyses)
    return results
|
||||
|
||||
|
||||
@api_blueprint.route('/plugins/', methods=['POST', 'GET'])
|
||||
@api_blueprint.route('/evaluate', methods=['POST', 'GET'], strict_slashes=False)
|
||||
@basic_api
|
||||
def evaluate():
    """Run an evaluation request, or describe its parameters if help is set."""
    if request.parameters['help']:
        return Help(parameters=dict(api.EVAL_PARAMS))
    params = api.parse_params(request.parameters, api.EVAL_PARAMS)
    return current_app.senpy.evaluate(params)
|
||||
|
||||
|
||||
@api_blueprint.route('/plugins', methods=['POST', 'GET'], strict_slashes=False)
|
||||
@basic_api
|
||||
def plugins():
|
||||
sp = current_app.senpy
|
||||
dic = Plugins(plugins=list(sp.plugins.values()))
|
||||
params = api.parse_params(request.parameters, api.PLUGINS_PARAMS)
|
||||
ptype = params.get('plugin-type')
|
||||
plugins = list(sp.analysis_plugins(plugin_type=ptype))
|
||||
dic = Plugins(plugins=plugins)
|
||||
return dic
|
||||
|
||||
|
||||
@api_blueprint.route('/plugins/<plugin>/', methods=['POST', 'GET'])
|
||||
@api_blueprint.route('/plugins/<plugin>', methods=['POST', 'GET'], strict_slashes=False)
|
||||
@basic_api
|
||||
def plugin(plugin=None):
|
||||
def plugin(plugin):
|
||||
sp = current_app.senpy
|
||||
if plugin == 'default' and sp.default_plugin:
|
||||
return sp.default_plugin
|
||||
plugins = sp.filter_plugins(
|
||||
id='plugins/{}'.format(plugin)) or sp.filter_plugins(name=plugin)
|
||||
if plugins:
|
||||
response = list(plugins.values())[0]
|
||||
else:
|
||||
return Error(message="Plugin not found", status=404)
|
||||
return response
|
||||
return sp.get_plugin(plugin)
|
||||
|
||||
|
||||
@api_blueprint.route('/datasets', methods=['POST', 'GET'], strict_slashes=False)
@basic_api
def get_datasets():
    """List every dataset known to the evaluation backend."""
    return Datasets(datasets=list(datasets.values()))
|
||||
|
53
senpy/cli.py
@ -1,7 +1,24 @@
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from __future__ import print_function
|
||||
|
||||
import sys
|
||||
from .models import Error
|
||||
from .api import parse_params, CLI_PARAMS
|
||||
from .extensions import Senpy
|
||||
from . import api
|
||||
|
||||
|
||||
def argv_to_dict(argv):
|
||||
@ -13,27 +30,31 @@ def argv_to_dict(argv):
|
||||
if argv[i][0] == '-':
|
||||
key = argv[i].strip('-')
|
||||
value = argv[i + 1] if len(argv) > i + 1 else None
|
||||
if value and value[0] == '-':
|
||||
cli_dict[key] = ""
|
||||
if not value or value[0] == '-':
|
||||
cli_dict[key] = True
|
||||
else:
|
||||
cli_dict[key] = value
|
||||
return cli_dict
|
||||
|
||||
|
||||
def parse_cli(argv):
    """Split CLI arguments into parsed parameters plus the raw argument dict."""
    raw = argv_to_dict(argv)
    parsed = parse_params(raw, spec=CLI_PARAMS)
    return parsed, raw
|
||||
|
||||
|
||||
def main_function(argv):
|
||||
'''This is the method for unit testing
|
||||
'''
|
||||
cli_params, cli_dict = parse_cli(argv)
|
||||
plugin_folder = cli_params['plugin_folder']
|
||||
sp = Senpy(default_plugins=False, plugin_folder=plugin_folder)
|
||||
sp.activate_all(sync=True)
|
||||
res = sp.analyse(**cli_dict)
|
||||
params = api.parse_params(argv_to_dict(argv),
|
||||
api.CLI_PARAMS,
|
||||
api.API_PARAMS,
|
||||
api.NIF_PARAMS)
|
||||
plugin_folder = params['plugin-folder']
|
||||
default_plugins = not params.get('no-default-plugins', False)
|
||||
sp = Senpy(default_plugins=default_plugins, plugin_folder=plugin_folder)
|
||||
request = api.parse_call(params)
|
||||
algos = sp.get_plugins(request.parameters.get('algorithm', None))
|
||||
if algos:
|
||||
for algo in algos:
|
||||
sp.activate_plugin(algo.name)
|
||||
else:
|
||||
sp.activate_all()
|
||||
res = sp.analyse(request)
|
||||
return res
|
||||
|
||||
|
||||
@ -42,9 +63,9 @@ def main():
|
||||
'''
|
||||
try:
|
||||
res = main_function(sys.argv[1:])
|
||||
print(res.to_JSON())
|
||||
print(res.serialize())
|
||||
except Error as err:
|
||||
print(err.to_JSON())
|
||||
print(err.serialize(), file=sys.stderr)
|
||||
sys.exit(2)
|
||||
|
||||
|
||||
|
@ -1,3 +1,19 @@
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import requests
|
||||
import logging
|
||||
from . import models
|
||||
@ -12,12 +28,26 @@ class Client(object):
|
||||
def analyse(self, input, method='GET', **kwargs):
    """Send an analysis request for *input* to the root endpoint."""
    return self.request('/', input=input, method=method, **kwargs)
|
||||
|
||||
def evaluate(self, input, method='GET', **kwargs):
    """Send an evaluation request for *input* to the /evaluate endpoint."""
    return self.request('/evaluate', input=input, method=method, **kwargs)
|
||||
|
||||
def plugins(self, *args, **kwargs):
    """Return the plugins exposed by the server, indexed by name."""
    listed = self.request(path='/plugins').plugins
    return {plugin.name: plugin for plugin in listed}
|
||||
|
||||
def datasets(self):
    """Return the datasets exposed by the server, indexed by name."""
    listed = self.request(path='/datasets').datasets
    return {dataset.name: dataset for dataset in listed}
|
||||
|
||||
def request(self, path=None, method='GET', **params):
|
||||
url = '{}{}'.format(self.endpoint, path)
|
||||
response = requests.request(method=method, url=url, params=params)
|
||||
url = '{}{}'.format(self.endpoint.rstrip('/'), path)
|
||||
if method == 'POST':
|
||||
response = requests.post(url=url, data=params)
|
||||
else:
|
||||
response = requests.request(method=method, url=url, params=params)
|
||||
|
||||
try:
|
||||
resp = models.from_dict(response.json())
|
||||
resp.validate(resp)
|
||||
except Exception as ex:
|
||||
logger.error(('There seems to be a problem with the response:\n'
|
||||
'\tURL: {url}\n'
|
||||
|
7
senpy/config.py
Normal file
@ -0,0 +1,7 @@
|
||||
import os

# Strict mode: enabled unless SENPY_STRICT is unset, "false" or "f".
strict = os.environ.get('SENPY_STRICT', '').lower() not in ("", "false", "f")

# Optional data directory, normalized to an absolute path when set.
data_folder = os.environ.get('SENPY_DATA', None)
if data_folder:
    data_folder = os.path.abspath(data_folder)

# Testing mode: enabled by any non-empty SENPY_TESTING value.
testing = bool(os.environ.get('SENPY_TESTING', ''))
|
@ -1,3 +1,18 @@
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
"""
|
||||
Main class for Senpy.
|
||||
It orchestrates plugin (de)activation and analysis.
|
||||
@ -5,23 +20,20 @@ It orchestrates plugin (de)activation and analysis.
|
||||
from future import standard_library
|
||||
standard_library.install_aliases()
|
||||
|
||||
from .plugins import SentimentPlugin, SenpyPlugin
|
||||
from .models import Error, Entry, Results
|
||||
from . import config
|
||||
from . import plugins, api
|
||||
from .models import Error, AggregatedEvaluation
|
||||
from .plugins import AnalysisPlugin
|
||||
from .blueprints import api_blueprint, demo_blueprint, ns_blueprint
|
||||
from .api import API_PARAMS, NIF_PARAMS, parse_params
|
||||
|
||||
from threading import Thread
|
||||
|
||||
from functools import partial
|
||||
import os
|
||||
import copy
|
||||
import fnmatch
|
||||
import inspect
|
||||
import sys
|
||||
import importlib
|
||||
import errno
|
||||
import logging
|
||||
import traceback
|
||||
import yaml
|
||||
import pip
|
||||
|
||||
from . import gsitk_compat
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@ -32,23 +44,39 @@ class Senpy(object):
|
||||
def __init__(self,
|
||||
app=None,
|
||||
plugin_folder=".",
|
||||
data_folder=None,
|
||||
install=False,
|
||||
strict=None,
|
||||
default_plugins=False):
|
||||
self.app = app
|
||||
self._search_folders = set()
|
||||
self._plugin_list = []
|
||||
self._outdated = True
|
||||
self._default = None
|
||||
|
||||
self.add_folder(plugin_folder)
|
||||
|
||||
default_data = os.path.join(os.getcwd(), 'senpy_data')
|
||||
self.data_folder = data_folder or os.environ.get('SENPY_DATA', default_data)
|
||||
try:
|
||||
os.makedirs(self.data_folder)
|
||||
except OSError as e:
|
||||
if e.errno == errno.EEXIST:
|
||||
logger.debug('Data folder exists: {}'.format(self.data_folder))
|
||||
else: # pragma: no cover
|
||||
raise
|
||||
|
||||
self._default = None
|
||||
self.strict = strict if strict is not None else config.strict
|
||||
self.install = install
|
||||
self._plugins = {}
|
||||
if plugin_folder:
|
||||
self.add_folder(plugin_folder)
|
||||
|
||||
if default_plugins:
|
||||
self.add_folder('plugins', from_root=True)
|
||||
else:
|
||||
# Add only conversion plugins
|
||||
self.add_folder(os.path.join('plugins', 'conversion'),
|
||||
self.add_folder(os.path.join('plugins', 'postprocessing'),
|
||||
from_root=True)
|
||||
|
||||
self.app = app
|
||||
if app is not None:
|
||||
self.init_app(app)
|
||||
self._conversion_candidates = {}
|
||||
|
||||
def init_app(self, app):
|
||||
""" Initialise a flask app to add plugins to its context """
|
||||
@ -61,328 +89,335 @@ class Senpy(object):
|
||||
# otherwise fall back to the request context
|
||||
if hasattr(app, 'teardown_appcontext'):
|
||||
app.teardown_appcontext(self.teardown)
|
||||
else:
|
||||
else: # pragma: no cover
|
||||
app.teardown_request(self.teardown)
|
||||
app.register_blueprint(api_blueprint, url_prefix="/api")
|
||||
app.register_blueprint(ns_blueprint, url_prefix="/ns")
|
||||
app.register_blueprint(demo_blueprint, url_prefix="/")
|
||||
|
||||
def add_plugin(self, plugin):
    """Register *plugin* under its lowercased name.

    Registering a plugin invalidates the cached conversion candidates.
    """
    key = plugin.name.lower()
    self._plugins[key] = plugin
    self._conversion_candidates = {}
|
||||
|
||||
def delete_plugin(self, plugin):
    """Remove *plugin* from the registry (raises KeyError if absent)."""
    self._plugins.pop(plugin.name.lower())
|
||||
|
||||
def plugins(self, plugin_type=None, is_activated=True, **kwargs):
    """ Return the plugins registered for a given application. Filtered by criteria """
    matching = plugins.pfilter(self._plugins,
                               plugin_type=plugin_type,
                               is_activated=is_activated,
                               **kwargs)
    # Sort by id so the listing is deterministic.
    return sorted(matching, key=lambda p: p.id)
|
||||
|
||||
def get_plugin(self, name, default=None):
    """Resolve a single plugin by name or id.

    'default' resolves to the default plugin and 'conversion' to None.
    Raises a 404 Error when nothing matches.
    NOTE(review): the *default* parameter is accepted but never used.
    """
    if name == 'default':
        return self.default_plugin
    if name == 'conversion':
        return None

    key = name.lower()
    if key in self._plugins:
        return self._plugins[key]

    # Fall back to matching by id: first the endpoint form, then verbatim.
    for candidate_id in ('endpoint:plugins/{}'.format(key), name):
        found = self.plugins(id=candidate_id, plugin_type=None)
        if found:
            return found[0]

    msg = ("Plugin not found: '{}'\n"
           "Make sure it is ACTIVATED\n"
           "Valid algorithms: {}").format(name,
                                          self._plugins.keys())
    raise Error(message=msg, status=404)
|
||||
|
||||
def get_plugins(self, name):
    """Resolve several plugins from a comma-separated string, tuple or list."""
    try:
        names = name.split(',')
    except AttributeError:  # Assume it is a tuple or a list
        names = name
    return tuple(self.get_plugin(n) for n in names)
|
||||
|
||||
def analysis_plugins(self, **kwargs):
    """ Return only the analysis plugins that are active"""
    candidates = self.plugins(**kwargs)
    # Narrow the active plugins down to analysis plugins only.
    return list(plugins.pfilter(candidates, plugin_type=AnalysisPlugin))
|
||||
|
||||
def add_folder(self, folder, from_root=False):
    """Find plugins in *folder* and add them to this instance.

    :param folder: path to scan for plugins.
    :param from_root: if True, *folder* is relative to the senpy package.
    :raises AttributeError: if *folder* is not an existing directory.
    """
    if from_root:
        folder = os.path.join(os.path.dirname(__file__), folder)
    logger.debug("Adding folder: %s", folder)
    if not os.path.isdir(folder):
        logger.debug("Not a folder: %s", folder)
        # Fixed: the message was never interpolated — the original passed
        # ("...: %s", folder) as two exception args instead of formatting.
        raise AttributeError("Not a folder or does not exist: %s" % folder)
    self._search_folders.add(folder)
    self._outdated = True
    new_plugins = plugins.from_folder([folder],
                                      data_folder=self.data_folder,
                                      strict=self.strict)
    for plugin in new_plugins:
        self.add_plugin(plugin)
|
||||
|
||||
def _find_plugin(self, params):
|
||||
api_params = parse_params(params, spec=API_PARAMS)
|
||||
algo = None
|
||||
if "algorithm" in api_params and api_params["algorithm"]:
|
||||
algo = api_params["algorithm"]
|
||||
elif self.plugins:
|
||||
algo = self.default_plugin and self.default_plugin.name
|
||||
if not algo:
|
||||
def _process(self, req, pending, done=None):
|
||||
"""
|
||||
Recursively process the entries with the first plugin in the list, and pass the results
|
||||
to the rest of the plugins.
|
||||
"""
|
||||
done = done or []
|
||||
if not pending:
|
||||
return req
|
||||
|
||||
analysis = pending[0]
|
||||
results = analysis.run(req)
|
||||
results.activities.append(analysis)
|
||||
done += analysis
|
||||
return self._process(results, pending[1:], done)
|
||||
|
||||
def install_deps(self):
|
||||
logger.info('Installing dependencies')
|
||||
# If a plugin is activated, its dependencies should already be installed
|
||||
# Otherwise, it would've failed to activate.
|
||||
plugins.install_deps(*self._plugins.values())
|
||||
|
||||
def analyse(self, request, analyses=None):
|
||||
"""
|
||||
Main method that analyses a request, either from CLI or HTTP.
|
||||
It takes a processed request, provided by the user, as returned
|
||||
by api.parse_call().
|
||||
"""
|
||||
if not self.plugins():
|
||||
raise Error(
|
||||
status=404,
|
||||
message=("No plugins found."
|
||||
" Please install one.").format(algo))
|
||||
if algo not in self.plugins:
|
||||
logger.debug(("The algorithm '{}' is not valid\n"
|
||||
"Valid algorithms: {}").format(algo,
|
||||
self.plugins.keys()))
|
||||
raise Error(
|
||||
status=404,
|
||||
message="The algorithm '{}' is not valid".format(algo))
|
||||
" Please install one."))
|
||||
if analyses is None:
|
||||
plugins = self.get_plugins(request.parameters['algorithm'])
|
||||
analyses = api.parse_analyses(request.parameters, plugins)
|
||||
logger.debug("analysing request: {}".format(request))
|
||||
results = self._process(request, analyses)
|
||||
logger.debug("Got analysis result: {}".format(results))
|
||||
results = self.postprocess(results, analyses)
|
||||
logger.debug("Returning post-processed result: {}".format(results))
|
||||
return results
|
||||
|
||||
if not self.plugins[algo].is_activated:
|
||||
logger.debug("Plugin not activated: {}".format(algo))
|
||||
raise Error(
|
||||
status=400,
|
||||
message=("The algorithm '{}'"
|
||||
" is not activated yet").format(algo))
|
||||
return self.plugins[algo]
|
||||
|
||||
def _get_params(self, params, plugin):
|
||||
nif_params = parse_params(params, spec=NIF_PARAMS)
|
||||
extra_params = plugin.get('extra_params', {})
|
||||
specific_params = parse_params(params, spec=extra_params)
|
||||
nif_params.update(specific_params)
|
||||
return nif_params
|
||||
|
||||
def _get_entries(self, params):
|
||||
entry = None
|
||||
if params['informat'] == 'text':
|
||||
entry = Entry(text=params['input'])
|
||||
else:
|
||||
raise NotImplemented('Only text input format implemented')
|
||||
yield entry
|
||||
|
||||
def analyse(self, **api_params):
|
||||
logger.debug("analysing with params: {}".format(api_params))
|
||||
plugin = self._find_plugin(api_params)
|
||||
nif_params = self._get_params(api_params, plugin)
|
||||
resp = Results()
|
||||
if 'with_parameters' in api_params:
|
||||
resp.parameters = nif_params
|
||||
try:
|
||||
entries = []
|
||||
for i in self._get_entries(nif_params):
|
||||
entries += list(plugin.analyse_entry(i, nif_params))
|
||||
resp.entries = entries
|
||||
self.convert_emotions(resp, plugin, nif_params)
|
||||
resp.analysis.append(plugin.id)
|
||||
logger.debug("Returning analysis result: {}".format(resp))
|
||||
except Error as ex:
|
||||
logger.exception('Error returning analysis result')
|
||||
resp = ex
|
||||
except Exception as ex:
|
||||
logger.exception('Error returning analysis result')
|
||||
resp = Error(message=str(ex), status=500)
|
||||
return resp
|
||||
|
||||
def _conversion_candidates(self, fromModel, toModel):
|
||||
candidates = self.filter_plugins(**{'@type': 'emotionConversionPlugin'})
|
||||
for name, candidate in candidates.items():
|
||||
for pair in candidate.onyx__doesConversion:
|
||||
logging.debug(pair)
|
||||
|
||||
if pair['onyx:conversionFrom'] == fromModel \
|
||||
and pair['onyx:conversionTo'] == toModel:
|
||||
# logging.debug('Found candidate: {}'.format(candidate))
|
||||
yield candidate
|
||||
|
||||
def convert_emotions(self, resp, plugin, params):
|
||||
def convert_emotions(self, resp, analyses):
|
||||
"""
|
||||
Conversion of all emotions in a response.
|
||||
Conversion of all emotions in a response **in place**.
|
||||
In addition to converting from one model to another, it has
|
||||
to include the conversion plugin to the analysis list.
|
||||
Needless to say, this is far from an elegant solution, but it works.
|
||||
@todo refactor and clean up
|
||||
"""
|
||||
fromModel = plugin.get('onyx:usesEmotionModel', None)
|
||||
toModel = params.get('emotionModel', None)
|
||||
output = params.get('conversion', None)
|
||||
logger.debug('Asked for model: {}'.format(toModel))
|
||||
logger.debug('Analysis plugin uses model: {}'.format(fromModel))
|
||||
|
||||
logger.debug("Converting emotions")
|
||||
if 'parameters' not in resp:
|
||||
logger.debug("NO PARAMETERS")
|
||||
return resp
|
||||
|
||||
params = resp['parameters']
|
||||
toModel = params.get('emotion-model', None)
|
||||
if not toModel:
|
||||
return
|
||||
try:
|
||||
candidate = next(self._conversion_candidates(fromModel, toModel))
|
||||
except StopIteration:
|
||||
e = Error(('No conversion plugin found for: '
|
||||
'{} -> {}'.format(fromModel, toModel)))
|
||||
e.original_response = resp
|
||||
e.parameters = params
|
||||
raise e
|
||||
logger.debug("NO tomodel PARAMETER")
|
||||
return resp
|
||||
|
||||
logger.debug('Asked for model: {}'.format(toModel))
|
||||
output = params.get('conversion', None)
|
||||
|
||||
newentries = []
|
||||
done = []
|
||||
for i in resp.entries:
|
||||
|
||||
if output == "full":
|
||||
newemotions = copy.deepcopy(i.emotions)
|
||||
else:
|
||||
newemotions = []
|
||||
for j in i.emotions:
|
||||
activity = j['prov:wasGeneratedBy']
|
||||
act = resp.activity(activity)
|
||||
if not act:
|
||||
raise Error('Could not find the emotion model for {}'.format(activity))
|
||||
fromModel = act.plugin['onyx:usesEmotionModel']
|
||||
if toModel == fromModel:
|
||||
continue
|
||||
candidate = self._conversion_candidate(fromModel, toModel)
|
||||
if not candidate:
|
||||
e = Error(('No conversion plugin found for: '
|
||||
'{} -> {}'.format(fromModel, toModel)),
|
||||
status=404)
|
||||
e.original_response = resp
|
||||
e.parameters = params
|
||||
raise e
|
||||
|
||||
analysis = candidate.activity(params)
|
||||
done.append(analysis)
|
||||
for k in candidate.convert(j, fromModel, toModel, params):
|
||||
k.prov__wasGeneratedBy = candidate.id
|
||||
k.prov__wasGeneratedBy = analysis.id
|
||||
if output == 'nested':
|
||||
k.prov__wasDerivedFrom = j
|
||||
newemotions.append(k)
|
||||
i.emotions = newemotions
|
||||
newentries.append(i)
|
||||
resp.entries = newentries
|
||||
resp.analysis.append(candidate.id)
|
||||
return resp
|
||||
|
||||
def _conversion_candidate(self, fromModel, toModel):
|
||||
if not self._conversion_candidates:
|
||||
candidates = {}
|
||||
for conv in self.plugins(plugin_type=plugins.EmotionConversion):
|
||||
for pair in conv.onyx__doesConversion:
|
||||
logging.debug(pair)
|
||||
key = (pair['onyx:conversionFrom'], pair['onyx:conversionTo'])
|
||||
if key not in candidates:
|
||||
candidates[key] = []
|
||||
candidates[key].append(conv)
|
||||
self._conversion_candidates = candidates
|
||||
|
||||
key = (fromModel, toModel)
|
||||
if key not in self._conversion_candidates:
|
||||
return None
|
||||
return self._conversion_candidates[key][0]
|
||||
|
||||
def postprocess(self, response, analyses):
|
||||
'''
|
||||
Transform the results from the analysis plugins.
|
||||
It has some pre-defined post-processing like emotion conversion,
|
||||
and it also allows plugins to auto-select themselves.
|
||||
'''
|
||||
|
||||
response = self.convert_emotions(response, analyses)
|
||||
|
||||
for plug in self.plugins(plugin_type=plugins.PostProcessing):
|
||||
if plug.check(response, response.activities):
|
||||
activity = plug.activity(response.parameters)
|
||||
response = plug.process(response, activity)
|
||||
return response
|
||||
|
||||
def _get_datasets(self, request):
|
||||
datasets_name = request.parameters.get('dataset', None).split(',')
|
||||
for dataset in datasets_name:
|
||||
if dataset not in gsitk_compat.datasets:
|
||||
logger.debug(("The dataset '{}' is not valid\n"
|
||||
"Valid datasets: {}").format(
|
||||
dataset, gsitk_compat.datasets.keys()))
|
||||
raise Error(
|
||||
status=404,
|
||||
message="The dataset '{}' is not valid".format(dataset))
|
||||
return datasets_name
|
||||
|
||||
def evaluate(self, params):
|
||||
logger.debug("evaluating request: {}".format(params))
|
||||
results = AggregatedEvaluation()
|
||||
results.parameters = params
|
||||
datasets = self._get_datasets(results)
|
||||
plugs = []
|
||||
for plugname in params['algorithm']:
|
||||
plugs = self.get_plugins(plugname)
|
||||
for plug in plugs:
|
||||
if not isinstance(plug, plugins.Evaluable):
|
||||
raise Exception('Plugin {} can not be evaluated', plug.id)
|
||||
|
||||
for eval in plugins.evaluate(plugs, datasets):
|
||||
results.evaluations.append(eval)
|
||||
if 'with-parameters' not in results.parameters:
|
||||
del results.parameters
|
||||
logger.debug("Returning evaluation result: {}".format(results))
|
||||
return results
|
||||
|
||||
@property
|
||||
def default_plugin(self):
|
||||
candidate = self._default
|
||||
if not candidate:
|
||||
candidates = self.filter_plugins(is_activated=True)
|
||||
if not self._default or not self._default.is_activated:
|
||||
candidates = self.analysis_plugins()
|
||||
if len(candidates) > 0:
|
||||
candidate = list(candidates.values())[0]
|
||||
logger.debug("Default: {}".format(candidate))
|
||||
return candidate
|
||||
self._default = candidates[0]
|
||||
else:
|
||||
self._default = None
|
||||
logger.debug("Default: {}".format(self._default))
|
||||
return self._default
|
||||
|
||||
@default_plugin.setter
|
||||
def default_plugin(self, value):
|
||||
if isinstance(value, SenpyPlugin):
|
||||
if isinstance(value, plugins.Plugin):
|
||||
if not value.is_activated:
|
||||
raise AttributeError('The default plugin has to be activated.')
|
||||
self._default = value
|
||||
else:
|
||||
self._default = self.plugins[value]
|
||||
|
||||
def activate_all(self, sync=False):
|
||||
else:
|
||||
self._default = self._plugins[value.lower()]
|
||||
|
||||
def activate_all(self, sync=True):
|
||||
ps = []
|
||||
for plug in self.plugins.keys():
|
||||
ps.append(self.activate_plugin(plug, sync=sync))
|
||||
for plug in self._plugins.keys():
|
||||
try:
|
||||
self.activate_plugin(plug, sync=sync)
|
||||
except Exception as ex:
|
||||
if self.strict:
|
||||
raise
|
||||
logger.error('Could not activate {}: {}'.format(plug, ex))
|
||||
return ps
|
||||
|
||||
def deactivate_all(self, sync=False):
|
||||
def deactivate_all(self, sync=True):
|
||||
ps = []
|
||||
for plug in self.plugins.keys():
|
||||
for plug in self._plugins.keys():
|
||||
ps.append(self.deactivate_plugin(plug, sync=sync))
|
||||
return ps
|
||||
|
||||
def _set_active_plugin(self, plugin_name, active=True, *args, **kwargs):
|
||||
''' We're using a variable in the plugin itself to activate/deactive plugins.\
|
||||
Note that plugins may activate themselves by setting this variable.
|
||||
'''
|
||||
self.plugins[plugin_name].is_activated = active
|
||||
def _activate(self, plugin):
|
||||
with plugin._lock:
|
||||
if plugin.is_activated:
|
||||
return
|
||||
try:
|
||||
logger.info("Activating plugin: {}".format(plugin.name))
|
||||
|
||||
def activate_plugin(self, plugin_name, sync=False):
|
||||
try:
|
||||
plugin = self.plugins[plugin_name]
|
||||
except KeyError:
|
||||
assert plugin._activate()
|
||||
logger.info(f"Plugin activated: {plugin.name}")
|
||||
except Exception as ex:
|
||||
if getattr(plugin, "optional", False) and not self.strict:
|
||||
logger.info(f"Plugin could NOT be activated: {plugin.name}")
|
||||
return False
|
||||
raise
|
||||
return plugin.is_activated
|
||||
|
||||
def activate_plugin(self, plugin_name, sync=True):
|
||||
plugin_name = plugin_name.lower()
|
||||
if plugin_name not in self._plugins:
|
||||
raise Error(
|
||||
message="Plugin not found: {}".format(plugin_name), status=404)
|
||||
plugin = self._plugins[plugin_name]
|
||||
|
||||
logger.info("Activating plugin: {}".format(plugin.name))
|
||||
|
||||
def act():
|
||||
success = False
|
||||
try:
|
||||
plugin.activate()
|
||||
msg = "Plugin activated: {}".format(plugin.name)
|
||||
logger.info(msg)
|
||||
success = True
|
||||
self._set_active_plugin(plugin_name, success)
|
||||
except Exception as ex:
|
||||
msg = "Error activating plugin {} - {} : \n\t{}".format(
|
||||
plugin.name, ex, traceback.format_exc())
|
||||
logger.error(msg)
|
||||
raise Error(msg)
|
||||
|
||||
if sync or 'async' in plugin and not plugin.async:
|
||||
act()
|
||||
if sync or not getattr(plugin, 'async', True) or getattr(
|
||||
plugin, 'sync', False):
|
||||
return self._activate(plugin)
|
||||
else:
|
||||
th = Thread(target=act)
|
||||
th = Thread(target=partial(self._activate, plugin))
|
||||
th.start()
|
||||
return th
|
||||
|
||||
def deactivate_plugin(self, plugin_name, sync=False):
|
||||
try:
|
||||
plugin = self.plugins[plugin_name]
|
||||
except KeyError:
|
||||
def _deactivate(self, plugin):
|
||||
with plugin._lock:
|
||||
if not plugin.is_activated:
|
||||
return
|
||||
plugin._deactivate()
|
||||
logger.info("Plugin deactivated: {}".format(plugin.name))
|
||||
|
||||
def deactivate_plugin(self, plugin_name, sync=True):
|
||||
plugin_name = plugin_name.lower()
|
||||
if plugin_name not in self._plugins:
|
||||
raise Error(
|
||||
message="Plugin not found: {}".format(plugin_name), status=404)
|
||||
plugin = self._plugins[plugin_name]
|
||||
|
||||
self._set_active_plugin(plugin_name, False)
|
||||
|
||||
def deact():
|
||||
try:
|
||||
plugin.deactivate()
|
||||
logger.info("Plugin deactivated: {}".format(plugin.name))
|
||||
except Exception as ex:
|
||||
logger.error(
|
||||
"Error deactivating plugin {}: {}".format(plugin.name, ex))
|
||||
logger.error("Trace: {}".format(traceback.format_exc()))
|
||||
|
||||
if sync or 'async' in plugin and not plugin.async:
|
||||
deact()
|
||||
if sync or not getattr(plugin, 'async', True) or not getattr(
|
||||
plugin, 'sync', False):
|
||||
plugin._deactivate()
|
||||
else:
|
||||
th = Thread(target=deact)
|
||||
th = Thread(target=plugin.deactivate)
|
||||
th.start()
|
||||
|
||||
@classmethod
|
||||
def validate_info(cls, info):
|
||||
return all(x in info for x in ('name', 'module', 'description', 'version'))
|
||||
|
||||
def install_deps(self):
|
||||
for i in self.plugins.values():
|
||||
self._install_deps(i)
|
||||
|
||||
@classmethod
|
||||
def _install_deps(cls, info=None):
|
||||
requirements = info.get('requirements', [])
|
||||
if requirements:
|
||||
pip_args = []
|
||||
pip_args.append('install')
|
||||
pip_args.append('--use-wheel')
|
||||
for req in requirements:
|
||||
pip_args.append(req)
|
||||
logger.info('Installing requirements: ' + str(requirements))
|
||||
pip.main(pip_args)
|
||||
|
||||
@classmethod
|
||||
def _load_module(cls, name, root):
|
||||
sys.path.append(root)
|
||||
tmp = importlib.import_module(name)
|
||||
sys.path.remove(root)
|
||||
return tmp
|
||||
|
||||
@classmethod
|
||||
def _load_plugin_from_info(cls, info, root):
|
||||
if not cls.validate_info(info):
|
||||
logger.warn('The module info is not valid.\n\t{}'.format(info))
|
||||
return None, None
|
||||
module = info["module"]
|
||||
name = info["name"]
|
||||
|
||||
cls._install_deps(info)
|
||||
tmp = cls._load_module(module, root)
|
||||
|
||||
candidate = None
|
||||
for _, obj in inspect.getmembers(tmp):
|
||||
if inspect.isclass(obj) and inspect.getmodule(obj) == tmp:
|
||||
logger.debug(("Found plugin class:"
|
||||
" {}@{}").format(obj, inspect.getmodule(obj)))
|
||||
candidate = obj
|
||||
break
|
||||
if not candidate:
|
||||
logger.debug("No valid plugin for: {}".format(module))
|
||||
return
|
||||
module = candidate(info=info)
|
||||
return name, module
|
||||
|
||||
@classmethod
|
||||
def _load_plugin(cls, root, filename):
|
||||
fpath = os.path.join(root, filename)
|
||||
logger.debug("Loading plugin: {}".format(fpath))
|
||||
with open(fpath, 'r') as f:
|
||||
info = yaml.load(f)
|
||||
logger.debug("Info: {}".format(info))
|
||||
return cls._load_plugin_from_info(info, root)
|
||||
|
||||
def _load_plugins(self):
|
||||
plugins = {}
|
||||
for search_folder in self._search_folders:
|
||||
for root, dirnames, filenames in os.walk(search_folder):
|
||||
for filename in fnmatch.filter(filenames, '*.senpy'):
|
||||
name, plugin = self._load_plugin(root, filename)
|
||||
if plugin and name:
|
||||
plugins[name] = plugin
|
||||
|
||||
self._outdated = False
|
||||
return plugins
|
||||
return th
|
||||
|
||||
def teardown(self, exception):
|
||||
pass
|
||||
|
||||
@property
|
||||
def plugins(self):
|
||||
""" Return the plugins registered for a given application. """
|
||||
if self._outdated:
|
||||
self._plugin_list = self._load_plugins()
|
||||
return self._plugin_list
|
||||
|
||||
def filter_plugins(self, **kwargs):
|
||||
""" Filter plugins by different criteria """
|
||||
|
||||
def matches(plug):
|
||||
res = all(getattr(plug, k, None) == v for (k, v) in kwargs.items())
|
||||
logger.debug(
|
||||
"matching {} with {}: {}".format(plug.name, kwargs, res))
|
||||
return res
|
||||
|
||||
if not kwargs:
|
||||
return self.plugins
|
||||
else:
|
||||
return {n: p for n, p in self.plugins.items() if matches(p)}
|
||||
|
||||
def sentiment_plugins(self):
|
||||
""" Return only the sentiment plugins """
|
||||
return {
|
||||
p: plugin
|
||||
for p, plugin in self.plugins.items()
|
||||
if isinstance(plugin, SentimentPlugin)
|
||||
}
|
||||
|
67
senpy/gsitk_compat.py
Normal file
@ -0,0 +1,67 @@
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import logging
|
||||
import os
|
||||
|
||||
from pkg_resources import parse_version, get_distribution, DistributionNotFound
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
MSG = 'GSITK is not (properly) installed.'
|
||||
IMPORTMSG = '{} Some functions will be unavailable.'.format(MSG)
|
||||
RUNMSG = '{} Install it to use this function.'.format(MSG)
|
||||
|
||||
|
||||
def raise_exception(*args, **kwargs):
|
||||
raise Exception(RUNMSG)
|
||||
|
||||
|
||||
try:
|
||||
gsitk_distro = get_distribution("gsitk")
|
||||
GSITK_VERSION = parse_version(gsitk_distro.version)
|
||||
|
||||
if not os.environ.get('DATA_PATH'):
|
||||
os.environ['DATA_PATH'] = os.environ.get('SENPY_DATA', 'senpy_data')
|
||||
|
||||
from gsitk.datasets.datasets import DatasetManager
|
||||
from gsitk.evaluation.evaluation import Evaluation as Eval # noqa: F401
|
||||
from gsitk.evaluation.evaluation import EvalPipeline # noqa: F401
|
||||
from sklearn.pipeline import Pipeline
|
||||
modules = locals()
|
||||
GSITK_AVAILABLE = True
|
||||
datasets = {}
|
||||
manager = DatasetManager()
|
||||
|
||||
for item in manager.get_datasets():
|
||||
for key in item:
|
||||
if key in datasets:
|
||||
continue
|
||||
properties = item[key]
|
||||
properties['@id'] = key
|
||||
datasets[key] = properties
|
||||
|
||||
def prepare(ds, *args, **kwargs):
|
||||
return manager.prepare_datasets(ds, *args, **kwargs)
|
||||
|
||||
|
||||
except (DistributionNotFound, ImportError) as err:
|
||||
logger.debug('Error importing GSITK: {}'.format(err))
|
||||
logger.warning(IMPORTMSG)
|
||||
GSITK_AVAILABLE = False
|
||||
GSITK_VERSION = ()
|
||||
DatasetManager = Eval = Pipeline = prepare = raise_exception
|
||||
datasets = {}
|
303
senpy/meta.py
Normal file
@ -0,0 +1,303 @@
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
'''
|
||||
Meta-programming for the models.
|
||||
'''
|
||||
import os
|
||||
import json
|
||||
import jsonschema
|
||||
import inspect
|
||||
import copy
|
||||
|
||||
from abc import ABCMeta
|
||||
from collections import namedtuple
|
||||
from collections.abc import MutableMapping
|
||||
|
||||
|
||||
class BaseMeta(ABCMeta):
|
||||
'''
|
||||
Metaclass for models. It extracts the default values for the fields in
|
||||
the model.
|
||||
|
||||
For instance, instances of the following class wouldn't need to mark
|
||||
their version or description on initialization:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
class MyPlugin(Plugin):
|
||||
version=0.3
|
||||
description='A dull plugin'
|
||||
|
||||
|
||||
Note that these operations could be included in the __init__ of the
|
||||
class, but it would be very inefficient.
|
||||
'''
|
||||
_subtypes = {}
|
||||
|
||||
def __new__(mcs, name, bases, attrs, **kwargs):
|
||||
register_afterwards = False
|
||||
defaults = {}
|
||||
aliases = {}
|
||||
|
||||
attrs = mcs.expand_with_schema(name, attrs)
|
||||
if 'schema' in attrs:
|
||||
register_afterwards = True
|
||||
for base in bases:
|
||||
if hasattr(base, '_defaults'):
|
||||
defaults.update(getattr(base, '_defaults'))
|
||||
if hasattr(base, '_aliases'):
|
||||
aliases.update(getattr(base, '_aliases'))
|
||||
|
||||
info, rest = mcs.split_attrs(attrs)
|
||||
|
||||
for i in list(info.keys()):
|
||||
if isinstance(info[i], _Alias):
|
||||
aliases[i] = info[i].indict
|
||||
if info[i].default is not None:
|
||||
defaults[i] = info[i].default
|
||||
else:
|
||||
defaults[i] = info[i]
|
||||
|
||||
rest['_defaults'] = defaults
|
||||
rest['_aliases'] = aliases
|
||||
|
||||
cls = super(BaseMeta, mcs).__new__(mcs, name, tuple(bases), rest)
|
||||
|
||||
if register_afterwards:
|
||||
mcs.register(cls, defaults['@type'])
|
||||
return cls
|
||||
|
||||
@classmethod
|
||||
def register(mcs, rsubclass, rtype=None):
|
||||
mcs._subtypes[rtype or rsubclass.__name__] = rsubclass
|
||||
|
||||
@staticmethod
|
||||
def expand_with_schema(name, attrs):
|
||||
if 'schema' in attrs: # Schema specified by name
|
||||
schema_file = '{}.json'.format(attrs['schema'])
|
||||
elif 'schema_file' in attrs:
|
||||
schema_file = attrs['schema_file']
|
||||
del attrs['schema_file']
|
||||
else:
|
||||
return attrs
|
||||
|
||||
if '/' not in 'schema_file':
|
||||
thisdir = os.path.dirname(os.path.realpath(__file__))
|
||||
schema_file = os.path.join(thisdir,
|
||||
'schemas',
|
||||
schema_file)
|
||||
|
||||
schema_path = 'file://' + schema_file
|
||||
|
||||
with open(schema_file) as f:
|
||||
schema = json.load(f)
|
||||
|
||||
resolver = jsonschema.RefResolver(schema_path, schema)
|
||||
if '@type' not in attrs:
|
||||
attrs['@type'] = name
|
||||
attrs['_schema_file'] = schema_file
|
||||
attrs['schema'] = schema
|
||||
attrs['_validator'] = jsonschema.Draft4Validator(schema, resolver=resolver)
|
||||
|
||||
schema_defaults = BaseMeta.get_defaults(attrs['schema'])
|
||||
attrs.update(schema_defaults)
|
||||
|
||||
return attrs
|
||||
|
||||
@staticmethod
|
||||
def is_func(v):
|
||||
return inspect.isroutine(v) or inspect.ismethod(v) or \
|
||||
inspect.ismodule(v) or isinstance(v, property)
|
||||
|
||||
@staticmethod
|
||||
def is_internal(k):
|
||||
return k[0] == '_' or k == 'schema' or k == 'data'
|
||||
|
||||
@staticmethod
|
||||
def get_key(key):
|
||||
if key[0] != '_':
|
||||
key = key.replace("__", ":", 1)
|
||||
return key
|
||||
|
||||
@staticmethod
|
||||
def split_attrs(attrs):
|
||||
'''
|
||||
Extract the attributes of the class.
|
||||
|
||||
This allows adding default values in the class definition.
|
||||
e.g.:
|
||||
'''
|
||||
isattr = {}
|
||||
rest = {}
|
||||
for key, value in attrs.items():
|
||||
if not (BaseMeta.is_internal(key)) and (not BaseMeta.is_func(value)):
|
||||
isattr[key] = value
|
||||
else:
|
||||
rest[key] = value
|
||||
return isattr, rest
|
||||
|
||||
@staticmethod
|
||||
def get_defaults(schema):
|
||||
temp = {}
|
||||
for obj in [
|
||||
schema,
|
||||
] + schema.get('allOf', []):
|
||||
for k, v in obj.get('properties', {}).items():
|
||||
if 'default' in v and k not in temp:
|
||||
temp[k] = v['default']
|
||||
return temp
|
||||
|
||||
|
||||
def make_property(key, default=None):
|
||||
|
||||
def fget(self):
|
||||
if default:
|
||||
return self.get(key, copy.copy(default))
|
||||
return self[key]
|
||||
|
||||
def fdel(self):
|
||||
del self[key]
|
||||
|
||||
def fset(self, value):
|
||||
self[key] = value
|
||||
|
||||
return fget, fset, fdel
|
||||
|
||||
|
||||
class CustomDict(MutableMapping, object):
|
||||
'''
|
||||
A dictionary whose elements can also be accessed as attributes. Since some
|
||||
characters are not valid in the dot-notation, the attribute names also
|
||||
converted. e.g.:
|
||||
|
||||
> d = CustomDict()
|
||||
> d.key = d['ns:name'] = 1
|
||||
> d.key == d['key']
|
||||
True
|
||||
> d.ns__name == d['ns:name']
|
||||
'''
|
||||
|
||||
_defaults = {}
|
||||
_aliases = {'id': '@id'}
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(CustomDict, self).__init__()
|
||||
for k, v in self._defaults.items():
|
||||
self[k] = copy.copy(v)
|
||||
for arg in args:
|
||||
self.update(arg)
|
||||
for k, v in kwargs.items():
|
||||
self[k] = v
|
||||
return self
|
||||
|
||||
def serializable(self, **kwargs):
|
||||
def ser_or_down(item):
|
||||
if hasattr(item, 'serializable'):
|
||||
return item.serializable(**kwargs)
|
||||
elif isinstance(item, dict):
|
||||
temp = dict()
|
||||
for kp in item:
|
||||
vp = item[kp]
|
||||
temp[kp] = ser_or_down(vp)
|
||||
return temp
|
||||
elif isinstance(item, list) or isinstance(item, set):
|
||||
return list(ser_or_down(i) for i in item)
|
||||
else:
|
||||
return item
|
||||
|
||||
return ser_or_down(self.as_dict(**kwargs))
|
||||
|
||||
def __getitem__(self, key):
|
||||
return self.__dict__[key]
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
'''Do not insert data directly, there might be a property in that key. '''
|
||||
key = self._key_to_attr(key)
|
||||
return setattr(self, key, value)
|
||||
|
||||
def __delitem__(self, key):
|
||||
key = self._key_to_attr(key)
|
||||
del self.__dict__[key]
|
||||
|
||||
def as_dict(self, verbose=True, aliases=False):
|
||||
attrs = self.__dict__.keys()
|
||||
if not verbose and hasattr(self, '_terse_keys'):
|
||||
attrs = self._terse_keys + ['@type', '@id']
|
||||
res = {k: getattr(self, k) for k in attrs
|
||||
if not self._internal_key(k) and hasattr(self, k)}
|
||||
if not aliases:
|
||||
return res
|
||||
for k, ok in self._aliases.items():
|
||||
if ok in res:
|
||||
res[k] = getattr(res, ok)
|
||||
del res[ok]
|
||||
return res
|
||||
|
||||
def __iter__(self):
|
||||
return (k for k in self.__dict__ if not self._internal_key(k))
|
||||
|
||||
def __len__(self):
|
||||
return len(self.__dict__)
|
||||
|
||||
def update(self, other):
|
||||
for k, v in other.items():
|
||||
self[k] = v
|
||||
|
||||
def _attr_to_key(self, key):
|
||||
key = key.replace("__", ":", 1)
|
||||
key = self._aliases.get(key, key)
|
||||
return key
|
||||
|
||||
def _key_to_attr(self, key):
|
||||
if self._internal_key(key):
|
||||
return key
|
||||
|
||||
if key in self._aliases:
|
||||
key = self._aliases[key]
|
||||
else:
|
||||
key = key.replace(":", "__", 1)
|
||||
return key
|
||||
|
||||
def __getattr__(self, key):
|
||||
nkey = self._attr_to_key(key)
|
||||
if nkey in self.__dict__:
|
||||
return self.__dict__[nkey]
|
||||
elif nkey == key:
|
||||
raise AttributeError("Key not found: {}".format(key))
|
||||
return getattr(self, nkey)
|
||||
|
||||
def __setattr__(self, key, value):
|
||||
super(CustomDict, self).__setattr__(self._attr_to_key(key), value)
|
||||
|
||||
def __delattr__(self, key):
|
||||
super(CustomDict, self).__delattr__(self._attr_to_key(key))
|
||||
|
||||
@staticmethod
|
||||
def _internal_key(key):
|
||||
return key[0] == '_'
|
||||
|
||||
def __str__(self):
|
||||
return json.dumps(self.serializable(), sort_keys=True, indent=4)
|
||||
|
||||
def __repr__(self):
|
||||
return json.dumps(self.serializable(), sort_keys=True, indent=4)
|
||||
|
||||
|
||||
_Alias = namedtuple('Alias', ['indict', 'default'])
|
||||
|
||||
|
||||
def alias(key, default=None):
|
||||
return _Alias(key, default)
|
618
senpy/models.py
@ -1,3 +1,18 @@
|
||||
#
|
||||
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
'''
|
||||
Senpy Models.
|
||||
|
||||
@ -6,27 +21,37 @@ For compatibility with Py3 and for easier debugging, this new version drops
|
||||
introspection and adds all arguments to the models.
|
||||
'''
|
||||
from __future__ import print_function
|
||||
from six import string_types
|
||||
from future import standard_library
|
||||
standard_library.install_aliases()
|
||||
|
||||
from future.utils import with_metaclass
|
||||
from past.builtins import basestring
|
||||
|
||||
from jinja2 import Environment, BaseLoader
|
||||
|
||||
import time
|
||||
import copy
|
||||
import json
|
||||
import os
|
||||
import jsonref
|
||||
import jsonschema
|
||||
|
||||
from flask import Response as FlaskResponse
|
||||
from pyld import jsonld
|
||||
|
||||
from rdflib import Graph
|
||||
|
||||
import logging
|
||||
import jmespath
|
||||
|
||||
logging.getLogger('rdflib').setLevel(logging.WARN)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
from rdflib import Graph
|
||||
|
||||
|
||||
from .meta import BaseMeta, CustomDict, alias
|
||||
|
||||
DEFINITIONS_FILE = 'definitions.json'
|
||||
CONTEXT_PATH = os.path.join(
|
||||
os.path.dirname(os.path.realpath(__file__)), 'schemas', 'context.jsonld')
|
||||
CONTEXT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)),
|
||||
'schemas',
|
||||
'context.jsonld')
|
||||
|
||||
|
||||
def get_schema_path(schema_file, absolute=False):
|
||||
@ -45,40 +70,99 @@ def read_schema(schema_file, absolute=False):
|
||||
return jsonref.load(f, base_uri=schema_uri)
|
||||
|
||||
|
||||
base_schema = read_schema(DEFINITIONS_FILE)
|
||||
def dump_schema(schema):
|
||||
return jsonref.dumps(schema)
|
||||
|
||||
|
||||
class Context(dict):
|
||||
@staticmethod
|
||||
def load(context):
|
||||
logging.debug('Loading context: {}'.format(context))
|
||||
if not context:
|
||||
def load_context(context):
|
||||
logging.debug('Loading context: {}'.format(context))
|
||||
if not context:
|
||||
return context
|
||||
elif isinstance(context, list):
|
||||
contexts = []
|
||||
for c in context:
|
||||
contexts.append(load_context(c))
|
||||
return contexts
|
||||
elif isinstance(context, dict):
|
||||
return dict(context)
|
||||
elif isinstance(context, basestring):
|
||||
try:
|
||||
with open(context) as f:
|
||||
return dict(json.loads(f.read()))
|
||||
except IOError:
|
||||
return context
|
||||
elif isinstance(context, list):
|
||||
contexts = []
|
||||
for c in context:
|
||||
contexts.append(Context.load(c))
|
||||
return contexts
|
||||
elif isinstance(context, dict):
|
||||
return Context(context)
|
||||
elif isinstance(context, string_types):
|
||||
try:
|
||||
with open(context) as f:
|
||||
return Context(json.loads(f.read()))
|
||||
except IOError:
|
||||
return context
|
||||
else:
|
||||
raise AttributeError('Please, provide a valid context')
|
||||
else:
|
||||
raise AttributeError('Please, provide a valid context')
|
||||
|
||||
|
||||
base_context = Context.load(CONTEXT_PATH)
|
||||
base_context = load_context(CONTEXT_PATH)
|
||||
|
||||
|
||||
class SenpyMixin(object):
|
||||
def register(rsubclass, rtype=None):
|
||||
BaseMeta.register(rsubclass, rtype)
|
||||
|
||||
|
||||
class BaseModel(with_metaclass(BaseMeta, CustomDict)):
|
||||
'''
|
||||
Entities of the base model are a special kind of dictionary that emulates
|
||||
a JSON-LD object. The structure of the dictionary is checked via JSON-schema.
|
||||
For convenience, the values can also be accessed as attributes
|
||||
(a la Javascript). e.g.:
|
||||
|
||||
>>> myobject.key == myobject['key']
|
||||
True
|
||||
>>> myobject.ns__name == myobject['ns:name']
|
||||
True
|
||||
|
||||
Additionally, subclasses of this class can specify default values for their
|
||||
instances. These defaults are inherited by subclasses. e.g.:
|
||||
|
||||
>>> class NewModel(BaseModel):
|
||||
... mydefault = 5
|
||||
>>> n1 = NewModel()
|
||||
>>> n1['mydefault'] == 5
|
||||
True
|
||||
>>> n1.mydefault = 3
|
||||
>>> n1['mydefault'] = 3
|
||||
True
|
||||
>>> n2 = NewModel()
|
||||
>>> n2 == 5
|
||||
True
|
||||
>>> class SubModel(NewModel):
|
||||
pass
|
||||
>>> subn = SubModel()
|
||||
>>> subn.mydefault == 5
|
||||
True
|
||||
|
||||
Lastly, every subclass that also specifies a schema will get registered, so it
|
||||
is possible to deserialize JSON and get the right type.
|
||||
i.e. to recover an instance of the original class from a plain JSON.
|
||||
|
||||
'''
|
||||
|
||||
# schema_file = DEFINITIONS_FILE
|
||||
_context = base_context["@context"]
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
auto_id = kwargs.pop('_auto_id', False)
|
||||
|
||||
super(BaseModel, self).__init__(*args, **kwargs)
|
||||
|
||||
if auto_id:
|
||||
self.id
|
||||
|
||||
@property
|
||||
def id(self):
|
||||
if '@id' not in self:
|
||||
self['@id'] = 'prefix:{}_{}'.format(type(self).__name__, time.time())
|
||||
return self['@id']
|
||||
|
||||
@id.setter
|
||||
def id(self, value):
|
||||
self['@id'] = value
|
||||
|
||||
def flask(self,
|
||||
in_headers=True,
|
||||
in_headers=False,
|
||||
headers=None,
|
||||
outformat='json-ld',
|
||||
**kwargs):
|
||||
@ -102,26 +186,37 @@ class SenpyMixin(object):
|
||||
})
|
||||
return FlaskResponse(
|
||||
response=content,
|
||||
status=getattr(self, "status", 200),
|
||||
status=self.get('status', 200),
|
||||
headers=headers,
|
||||
mimetype=mimetype)
|
||||
|
||||
def serialize(self, format='json-ld', with_mime=False, **kwargs):
|
||||
js = self.jsonld(**kwargs)
|
||||
if format == 'json-ld':
|
||||
def serialize(self, format='json-ld', with_mime=False,
|
||||
template=None, prefix=None, fields=None, **kwargs):
|
||||
js = self.jsonld(prefix=prefix, **kwargs)
|
||||
if template is not None:
|
||||
rtemplate = Environment(loader=BaseLoader).from_string(template)
|
||||
content = rtemplate.render(**self)
|
||||
mimetype = 'text'
|
||||
elif fields is not None:
|
||||
# Emulate field selection by constructing a template
|
||||
content = json.dumps(jmespath.search(fields, js))
|
||||
mimetype = 'text'
|
||||
elif format == 'json-ld':
|
||||
content = json.dumps(js, indent=2, sort_keys=True)
|
||||
mimetype = "application/json"
|
||||
elif format in ['turtle', ]:
|
||||
logger.debug(js)
|
||||
elif format in ['turtle', 'ntriples']:
|
||||
content = json.dumps(js, indent=2, sort_keys=True)
|
||||
logger.debug(js)
|
||||
context = [self._context, {'prefix': prefix, '@base': prefix}]
|
||||
g = Graph().parse(
|
||||
data=content,
|
||||
format='json-ld',
|
||||
base=kwargs.get('prefix'),
|
||||
context=self._context)
|
||||
prefix=prefix,
|
||||
context=context)
|
||||
logger.debug(
|
||||
'Parsing with prefix: {}'.format(kwargs.get('prefix')))
|
||||
content = g.serialize(format='turtle').decode('utf-8')
|
||||
content = g.serialize(format=format,
|
||||
prefix=prefix)
|
||||
mimetype = 'text/{}'.format(format)
|
||||
else:
|
||||
raise Error('Unknown outformat: {}'.format(format))
|
||||
@ -130,212 +225,307 @@ class SenpyMixin(object):
|
||||
else:
|
||||
return content
|
||||
|
||||
def serializable(self):
|
||||
def ser_or_down(item):
|
||||
if hasattr(item, 'serializable'):
|
||||
return item.serializable()
|
||||
elif isinstance(item, dict):
|
||||
temp = dict()
|
||||
for kp in item:
|
||||
vp = item[kp]
|
||||
temp[kp] = ser_or_down(vp)
|
||||
return temp
|
||||
elif isinstance(item, list):
|
||||
return list(ser_or_down(i) for i in item)
|
||||
else:
|
||||
return item
|
||||
|
||||
return ser_or_down(self._plain_dict())
|
||||
|
||||
def jsonld(self,
|
||||
with_context=True,
|
||||
with_context=False,
|
||||
context_uri=None,
|
||||
prefix=None,
|
||||
expanded=False):
|
||||
ser = self.serializable()
|
||||
base=None,
|
||||
expanded=False,
|
||||
**kwargs):
|
||||
|
||||
result = self.serializable(**kwargs)
|
||||
|
||||
result = jsonld.compact(
|
||||
ser,
|
||||
self._context,
|
||||
options={
|
||||
'base': prefix,
|
||||
'expandContext': self._context,
|
||||
'senpy': prefix
|
||||
})
|
||||
if context_uri:
|
||||
result['@context'] = context_uri
|
||||
if expanded:
|
||||
result = jsonld.expand(
|
||||
result, options={'base': prefix,
|
||||
'expandContext': self._context})
|
||||
result,
|
||||
options={
|
||||
'expandContext': [
|
||||
self._context,
|
||||
{
|
||||
'prefix': prefix,
|
||||
'endpoint': prefix
|
||||
}
|
||||
]
|
||||
}
|
||||
)[0]
|
||||
if not with_context:
|
||||
del result['@context']
|
||||
return result
|
||||
try:
|
||||
del result['@context']
|
||||
except KeyError:
|
||||
pass
|
||||
elif context_uri:
|
||||
result['@context'] = context_uri
|
||||
else:
|
||||
result['@context'] = self._context
|
||||
|
||||
def to_JSON(self, *args, **kwargs):
|
||||
js = json.dumps(self.jsonld(*args, **kwargs), indent=4, sort_keys=True)
|
||||
return js
|
||||
return result
|
||||
|
||||
def validate(self, obj=None):
|
||||
if not obj:
|
||||
obj = self
|
||||
if hasattr(obj, "jsonld"):
|
||||
obj = obj.jsonld()
|
||||
jsonschema.validate(obj, self.schema)
|
||||
self._validator.validate(obj)
|
||||
|
||||
def __str__(self):
|
||||
return str(self.to_JSON())
|
||||
def prov(self, another):
|
||||
self['prov:wasGeneratedBy'] = another.id
|
||||
|
||||
|
||||
class BaseModel(SenpyMixin, dict):
|
||||
|
||||
schema = base_schema
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
if 'id' in kwargs:
|
||||
self.id = kwargs.pop('id')
|
||||
elif kwargs.pop('_auto_id', True):
|
||||
self.id = '_:{}_{}'.format(type(self).__name__, time.time())
|
||||
temp = dict(*args, **kwargs)
|
||||
|
||||
for obj in [
|
||||
self.schema,
|
||||
] + self.schema.get('allOf', []):
|
||||
for k, v in obj.get('properties', {}).items():
|
||||
if 'default' in v and k not in temp:
|
||||
temp[k] = copy.deepcopy(v['default'])
|
||||
|
||||
for i in temp:
|
||||
nk = self._get_key(i)
|
||||
if nk != i:
|
||||
temp[nk] = temp[i]
|
||||
del temp[i]
|
||||
try:
|
||||
temp['@type'] = getattr(self, '@type')
|
||||
except AttributeError:
|
||||
logger.warn('Creating an instance of an unknown model')
|
||||
super(BaseModel, self).__init__(temp)
|
||||
|
||||
def _get_key(self, key):
|
||||
key = key.replace("__", ":", 1)
|
||||
return key
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
dict.__setitem__(self, key, value)
|
||||
|
||||
def __delitem__(self, key):
|
||||
dict.__delitem__(self, key)
|
||||
|
||||
def __getattr__(self, key):
|
||||
try:
|
||||
return self.__getitem__(self._get_key(key))
|
||||
except KeyError:
|
||||
raise AttributeError(key)
|
||||
|
||||
def __setattr__(self, key, value):
|
||||
self.__setitem__(self._get_key(key), value)
|
||||
|
||||
def __delattr__(self, key):
|
||||
self.__delitem__(self._get_key(key))
|
||||
|
||||
def _plain_dict(self):
|
||||
d = {k: v for (k, v) in self.items() if k[0] != "_"}
|
||||
if 'id' in d:
|
||||
d["@id"] = d.pop('id')
|
||||
return d
|
||||
def subtypes():
|
||||
return BaseMeta._subtypes
|
||||
|
||||
|
||||
_subtypes = {}
|
||||
|
||||
|
||||
def register(rsubclass, rtype=None):
|
||||
_subtypes[rtype or rsubclass.__name__] = rsubclass
|
||||
|
||||
|
||||
def from_dict(indict):
|
||||
target = indict.get('@type', None)
|
||||
if target and target in _subtypes:
|
||||
cls = _subtypes[target]
|
||||
else:
|
||||
def from_dict(indict, cls=None, warn=True):
|
||||
if not cls:
|
||||
target = indict.get('@type', None)
|
||||
cls = BaseModel
|
||||
return cls(**indict)
|
||||
try:
|
||||
cls = subtypes()[target]
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
if cls == BaseModel and warn:
|
||||
logger.warning('Created an instance of an unknown model')
|
||||
|
||||
outdict = dict()
|
||||
for k, v in indict.items():
|
||||
if k == '@context':
|
||||
pass
|
||||
elif isinstance(v, dict):
|
||||
v = from_dict(indict[k])
|
||||
elif isinstance(v, list):
|
||||
v = v[:]
|
||||
for ix, v2 in enumerate(v):
|
||||
if isinstance(v2, dict):
|
||||
v[ix] = from_dict(v2)
|
||||
outdict[k] = copy.copy(v)
|
||||
return cls(**outdict)
|
||||
|
||||
|
||||
def from_json(injson):
|
||||
def from_string(string, **kwargs):
|
||||
return from_dict(json.loads(string), **kwargs)
|
||||
|
||||
|
||||
def from_json(injson, **kwargs):
|
||||
indict = json.loads(injson)
|
||||
return from_dict(indict)
|
||||
return from_dict(indict, **kwargs)
|
||||
|
||||
|
||||
def from_schema(name, schema_file=None, base_classes=None):
|
||||
base_classes = base_classes or []
|
||||
base_classes.append(BaseModel)
|
||||
schema_file = schema_file or '{}.json'.format(name)
|
||||
class_name = '{}{}'.format(name[0].upper(), name[1:])
|
||||
newclass = type(class_name, tuple(base_classes), {})
|
||||
setattr(newclass, '@type', name)
|
||||
setattr(newclass, 'schema', read_schema(schema_file))
|
||||
setattr(newclass, 'class_name', class_name)
|
||||
register(newclass, name)
|
||||
return newclass
|
||||
class Entry(BaseModel):
|
||||
schema = 'entry'
|
||||
|
||||
text = alias('nif:isString')
|
||||
sentiments = alias('marl:hasOpinion', [])
|
||||
emotions = alias('onyx:hasEmotionSet', [])
|
||||
|
||||
|
||||
def _add_from_schema(*args, **kwargs):
|
||||
generatedClass = from_schema(*args, **kwargs)
|
||||
globals()[generatedClass.__name__] = generatedClass
|
||||
del generatedClass
|
||||
class Sentiment(BaseModel):
|
||||
schema = 'sentiment'
|
||||
|
||||
polarity = alias('marl:hasPolarity')
|
||||
polarityValue = alias('marl:polarityValue')
|
||||
|
||||
|
||||
for i in [
|
||||
'analysis',
|
||||
'emotion',
|
||||
'emotionConversion',
|
||||
'emotionConversionPlugin',
|
||||
'emotionAnalysis',
|
||||
'emotionModel',
|
||||
'emotionPlugin',
|
||||
'emotionSet',
|
||||
'entry',
|
||||
'plugin',
|
||||
'plugins',
|
||||
'response',
|
||||
'results',
|
||||
'sentiment',
|
||||
'sentimentPlugin',
|
||||
'suggestion',
|
||||
]:
|
||||
_add_from_schema(i)
|
||||
class Error(BaseModel, Exception):
|
||||
schema = 'error'
|
||||
|
||||
_ErrorModel = from_schema('error')
|
||||
|
||||
|
||||
class Error(SenpyMixin, BaseException):
|
||||
def __init__(self, message, *args, **kwargs):
|
||||
super(Error, self).__init__(self, message, message)
|
||||
self._error = _ErrorModel(message=message, *args, **kwargs)
|
||||
def __init__(self, message='Generic senpy exception', *args, **kwargs):
|
||||
Exception.__init__(self, message)
|
||||
super(Error, self).__init__(*args, **kwargs)
|
||||
self.message = message
|
||||
|
||||
def __getitem__(self, key):
|
||||
return self._error[key]
|
||||
def __str__(self):
|
||||
if not hasattr(self, 'errors'):
|
||||
return self.message
|
||||
return '{}:\n\t{}'.format(self.message, self.errors)
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
self._error[key] = value
|
||||
|
||||
def __delitem__(self, key):
|
||||
del self._error[key]
|
||||
|
||||
def __getattr__(self, key):
|
||||
if key != '_error' and hasattr(self._error, key):
|
||||
return getattr(self._error, key)
|
||||
raise AttributeError(key)
|
||||
|
||||
def __setattr__(self, key, value):
|
||||
if key != '_error':
|
||||
return setattr(self._error, key, value)
|
||||
else:
|
||||
super(Error, self).__setattr__(key, value)
|
||||
|
||||
def __delattr__(self, key):
|
||||
delattr(self._error, key)
|
||||
def __hash__(self):
|
||||
return Exception.__hash__(self)
|
||||
|
||||
|
||||
register(Error, 'error')
|
||||
class AggregatedEvaluation(BaseModel):
|
||||
schema = 'aggregatedEvaluation'
|
||||
|
||||
evaluations = alias('senpy:evaluations', [])
|
||||
|
||||
|
||||
class Dataset(BaseModel):
|
||||
schema = 'dataset'
|
||||
|
||||
|
||||
class Datasets(BaseModel):
|
||||
schema = 'datasets'
|
||||
|
||||
datasets = []
|
||||
|
||||
|
||||
class Emotion(BaseModel):
|
||||
schema = 'emotion'
|
||||
|
||||
|
||||
class EmotionConversion(BaseModel):
|
||||
schema = 'emotionConversion'
|
||||
|
||||
|
||||
class EmotionConversionPlugin(BaseModel):
|
||||
schema = 'emotionConversionPlugin'
|
||||
|
||||
|
||||
class EmotionAnalysis(BaseModel):
|
||||
schema = 'emotionAnalysis'
|
||||
|
||||
|
||||
class EmotionModel(BaseModel):
|
||||
schema = 'emotionModel'
|
||||
onyx__hasEmotionCategory = []
|
||||
|
||||
|
||||
class EmotionPlugin(BaseModel):
|
||||
schema = 'emotionPlugin'
|
||||
|
||||
|
||||
class EmotionSet(BaseModel):
|
||||
schema = 'emotionSet'
|
||||
|
||||
onyx__hasEmotion = []
|
||||
|
||||
|
||||
class Evaluation(BaseModel):
|
||||
schema = 'evaluation'
|
||||
|
||||
metrics = alias('senpy:metrics', [])
|
||||
|
||||
|
||||
class Entity(BaseModel):
|
||||
schema = 'entity'
|
||||
|
||||
|
||||
class Help(BaseModel):
|
||||
schema = 'help'
|
||||
|
||||
|
||||
class Metric(BaseModel):
|
||||
schema = 'metric'
|
||||
|
||||
|
||||
class Parameter(BaseModel):
|
||||
schema = 'parameter'
|
||||
|
||||
|
||||
class Plugins(BaseModel):
|
||||
schema = 'plugins'
|
||||
|
||||
plugins = []
|
||||
|
||||
|
||||
class Response(BaseModel):
|
||||
schema = 'response'
|
||||
|
||||
|
||||
class Results(BaseModel):
|
||||
schema = 'results'
|
||||
|
||||
_terse_keys = ['entries', ]
|
||||
|
||||
activities = []
|
||||
entries = []
|
||||
|
||||
def activity(self, id):
|
||||
for i in self.activities:
|
||||
if i.id == id:
|
||||
return i
|
||||
return None
|
||||
|
||||
|
||||
class SentimentPlugin(BaseModel):
|
||||
schema = 'sentimentPlugin'
|
||||
|
||||
|
||||
class Suggestion(BaseModel):
|
||||
schema = 'suggestion'
|
||||
|
||||
|
||||
class Topic(BaseModel):
|
||||
schema = 'topic'
|
||||
|
||||
|
||||
class Analysis(BaseModel):
|
||||
'''
|
||||
A prov:Activity that results of executing a Plugin on an entry with a set of
|
||||
parameters.
|
||||
'''
|
||||
schema = 'analysis'
|
||||
|
||||
parameters = alias('prov:used', [])
|
||||
algorithm = alias('prov:wasAssociatedWith', [])
|
||||
|
||||
@property
|
||||
def params(self):
|
||||
outdict = {}
|
||||
outdict['algorithm'] = self.algorithm
|
||||
for param in self.parameters:
|
||||
outdict[param['name']] = param['value']
|
||||
return outdict
|
||||
|
||||
@params.setter
|
||||
def params(self, value):
|
||||
for k, v in value.items():
|
||||
for param in self.parameters:
|
||||
if param.name == k:
|
||||
param.value = v
|
||||
break
|
||||
else:
|
||||
self.parameters.append(Parameter(name=k, value=v)) # noqa: F821
|
||||
|
||||
def param(self, key, default=None):
|
||||
for param in self.parameters:
|
||||
if param['name'] == key:
|
||||
return param['value']
|
||||
return default
|
||||
|
||||
@property
|
||||
def plugin(self):
|
||||
return self._plugin
|
||||
|
||||
@plugin.setter
|
||||
def plugin(self, value):
|
||||
self._plugin = value
|
||||
self['prov:wasAssociatedWith'] = value.id
|
||||
|
||||
def run(self, request):
|
||||
return self.plugin.process(request, self)
|
||||
|
||||
|
||||
class Plugin(BaseModel):
|
||||
schema = 'plugin'
|
||||
extra_params = {}
|
||||
|
||||
def activity(self, parameters=None):
|
||||
'''Generate an Analysis (prov:Activity) from this plugin and the given parameters'''
|
||||
a = Analysis()
|
||||
a.plugin = self
|
||||
if parameters:
|
||||
a.params = parameters
|
||||
return a
|
||||
|
||||
|
||||
# More classes could be added programmatically
|
||||
|
||||
def _class_from_schema(name, schema=None, schema_file=None, base_classes=None):
|
||||
base_classes = base_classes or []
|
||||
base_classes.append(BaseModel)
|
||||
attrs = {}
|
||||
if schema:
|
||||
attrs['schema'] = schema
|
||||
elif schema_file:
|
||||
attrs['schema_file'] = schema_file
|
||||
else:
|
||||
attrs['schema'] = name
|
||||
name = "".join((name[0].upper(), name[1:]))
|
||||
return BaseMeta(name, base_classes, attrs)
|
||||
|
||||
|
||||
def _add_class_from_schema(*args, **kwargs):
|
||||
generatedClass = _class_from_schema(*args, **kwargs)
|
||||
globals()[generatedClass.__name__] = generatedClass
|
||||
del generatedClass
|
||||
|