mirror of https://github.com/gsi-upm/senpy
synced 2024-11-14 12:42:27 +00:00

Add 'community-plugins/' from commit '4c73797246c6aff8d055abfef73d3f0d34b933a8'

git-subtree-dir: community-plugins
git-subtree-mainline: 7f712952be
git-subtree-split: 4c73797246

This commit is contained in commit e1d888ebd6.
community-plugins/.dockerignore (new file, 2 lines)
@@ -0,0 +1,2 @@
.*
data
community-plugins/.gitignore (vendored, new file, 65 lines)
@@ -0,0 +1,65 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover

# Translations
*.mo
*.pot

# Django stuff:
*.log

# Sphinx documentation
docs/_build/

# PyBuilder
target/
.*
*.pyc
**/__pycache__
*/wordnet1.6
*/Corpus
*/a-hierarchy.xml
*/a-synsets.xml
*/wn16.txt
community-plugins/.gitlab-ci.yml (new file, 51 lines)
@@ -0,0 +1,51 @@
# Uncomment if you want to use docker-in-docker
# image: gsiupm/dockermake:latest
# services:
# - docker:dind
# When using dind, it's wise to use the overlayfs driver for
# improved performance.

variables:
  GIT_SUBMODULE_STRATEGY: recursive

stages:
  - build
  - test
  - push
  - deploy
  - clean

before_script:
  - make -e login

build:
  stage: build
  script:
    - make -e docker-build
  only:
    - master
    - fix-makefiles

test:
  stage: test
  script:
    - make -e test

push:
  stage: push
  script:
    - make -e docker-push

deploy:
  stage: deploy
  script:
    - make -e deploy
  only:
    - master
    - fix-makefiles

clean:
  stage: clean
  script:
    - make -e clean
  when: manual
community-plugins/.gitmodules (vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
[submodule "data"]
	path = data
	url = ../data
community-plugins/.makefiles/README.md (new file, 27 lines)
@@ -0,0 +1,27 @@
These makefiles are recipes for several common tasks in different types of projects.
To add them to your project, simply run:

```
git remote add makefiles ssh://git@lab.cluster.gsi.dit.upm.es:2200/docs/templates/makefiles.git
git subtree add --prefix=.makefiles/ makefiles master
touch Makefile
echo "include .makefiles/base.mk" >> Makefile
```

Now you can take advantage of the recipes.
For instance, to add useful targets for a python project, just add this to your Makefile:

```
include .makefiles/python.mk
```

You may need to set special variables, such as the name of your project or the python versions you're targeting (see the sketch below).
Take a look at each specific `.mk` file for more information, and at the `Makefile` in the [senpy](https://lab.cluster.gsi.dit.upm.es/senpy/senpy) project for a real use case.
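For example, a project Makefile might override these variables before including the recipes. A minimal sketch (the variable names `NAME` and `PYVERSIONS` come from `base.mk` and `python.mk`; `myproject` is a placeholder):

```
NAME = myproject          # defaults to the directory name (see base.mk)
PYVERSIONS = 3.5 3.6      # versions built and tested by python.mk

include .makefiles/base.mk
include .makefiles/python.mk
```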
If you update the makefiles from your repository, make sure to push the changes for review upstream (to this repository):

```
make makefiles-push
```

It will automatically commit all unstaged changes in the .makefiles folder.
community-plugins/.makefiles/base.mk (new file, 36 lines)
@@ -0,0 +1,36 @@
export
NAME ?= $(shell basename $(CURDIR))
VERSION ?= $(shell git describe --tags --dirty 2>/dev/null)

ifeq ($(VERSION),)
VERSION := unknown
endif

# Get the location of this makefile.
MK_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))

-include .env
-include ../.env

help: ## Show this help.
	@fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/\(.*:\)[^#]*##\s*\(.*\)/\1\t\2/' | column -t -s " "

config: ## Load config from the environment. You should run it once in every session before other tasks. Run: eval $(make config)
	@awk '{ print "export " $$0}' ../.env
	@awk '{ print "export " $$0}' .env
	@echo "# Please, run: "
	@echo "# eval \$$(make config)"
# If you need to run a command on the key/value pairs, use this:
# @awk '{ split($$0, a, "="); "echo " a[2] " | base64 -w 0" |& getline b64; print "export " a[1] "=" a[2]; print "export " a[1] "_BASE64=" b64}' .env

ci: ## Run a task using gitlab-runner. Only use it to debug problems in the CI pipeline.
	gitlab-runner exec shell --builds-dir '.builds' --env CI_PROJECT_NAME=$(NAME) ${action}

include $(MK_DIR)/makefiles.mk
include $(MK_DIR)/docker.mk
include $(MK_DIR)/git.mk

info:: ## List all variables
	env

.PHONY:: config help ci
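A typical local session with these recipes might look like this (illustrative; `make ci` wraps `gitlab-runner exec shell`, so its `action` argument is a job name from `.gitlab-ci.yml`):

```
eval "$(make config)"   # export the variables defined in .env / ../.env
make help               # list every target documented with a trailing '## comment'
make ci action=test     # replay the 'test' CI job locally with gitlab-runner
```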
community-plugins/.makefiles/docker.mk (new file, 51 lines)
@@ -0,0 +1,51 @@
ifndef IMAGENAME
ifdef CI_REGISTRY_IMAGE
IMAGENAME=$(CI_REGISTRY_IMAGE)
else
IMAGENAME=$(NAME)
endif
endif

IMAGEWTAG?=$(IMAGENAME):$(VERSION)
DOCKER_FLAGS?=-ti
DOCKER_CMD?=

docker-login: ## Log in to the registry. It will only be used in the server, or when running a CI task locally (if CI_BUILD_TOKEN is set).
ifeq ($(CI_BUILD_TOKEN),)
	@echo "Not logging in to the docker registry" "$(CI_REGISTRY)"
else
	@docker login -u gitlab-ci-token -p $(CI_BUILD_TOKEN) $(CI_REGISTRY)
endif
ifeq ($(HUB_USER),)
	@echo "Not logging in to the global docker registry"
else
	@docker login -u $(HUB_USER) -p $(HUB_PASSWORD)
endif

docker-clean: ## Remove docker credentials
ifeq ($(HUB_USER),)
else
	@docker logout
endif

docker-run: ## Run the generic docker image
	docker run $(DOCKER_FLAGS) $(IMAGEWTAG) $(DOCKER_CMD)

docker-build: ## Build a generic docker image
	docker build . -t $(IMAGEWTAG)

docker-push: docker-build docker-login ## Push a generic docker image
	docker push $(IMAGEWTAG)

docker-latest-push: docker-build ## Push the latest image
	docker tag $(IMAGEWTAG) $(IMAGENAME)
	docker push $(IMAGENAME)

login:: docker-login

clean:: docker-clean

docker-info:
	@echo IMAGEWTAG=${IMAGEWTAG}

.PHONY:: docker-login docker-clean login clean
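Because `DOCKER_FLAGS`, `DOCKER_CMD` and `IMAGENAME` are plain `?=` defaults, they can be overridden per invocation (illustrative; the registry path is a hypothetical example):

```
make docker-run DOCKER_CMD=/bin/bash    # open a shell instead of the image's default command
make docker-build IMAGENAME=registry.example.com/senpy-plugins
```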
community-plugins/.makefiles/git.mk (new file, 28 lines)
@@ -0,0 +1,28 @@
commit:
	git commit -a

tag:
	git tag ${VERSION}

git-push::
	git push --tags -u origin HEAD

git-pull:
	git pull --all

push-github: ## Push the code to github. You need to set up GITHUB_DEPLOY_KEY
ifeq ($(GITHUB_DEPLOY_KEY),)
else
	$(eval KEY_FILE := "$(shell mktemp)")
	@printf '%b' '$(GITHUB_DEPLOY_KEY)' > $(KEY_FILE)
	@git remote rm github-deploy || true
	git remote add github-deploy $(GITHUB_REPO)
	-@GIT_SSH_COMMAND="ssh -i $(KEY_FILE)" git fetch github-deploy $(CI_COMMIT_REF_NAME)
	@GIT_SSH_COMMAND="ssh -i $(KEY_FILE)" git push github-deploy HEAD:$(CI_COMMIT_REF_NAME)
	rm $(KEY_FILE)
endif

push:: git-push
pull:: git-pull

.PHONY:: commit tag push git-push git-pull push-github
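For `push-github` to work, two variables must be defined, typically in the `.env` file that `base.mk` includes. A hypothetical sketch (both values are placeholders; the recipe's `printf '%b'` expands the `\n` escapes in the key into real newlines):

```
GITHUB_REPO=git@github.com:gsi-upm/senpy-plugins-community.git
GITHUB_DEPLOY_KEY=-----BEGIN OPENSSH PRIVATE KEY-----\n...\n-----END OPENSSH PRIVATE KEY-----
```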
community-plugins/.makefiles/k8s.mk (new file, 51 lines)
@@ -0,0 +1,51 @@
# Deployment with Kubernetes

# KUBE_CA_PEM_FILE is the path of a certificate file. It is automatically set by GitLab
# if you enable Kubernetes integration in a project.
#
# As of this writing, Kubernetes integration can not be set at the group level, so it has to
# be manually set in every project.
# Alternatively, we use a custom KUBE_CA_BUNDLE environment variable, which can be set at
# the group level. In this case, the variable contains the whole content of the certificate,
# which we dump to a temporary file.
#
# Check if KUBE_CA_PEM_FILE exists. Otherwise, create it from KUBE_CA_BUNDLE.
KUBE_CA_TEMP=false
ifndef KUBE_CA_PEM_FILE
KUBE_CA_PEM_FILE:=$$PWD/.ca.crt
CREATED:=$(shell printf '%b\n' '$(KUBE_CA_BUNDLE)' > $(KUBE_CA_PEM_FILE))
endif
KUBE_TOKEN?=""
KUBE_NAMESPACE?=$(NAME)
KUBECTL=docker run --rm -v $(KUBE_CA_PEM_FILE):/tmp/ca.pem -i lachlanevenson/k8s-kubectl --server="$(KUBE_URL)" --token="$(KUBE_TOKEN)" --certificate-authority="/tmp/ca.pem" -n $(KUBE_NAMESPACE)
CI_COMMIT_REF_NAME?=master

info:: ## Print variables. Useful for debugging.
	@echo "#KUBERNETES"
	@echo KUBE_URL=$(KUBE_URL)
	@echo KUBE_CA_PEM_FILE=$(KUBE_CA_PEM_FILE)
	@echo KUBE_CA_BUNDLE=$$KUBE_CA_BUNDLE
	@echo KUBE_TOKEN=$(KUBE_TOKEN)
	@echo KUBE_NAMESPACE=$(KUBE_NAMESPACE)
	@echo KUBECTL=$(KUBECTL)

	@echo "#CI"
	@echo CI_PROJECT_NAME=$(CI_PROJECT_NAME)
	@echo CI_REGISTRY=$(CI_REGISTRY)
	@echo CI_REGISTRY_USER=$(CI_REGISTRY_USER)
	@echo CI_COMMIT_REF_NAME=$(CI_COMMIT_REF_NAME)
	@echo "CREATED=$(CREATED)"

#
# Deployment and advanced features
#

deploy: ## Deploy to kubernetes using the credentials in KUBE_CA_PEM_FILE (or KUBE_CA_BUNDLE) and KUBE_TOKEN
	@ls k8s/*.yaml k8s/*.yml k8s/*.tmpl 2>/dev/null || true
	@cat k8s/*.yaml k8s/*.yml k8s/*.tmpl 2>/dev/null | envsubst | $(KUBECTL) apply -f -

deploy-check: ## Get the deployed configuration.
	@$(KUBECTL) get deploy,pods,svc,ingress

.PHONY:: info deploy deploy-check
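To run `deploy` from a local shell instead of CI, the cluster credentials can be exported first. An illustrative session (all paths and the endpoint are hypothetical; the variable names are the ones read above):

```
export KUBE_URL=https://k8s.example.com            # hypothetical cluster endpoint
export KUBE_TOKEN="$(cat ~/secrets/kube-token)"    # hypothetical token file
export KUBE_CA_PEM_FILE=~/secrets/kube-ca.crt      # skips the KUBE_CA_BUNDLE temp-file path
make deploy KUBE_NAMESPACE=senpycommunity
```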
community-plugins/.makefiles/makefiles.mk (new file, 17 lines)
@@ -0,0 +1,17 @@
makefiles-remote:
	@git remote add makefiles ssh://git@lab.cluster.gsi.dit.upm.es:2200/docs/templates/makefiles.git 2>/dev/null || true

makefiles-commit: makefiles-remote
	git add -f .makefiles
	git commit -em "Updated makefiles from ${NAME}"

makefiles-push:
	git subtree push --prefix=.makefiles/ makefiles $(NAME)

makefiles-pull: makefiles-remote
	git subtree pull --prefix=.makefiles/ makefiles master --squash

pull:: makefiles-pull
push:: makefiles-push

.PHONY:: makefiles-remote makefiles-commit makefiles-push makefiles-pull pull push
community-plugins/.makefiles/precommit.mk (new file, 5 lines)
@@ -0,0 +1,5 @@
init: ## Init pre-commit hooks (i.e. enforcing format checking before allowing a commit)
	pip install --user pre-commit
	pre-commit install

.PHONY:: init
community-plugins/.makefiles/python.mk (new file, 100 lines)
@@ -0,0 +1,100 @@
PYVERSIONS ?= 3.5
PYMAIN ?= $(firstword $(PYVERSIONS))
TARNAME ?= $(NAME)-$(VERSION).tar.gz
VERSIONFILE ?= $(NAME)/VERSION

DEVPORT ?= 6000

.FORCE:

version: .FORCE
	@echo $(VERSION) > $(VERSIONFILE)
	@echo $(VERSION)

yapf: ## Format python code
	yapf -i -r $(NAME)
	yapf -i -r tests

dockerfiles: $(addprefix Dockerfile-,$(PYVERSIONS)) ## Generate dockerfiles for each python version
	@unlink Dockerfile >/dev/null
	ln -s Dockerfile-$(PYMAIN) Dockerfile

Dockerfile-%: Dockerfile.template ## Generate a specific dockerfile (e.g. Dockerfile-2.7)
	sed "s/{{PYVERSION}}/$*/" Dockerfile.template > Dockerfile-$*
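The `Dockerfile.template` itself is not included in this subtree; the only contract the rule above imposes is a `{{PYVERSION}}` placeholder. A minimal template could therefore look like this (an assumed sketch, not the project's actual template):

```
FROM python:{{PYVERSION}}
WORKDIR /usr/src/app
COPY . .
RUN pip install .
```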
quick_build: $(addprefix build-, $(PYMAIN))

build: $(addprefix build-, $(PYVERSIONS)) ## Build all images / python versions

build-%: version Dockerfile-% ## Build a specific version (e.g. build-2.7)
	docker build -t '$(IMAGEWTAG)-python$*' --cache-from $(IMAGENAME):python$* -f Dockerfile-$* .;

dev-%: ## Launch a specific development environment using docker (e.g. dev-2.7)
	@docker start $(NAME)-dev$* || (\
		$(MAKE) build-$*; \
		docker run -d -w /usr/src/app/ -p $(DEVPORT):5000 -v $$PWD:/usr/src/app --entrypoint=/bin/bash -ti --name $(NAME)-dev$* '$(IMAGEWTAG)-python$*'; \
	)
	docker exec -ti $(NAME)-dev$* bash

dev: dev-$(PYMAIN) ## Launch a development environment using docker, using the default python version

quick_test: test-$(PYMAIN)

test-%: ## Run setup.py test in an isolated container, built from the base image (e.g. test-2.7)
# This speeds tests up because the image has most (if not all) of the dependencies already.
	docker rm $(NAME)-test-$* || true
	docker create -ti --name $(NAME)-test-$* --entrypoint="" -w /usr/src/app/ $(IMAGENAME):python$* python setup.py test
	docker cp . $(NAME)-test-$*:/usr/src/app
	docker start -a $(NAME)-test-$*

test: $(addprefix test-,$(PYVERSIONS)) ## Run the tests for every python version

run-%: build-%
	docker run --rm -p $(DEVPORT):5000 -ti '$(IMAGEWTAG)-python$*' --default-plugins

run: run-$(PYMAIN)

# PyPI - Upload a package

dist/$(TARNAME): version
	python setup.py sdist;

sdist: dist/$(TARNAME) ## Generate the distribution file (sdist)

pip_test-%: sdist ## Test the distribution file using pip install and a specific python version (e.g. pip_test-2.7)
	docker run --rm -v $$PWD/dist:/dist/ python:$* pip install /dist/$(TARNAME);

pip_test: $(addprefix pip_test-,$(PYVERSIONS)) ## Test pip installation for every python version

pip_upload: pip_test ## Upload package to pip
	python setup.py sdist upload ;

# Pushing to docker

push-latest: $(addprefix push-latest-,$(PYVERSIONS)) ## Push the "latest" tag to dockerhub
	docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGEWTAG)'
	docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGENAME)'
	docker push '$(IMAGENAME):latest'
	docker push '$(IMAGEWTAG)'

push-latest-%: build-% ## Push the latest image for a specific python version
	docker tag $(IMAGENAME):$(VERSION)-python$* $(IMAGENAME):python$*
	docker push $(IMAGENAME):$(VERSION)-python$*
	docker push $(IMAGENAME):python$*

push-%: build-% ## Push the image of the current version (tagged). e.g. push-2.7
	docker push $(IMAGENAME):$(VERSION)-python$*

push:: $(addprefix push-,$(PYVERSIONS)) ## Push an image with the current version for every python version
	docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGEWTAG)'
	docker push $(IMAGENAME):$(VERSION)

clean:: ## Clean older docker images and containers related to this project and dev environments
	@docker stop $(addprefix $(NAME)-dev,$(PYVERSIONS)) 2>/dev/null || true
	@docker rm $(addprefix $(NAME)-dev,$(PYVERSIONS)) 2>/dev/null || true
	@docker ps -a | grep $(IMAGENAME) | awk '{ split($$2, vers, "-"); if(vers[0] != "${VERSION}"){ print $$1;}}' | xargs docker rm -v 2>/dev/null|| true
	@docker images | grep $(IMAGENAME) | awk '{ split($$2, vers, "-"); if(vers[0] != "${VERSION}"){ print $$1":"$$2;}}' | xargs docker rmi 2>/dev/null|| true

.PHONY:: yapf dockerfiles Dockerfile-% quick_build build build-% dev-% quick-dev test quick_test push-latest push-latest-% push-% push version .FORCE
community-plugins/.travis.yml (new file, 12 lines)
@@ -0,0 +1,12 @@
language: python
python:
  - "2.7"
  - "3.4"
env:
  - PLUGIN=example-plugin
  - PLUGIN=sentiText
install:
  - "pip install senpy pytest"
  - "python -m senpy --only-install -f $PLUGIN"
script:
  - "py.test $PLUGIN"
community-plugins/Dockerfile (new file, 1 line)
@@ -0,0 +1 @@
FROM gsiupm/senpy:1.0.1-python3.6
community-plugins/LICENSE (new file, 202 lines)
@@ -0,0 +1,202 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
community-plugins/Makefile (new file, 30 lines)
@@ -0,0 +1,30 @@
PYVERSION=3.7
NAME=senpycommunity
REPO=gsiupm
PLUGINS= $(filter %/, $(wildcard */))
IMAGENAME=gsiupm/senpy-plugins-community
DOCKER_FLAGS=-e MOCK_REQUESTS=$(MOCK_REQUESTS)
DEV_PORT?=5000

ifdef SENPY_FOLDER
DOCKER_FLAGS+= -v $(realpath $(SENPY_FOLDER)):/usr/src/app/
endif

all: build run

test-fast-%: docker-build
	docker run $(DOCKER_FLAGS) -v $$PWD/$*:/senpy-plugins/ -v $$PWD/data:/data/ --rm $(IMAGEWTAG) --only-test $(TEST_FLAGS)

test-fast: test-fast-/

test: docker-build
	docker run $(DOCKER_FLAGS) -v $$PWD/data:/data/ --rm $(IMAGEWTAG) --only-test $(TEST_FLAGS)

dev: docker-build
	docker run -p $(DEV_PORT):5000 $(DOCKER_FLAGS) -ti -v $$PWD/$*:/senpy-plugins/ --entrypoint /bin/bash -v $$PWD/data:/data/ --rm $(IMAGEWTAG)

.PHONY:: test test-fast dev

include .makefiles/base.mk
include .makefiles/k8s.mk
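With the `test-fast-%` pattern rule above, a single plugin directory can be tested in isolation (illustrative; `emotion-anew` is one of the plugin folders in this repository, and `MOCK_REQUESTS` is forwarded into the container through `DOCKER_FLAGS`):

```
make test-fast-emotion-anew    # mount only ./emotion-anew into the container and run its tests
MOCK_REQUESTS=true make test   # run the full suite with network requests mocked
```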
community-plugins/README.md (new file, 76 lines)
@@ -0,0 +1,76 @@
# Senpy Plugins

# Requirements

Some of these plugins require licensed files to run, such as lexicons or corpora.
You can **manually download these resources and add them to the `data` folder**.

Most plugins will look for these resources on activation.
By default, we set the flag `--allow-fail` in senpy, so if a plugin fails to activate, the server will still run with the remaining plugins.
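For instance, the `emotion-anew` plugin expects the ANEW lexicons under a `Dictionary` directory (a hypothetical layout; the exact paths each plugin expects are set in its source, here `anew_path_en` and `anew_path_es`):

```
mkdir -p data/Dictionary
cp ~/Downloads/ANEW2010All.txt data/Dictionary/       # English lexicon (licensed)
cp ~/Downloads/"Redondo(2007).csv" data/Dictionary/   # Spanish lexicon (licensed)
```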
# Running

## Using docker

To deploy all the plugins in this repository, run:

```
docker-compose up
```

A server should now be available at `http://localhost:5000`.

Alternatively, you can use docker manually with the version of senpy you wish:

```
docker run --rm -ti -p 5000:5000 -v $PWD:/senpy-plugins gsiupm/senpy:0.10.8-python2.7
```

Note that some versions are untested.

## Manually

First, install senpy from source or through pip:

```
pip install senpy
```

Now, you can try to run your plugins:

```
senpy -f .
```

Each plugin has different requirements.
Senpy will try its best to automatically install the requirements (python libraries and NLTK resources) for each plugin.
Some cases may require manual installation of dependencies or external packages.

# For developers / Contributors

## Licensed data

In our deployments, we keep all licensed data in a private submodule.
You will likely need to initialize this submodule if you're a contributor:

```
git submodule update --init --recursive
```

## Adding a plugin from a separate repository

To add a plugin that has been developed in its own repository, you can use git-subtree like so:

```
mname=<your plugin name>
murl=<URL to your repository>

git remote add $mname $murl
git subtree add --prefix=$mname $mname master
```

Make sure to also add

# LICENSE

This compilation of plugins for Senpy uses the Apache 2.0 License. Some of the resources used to train these plugins can not be distributed, specifically the resources for the plugins `emotion-anew` and `emotion-wnaffect`. For more information, visit the [Senpy documentation](senpy.readthedocs.io).
community-plugins/data (submodule)
@@ -0,0 +1 @@
Subproject commit f34155c3891b8919a1d73bae410cb5113f1bf959
community-plugins/docker-compose.yml (new file, 12 lines)
@@ -0,0 +1,12 @@
version: '3'
services:
  community:
    build: .
    image: "${IMAGEWTAG:-gsi-upm/senpy-community:dev}"
    volumes:
      - ".:/senpy-plugins/"
      # - "./data:/data"
    ports:
      - '5000:5000'
    command:
      - "--allow-fail"
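Because the image name falls back through `${IMAGEWTAG:-...}`, a different tag can be supplied from the environment without editing the file (illustrative):

```
IMAGEWTAG=gsi-upm/senpy-community:test docker-compose up --build
```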
community-plugins/emotion-anew/README.md (new executable file, 60 lines)
@@ -0,0 +1,60 @@
# Plugin emotion-anew

This plugin is an **emotion classifier** that detects six possible emotions:
- Anger: general-dislike.
- Fear: negative-fear.
- Disgust: shame.
- Joy: gratitude, affective, enthusiasm, love, joy, liking.
- Sadness: ingratitude, daze, humility, compassion, despair, anxiety, sadness.
- Neutral: no particular emotion detected.

The plugin uses the **ANEW lexicon** dictionary to calculate the VAD (valence-arousal-dominance) value of the sentence and determine which emotion is closest to this value. For this comparison, each emotion is assigned a centroid, calculated according to this article: http://www.aclweb.org/anthology/W10-0208.

The plugin looks for the words in the sentence that appear in the ANEW dictionary and calculates the average VAD score for the sentence. Once this score is calculated, it seeks the emotion that is closest to this value.

The response of this plugin uses the [Onyx ontology](https://www.gsi.dit.upm.es/ontologies/onyx/), developed at GSI UPM, to express the information.
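A minimal Python sketch of the nearest-centroid step described above (illustrative only; the centroid values here are placeholders, not the ones from the referenced paper):

```
import math

# Hypothetical VAD centroids per emotion; the real values come from
# the article referenced above.
CENTROIDS = {
    "anger":   (2.12, 6.95, 5.05),
    "sadness": (1.61, 4.13, 3.45),
    "joy":     (8.21, 6.49, 6.63),
    "fear":    (2.25, 6.60, 3.98),
}

def closest_emotion(vad):
    """Return the emotion whose centroid is nearest (Euclidean) to a VAD triple."""
    return min(CENTROIDS, key=lambda e: math.dist(CENTROIDS[e], vad))

# Average the VAD of the words found in the lexicon, then classify:
print(closest_emotion((2.0, 6.5, 5.0)))  # -> 'anger'
```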
## Installation

* Download

```
git clone https://lab.cluster.gsi.dit.upm.es/senpy/emotion-anew.git
```

* Get data

```
cd emotion-anew
git submodule update --init --recursive
```

* Run

```
docker run -p 5000:5000 -v $PWD:/plugins gsiupm/senpy:python2.7 -f /plugins
```

## Data format

`data/Corpus/affective-isear.tsv` contains data from the ISEAR Databank: http://emotion-research.net/toolbox/toolboxdatabase.2006-10-13.2581092615

## Usage

Accepted parameters:
- Language: English (en) and Spanish (es).
- Input: input text to analyse.

Example request:
```
http://senpy.cluster.gsi.dit.upm.es/api/?algo=emotion-anew&language=en&input=I%20love%20Madrid
```

Example response: this plugin follows the standard senpy plugin response. For more information, please visit the [senpy documentation](http://senpy.readthedocs.io), specifically the NIF API section.

# Known issues

- The ANEW dictionary can be downloaded from here: <https://github.com/hcorona/SMC2015/blob/master/resources/ANEW2010All.txt>
- This plugin only supports **Python 2**.

![alt GSI Logo][logoGSI]

[logoES]: https://www.gsi.dit.upm.es/ontologies/onyx/img/eurosentiment_logo.png "EuroSentiment logo"
[logoGSI]: http://www.gsi.dit.upm.es/images/stories/logos/gsi.png "GSI Logo"
community-plugins/emotion-anew/emotion-anew.py (new file, 227 lines)
@@ -0,0 +1,227 @@
# -*- coding: utf-8 -*-

import re
import nltk
import csv
import sys
import os
import unicodedata
import string
import xml.etree.ElementTree as ET
import math

from sklearn.svm import LinearSVC
from sklearn.feature_extraction import DictVectorizer

from nltk import bigrams
from nltk import trigrams
from nltk.corpus import stopwords

from pattern.en import parse as parse_en
from pattern.es import parse as parse_es
from senpy.plugins import EmotionPlugin, SenpyPlugin
from senpy.models import Results, EmotionSet, Entry, Emotion


class ANEW(EmotionPlugin):
    description = "This plugin consists of an emotion classifier using the ANEW lexicon dictionary. It averages the VAD (valence-arousal-dominance) value of each word in the text that is also in the ANEW dictionary. To obtain a categorical value (e.g., happy) use the emotion conversion API (e.g., `emotion-model=emoml:big6`)."
    author = "@icorcuera"
    version = "0.5.2"
    name = "emotion-anew"

    extra_params = {
        "language": {
            "description": "language of the input",
            "aliases": ["language", "l"],
            "required": True,
            "options": ["es", "en"],
            "default": "en"
        }
    }

    anew_path_es = "Dictionary/Redondo(2007).csv"
    anew_path_en = "Dictionary/ANEW2010All.txt"
    onyx__usesEmotionModel = "emoml:pad-dimensions"
    nltk_resources = ['stopwords']

    def activate(self, *args, **kwargs):
        self._stopwords = stopwords.words('english')
        dictionary = {}
        dictionary['es'] = {}
        with self.open(self.anew_path_es, 'r') as tabfile:
            reader = csv.reader(tabfile, delimiter='\t')
            for row in reader:
                dictionary['es'][row[2]] = {}
                dictionary['es'][row[2]]['V'] = row[3]
                dictionary['es'][row[2]]['A'] = row[5]
                dictionary['es'][row[2]]['D'] = row[7]
        dictionary['en'] = {}
        with self.open(self.anew_path_en, 'r') as tabfile:
            reader = csv.reader(tabfile, delimiter='\t')
            for row in reader:
                dictionary['en'][row[0]] = {}
                dictionary['en'][row[0]]['V'] = row[2]
                dictionary['en'][row[0]]['A'] = row[4]
                dictionary['en'][row[0]]['D'] = row[6]
        self._dictionary = dictionary

    def _my_preprocessor(self, text):
        regHttp = re.compile('(http://)[a-zA-Z0-9]*.[a-zA-Z0-9/]*(.[a-zA-Z0-9]*)?')
        regHttps = re.compile('(https://)[a-zA-Z0-9]*.[a-zA-Z0-9/]*(.[a-zA-Z0-9]*)?')
        regAt = re.compile('@([a-zA-Z0-9]*[*_/&%#@$]*)*[a-zA-Z0-9]*')
        text = re.sub(regHttp, '', text)
        text = re.sub(regAt, '', text)
        text = re.sub('RT : ', '', text)
        text = re.sub(regHttps, '', text)
        text = re.sub('[0-9]', '', text)
        text = self._delete_punctuation(text)
        return text

    def _delete_punctuation(self, text):
        exclude = set(string.punctuation)
        s = ''.join(ch for ch in text if ch not in exclude)
        return s

    def _extract_ngrams(self, text, lang):
        unigrams_lemmas = []
        unigrams_words = []
        pos_tagged = []
        if lang == 'es':
            sentences = list(parse_es(text, lemmata=True).split())
        else:
            sentences = list(parse_en(text, lemmata=True).split())

        for sentence in sentences:
            for token in sentence:
                if token[0].lower() not in self._stopwords:
                    unigrams_words.append(token[0].lower())
                    unigrams_lemmas.append(token[4])
                    pos_tagged.append(token[1])

        return unigrams_lemmas, unigrams_words, pos_tagged

    def _find_ngrams(self, input_list, n):
        return zip(*[input_list[i:] for i in range(n)])

    def _extract_features(self, tweet, dictionary, lang):
        feature_set = {}
        ngrams_lemmas, ngrams_words, pos_tagged = self._extract_ngrams(tweet, lang)
        pos_tags = {'NN': 'NN', 'NNS': 'NN', 'JJ': 'JJ', 'JJR': 'JJ', 'JJS': 'JJ', 'RB': 'RB', 'RBR': 'RB',
                    'RBS': 'RB', 'VB': 'VB', 'VBD': 'VB', 'VGB': 'VB', 'VBN': 'VB', 'VBP': 'VB', 'VBZ': 'VB'}
        totalVAD = [0, 0, 0]
        matches = 0
        # Average the VAD scores of every word (lemma first, surface form as a
        # fallback) that appears in the ANEW dictionary.
        for word in range(len(ngrams_lemmas)):
            if ngrams_lemmas[word] in dictionary:
                matches += 1
                totalVAD = [totalVAD[0] + float(dictionary[ngrams_lemmas[word]]['V']),
                            totalVAD[1] + float(dictionary[ngrams_lemmas[word]]['A']),
                            totalVAD[2] + float(dictionary[ngrams_lemmas[word]]['D'])]
            elif ngrams_words[word] in dictionary:
                matches += 1
                totalVAD = [totalVAD[0] + float(dictionary[ngrams_words[word]]['V']),
                            totalVAD[1] + float(dictionary[ngrams_words[word]]['A']),
                            totalVAD[2] + float(dictionary[ngrams_words[word]]['D'])]
        if matches == 0:
            emotion = 'neutral'
        else:
            totalVAD = [totalVAD[0] / matches, totalVAD[1] / matches, totalVAD[2] / matches]
            feature_set['V'] = totalVAD[0]
            feature_set['A'] = totalVAD[1]
            feature_set['D'] = totalVAD[2]
        return feature_set

    def analyse_entry(self, entry, activity):
        params = activity.params

        text_input = entry.text

        text = self._my_preprocessor(text_input)
        dictionary = self._dictionary[params['language']]

        feature_set = self._extract_features(text, dictionary, params['language'])

        emotions = EmotionSet()
        emotions.id = "Emotions0"

        emotion1 = Emotion(id="Emotion0")
        emotion1["emoml:pad-dimensions_pleasure"] = feature_set['V']
        emotion1["emoml:pad-dimensions_arousal"] = feature_set['A']
        emotion1["emoml:pad-dimensions_dominance"] = feature_set['D']

        emotion1.prov(activity)
        emotions.prov(activity)

        emotions.onyx__hasEmotion.append(emotion1)
        entry.emotions = [emotions, ]

        yield entry

    ontology = "http://gsi.dit.upm.es/ontologies/wnaffect/ns#"
    test_cases = [
        {
            'name': 'anger with VAD=(2.12, 6.95, 5.05)',
            'input': 'I hate you',
            'expected': {
                'onyx:hasEmotionSet': [{
                    'onyx:hasEmotion': [{
                        "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal": 6.95,
                        "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance": 5.05,
                        "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#valence": 2.12,
                    }]
                }]
            }
        }, {
            'input': 'i am sad',
            'expected': {
                'onyx:hasEmotionSet': [{
                    'onyx:hasEmotion': [{
                        "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal": 4.13,
                        "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance": 3.45,
                        "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#valence": 1.61,
                    }]
                }]
            }
        }, {
            'name': 'joy',
            'input': 'i am happy with my marks',
            'expected': {
                'onyx:hasEmotionSet': [{
                    'onyx:hasEmotion': [{
                        "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal": 6.49,
                        "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance": 6.63,
                        "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#valence": 8.21,
                    }]
                }]
            }
        }, {
            'name': 'negative-feat',
            'input': 'This movie is scary',
            'expected': {
                'onyx:hasEmotionSet': [{
                    'onyx:hasEmotion': [{
                        "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal": 5.8100000000000005,
                        "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance": 4.33,
                        "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#valence": 5.050000000000001,
                    }]
                }]
            }
        }, {
            'name': 'negative-fear',
            'input': 'this cake is disgusting',
            'expected': {
                'onyx:hasEmotionSet': [{
                    'onyx:hasEmotion': [{
                        "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal": 5.09,
                        "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance": 4.4,
                        "http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#valence": 5.109999999999999,
                    }]
                }]
            }
        }
    ]
community-plugins/emotion-anew/emotion-anew.senpy (new file, 11 lines)
@@ -0,0 +1,11 @@
---
module: emotion-anew
requirements:
  - numpy
  - pandas
  - nltk
  - scipy
  - scikit-learn
  - textblob
  - pattern
  - lxml
community-plugins/emotion-depechemood/depechemood_plugin.py (new file, 181 lines)
@@ -0,0 +1,181 @@
#!/usr/local/bin/python
# coding: utf-8

from future import standard_library
standard_library.install_aliases()

import os
import re
import sys
import string
import numpy as np
from six.moves import urllib
from nltk.corpus import stopwords

from senpy import EmotionBox, models


def ignore(dchars):
    deletechars = "".join(dchars)
    tbl = str.maketrans("", "", deletechars)
    ignore = lambda s: s.translate(tbl)
    return ignore


class DepecheMood(EmotionBox):
    '''
    Plugin that uses the DepecheMood emotion lexicon.

    DepecheMood is an emotion lexicon automatically generated from news articles where users expressed their associated emotions. It contains two languages (English and Italian), as well as three types of word representations (token, lemma and lemma#PoS). For English, the lexicon contains 165k tokens, while the Italian version contains 116k. Unsupervised techniques can be applied to generate simple but effective baselines. To learn more, please visit https://github.com/marcoguerini/DepecheMood and http://www.depechemood.eu/
    '''

    author = 'Oscar Araque'
    name = 'emotion-depechemood'
    version = '0.1'
    requirements = ['pandas']
    nltk_resources = ["stopwords"]

    onyx__usesEmotionModel = 'wna:WNAModel'

    EMOTIONS = ['wna:negative-fear',
                'wna:amusement',
                'wna:anger',
                'wna:annoyance',
                'wna:indifference',
                'wna:joy',
                'wna:awe',
                'wna:sadness']

    DM_EMOTIONS = ['AFRAID', 'AMUSED', 'ANGRY', 'ANNOYED', 'DONT_CARE', 'HAPPY', 'INSPIRED', 'SAD', ]

    def __init__(self, *args, **kwargs):
        super(DepecheMood, self).__init__(*args, **kwargs)
        self.LEXICON_URL = "https://github.com/marcoguerini/DepecheMood/raw/master/DepecheMood%2B%2B/DepecheMood_english_token_full.tsv"
        self._denoise = ignore(set(string.punctuation) | set('«»'))
        self._stop_words = []
        self._lex_vocab = None
        self._lex = None

    def activate(self):
        self._lex = self.download_lex()
        self._lex_vocab = set(list(self._lex.keys()))
        self._stop_words = stopwords.words('english') + ['']

    def clean_str(self, string):
        string = re.sub(r"[^A-Za-z0-9().,!?\'\`]", " ", string)
        string = re.sub(r"[0-9]+", " num ", string)
        string = re.sub(r"\'s", " \'s", string)
        string = re.sub(r"\'ve", " \'ve", string)
        string = re.sub(r"n\'t", " n\'t", string)
        string = re.sub(r"\'re", " \'re", string)
        string = re.sub(r"\'d", " \'d", string)
        string = re.sub(r"\'ll", " \'ll", string)
        string = re.sub(r"\.", " . ", string)
        string = re.sub(r",", " , ", string)
        string = re.sub(r"!", " ! ", string)
        string = re.sub(r"\(", " ( ", string)
        string = re.sub(r"\)", " ) ", string)
        string = re.sub(r"\?", " ? ", string)
        string = re.sub(r"\s{2,}", " ", string)
        return string.strip().lower()

    def preprocess(self, text):
        if text is None:
            return None
        tokens = self._denoise(self.clean_str(text)).split(' ')
        tokens = [tok for tok in tokens if tok not in self._stop_words]
        return tokens

    def estimate_emotion(self, tokens, emotion):
        s = []
        for tok in tokens:
            s.append(self._lex[tok][emotion])
        dividend = np.sum(s) if np.sum(s) > 0 else 0
        divisor = len(s) if len(s) > 0 else 1
        S = np.sum(s) / divisor
        return S

    def estimate_all_emotions(self, tokens):
        S = []
        intersection = set(tokens) & self._lex_vocab
        for emotion in self.DM_EMOTIONS:
            s = self.estimate_emotion(intersection, emotion)
            S.append(s)
        return S

    def download_lex(self, file_path='DepecheMood_english_token_full.tsv', freq_threshold=10):

        import pandas as pd

        try:
            file_path = self.find_file(file_path)
        except IOError:
            file_path = self.path(file_path)
            filename, _ = urllib.request.urlretrieve(self.LEXICON_URL, file_path)

        lexicon = pd.read_csv(file_path, sep='\t', index_col=0)
        lexicon = lexicon[lexicon['freq'] >= freq_threshold]
        lexicon.drop('freq', axis=1, inplace=True)
        lexicon = lexicon.T.to_dict()
        return lexicon

    def predict_one(self, features, **kwargs):
        tokens = self.preprocess(features[0])
        estimation = self.estimate_all_emotions(tokens)
        return estimation

    test_cases = [
        {
            'entry': {
                'nif:isString': 'My cat is very happy',
            },
            'expected': {
                'onyx:hasEmotionSet': [
                    {
                        'onyx:hasEmotion': [
                            {
                                'onyx:hasEmotionCategory': 'wna:negative-fear',
                                'onyx:hasEmotionIntensity': 0.05278117640010922
                            },
                            {
                                'onyx:hasEmotionCategory': 'wna:amusement',
                                'onyx:hasEmotionIntensity': 0.2114806151413433,
                            },
                            {
                                'onyx:hasEmotionCategory': 'wna:anger',
                                'onyx:hasEmotionIntensity': 0.05726119426520887
                            },
                            {
                                'onyx:hasEmotionCategory': 'wna:annoyance',
                                'onyx:hasEmotionIntensity': 0.12295990731053638,
                            },
                            {
                                'onyx:hasEmotionCategory': 'wna:indifference',
                                'onyx:hasEmotionIntensity': 0.1860159893608025,
                            },
                            {
                                'onyx:hasEmotionCategory': 'wna:joy',
                                'onyx:hasEmotionIntensity': 0.12904050973724163,
                            },
                            {
                                'onyx:hasEmotionCategory': 'wna:awe',
                                'onyx:hasEmotionIntensity': 0.17973650399862967,
                            },
                            {
                                'onyx:hasEmotionCategory': 'wna:sadness',
                                'onyx:hasEmotionIntensity': 0.060724103786128455,
                            },
                        ]
                    }
                ]
            }
        }
    ]


if __name__ == '__main__':
    from senpy.utils import easy, easy_load, easy_test
    # sp, app = easy_load()
    # for plug in sp.analysis_plugins:
    #     plug.test()
    easy_test(debug=False)
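Once the server is running (e.g. via `docker-compose up`), the plugin can be queried through the regular senpy API. An illustrative request, following the `algo` parameter convention shown in the other plugins' documentation:

```
curl "http://localhost:5000/api/?algo=emotion-depechemood&input=My+cat+is+very+happy"
```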
community-plugins/emotion-wnaffect/.gitignore (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
__pycache__
*.pyc
community-plugins/emotion-wnaffect/.gitlab-ci.yml (new file, 67 lines)
@@ -0,0 +1,67 @@
# Uncomment if you want to use docker-in-docker
# image: gsiupm/dockermake:latest
# services:
# - docker:dind
# When using dind, it's wise to use the overlayfs driver for
# improved performance.

stages:
  - test
  - push
  - deploy
  - clean

before_script:
  - make -e login

.test: &test_definition
  stage: test
  script:
    - make -e test-$PYTHON_VERSION

test-3.5:
  <<: *test_definition
  variables:
    PYTHON_VERSION: "3.5"

.image: &image_definition
  stage: push
  script:
    - make -e push-$PYTHON_VERSION
  only:
    - tags
    - triggers

push-3.5:
  <<: *image_definition
  variables:
    PYTHON_VERSION: "3.5"

push-latest:
  <<: *image_definition
  variables:
    PYTHON_VERSION: latest
  only:
    - tags
    - triggers

deploy:
  stage: deploy
  environment: production
  script:
    - make -e deploy
  only:
    - tags
    - triggers

clean:
  stage: clean
  script:
    - make -e clean
  when: manual

cleanup_py:
  stage: clean
  when: always # this is important; run even if preceding stages failed.
  script:
    - docker logout
community-plugins/emotion-wnaffect/.makefiles/README.md (new file, 27 lines)
@@ -0,0 +1,27 @@
These makefiles are recipes for several common tasks in different types of projects.
To add them to your project, simply run:

```
git remote add makefiles ssh://git@lab.cluster.gsi.dit.upm.es:2200/docs/templates/makefiles.git
git subtree add --prefix=.makefiles/ makefiles master
touch Makefile
echo "include .makefiles/base.mk" >> Makefile
```

Now you can take advantage of the recipes.
For instance, to add useful targets for a python project, just add this to your Makefile:

```
include .makefiles/python.mk
```

You may need to set special variables, such as the name of your project or the python versions you're targeting.
Take a look at each specific `.mk` file for more information, and at the `Makefile` in the [senpy](https://lab.cluster.gsi.dit.upm.es/senpy/senpy) project for a real use case.

If you update the makefiles from your repository, make sure to push the changes for review upstream (to this repository):

```
make makefiles-push
```

It will automatically commit all unstaged changes in the .makefiles folder.
community-plugins/emotion-wnaffect/.makefiles/base.mk (new file, 36 lines)
@@ -0,0 +1,36 @@
export
NAME ?= $(shell basename $(CURDIR))
VERSION ?= $(shell git describe --tags --dirty 2>/dev/null)

ifeq ($(VERSION),)
VERSION := unknown
endif

# Get the location of this makefile.
MK_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))

-include .env
-include ../.env

help: ## Show this help.
	@fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/\(.*:\)[^#]*##\s*\(.*\)/\1\t\2/' | column -t -s " "

config: ## Load config from the environment. You should run it once in every session before other tasks. Run: eval $(make config)
	@awk '{ print "export " $$0}' ../.env
	@awk '{ print "export " $$0}' .env
	@echo "# Please, run: "
	@echo "# eval \$$(make config)"
# If you need to run a command on the key/value pairs, use this:
# @awk '{ split($$0, a, "="); "echo " a[2] " | base64 -w 0" |& getline b64; print "export " a[1] "=" a[2]; print "export " a[1] "_BASE64=" b64}' .env

ci: ## Run a task using gitlab-runner. Only use it to debug problems in the CI pipeline.
	gitlab-runner exec shell --builds-dir '.builds' --env CI_PROJECT_NAME=$(NAME) ${action}

include $(MK_DIR)/makefiles.mk
include $(MK_DIR)/docker.mk
include $(MK_DIR)/git.mk

info:: ## List all variables
	env

.PHONY:: config help ci
community-plugins/emotion-wnaffect/.makefiles/docker.mk (new file, 29 lines)
@@ -0,0 +1,29 @@
IMAGENAME?=$(NAME)
IMAGEWTAG?=$(IMAGENAME):$(VERSION)

docker-login: ## Log in to the registry. It will only be used in the server, or when running a CI task locally (if CI_BUILD_TOKEN is set).
ifeq ($(CI_BUILD_TOKEN),)
	@echo "Not logging in to the docker registry" "$(CI_REGISTRY)"
else
	@docker login -u gitlab-ci-token -p $(CI_BUILD_TOKEN) $(CI_REGISTRY)
endif
ifeq ($(HUB_USER),)
	@echo "Not logging in to the global docker registry"
else
	@docker login -u $(HUB_USER) -p $(HUB_PASSWORD)
endif

docker-clean: ## Remove docker credentials
ifeq ($(HUB_USER),)
else
	@docker logout
endif

login:: docker-login

clean:: docker-clean

docker-info:
	@echo IMAGEWTAG=${IMAGEWTAG}

.PHONY:: docker-login docker-clean login clean
28 community-plugins/emotion-wnaffect/.makefiles/git.mk (Normal file)
@ -0,0 +1,28 @@
commit:
	git commit -a

tag:
	git tag ${VERSION}

git-push::
	git push --tags -u origin HEAD

git-pull:
	git pull --all

push-github: ## Push the code to github. You need to set up GITHUB_DEPLOY_KEY
ifeq ($(GITHUB_DEPLOY_KEY),)
else
	$(eval KEY_FILE := "$(shell mktemp)")
	@echo "$(GITHUB_DEPLOY_KEY)" > $(KEY_FILE)
	@git remote rm github-deploy || true
	git remote add github-deploy $(GITHUB_REPO)
	-@GIT_SSH_COMMAND="ssh -i $(KEY_FILE)" git fetch github-deploy $(CI_COMMIT_REF_NAME)
	@GIT_SSH_COMMAND="ssh -i $(KEY_FILE)" git push github-deploy HEAD:$(CI_COMMIT_REF_NAME)
	rm $(KEY_FILE)
endif

push:: git-push
pull:: git-pull

.PHONY:: commit tag push git-push git-pull push-github
51 community-plugins/emotion-wnaffect/.makefiles/k8s.mk (Normal file)
@ -0,0 +1,51 @@
# Deployment with Kubernetes

# KUBE_CA_PEM_FILE is the path of a certificate file. It is automatically set by GitLab
# if you enable Kubernetes integration in a project.
#
# As of this writing, Kubernetes integration cannot be set on a group level, so it has to
# be manually set in every project.
# Alternatively, we use a custom KUBE_CA_BUNDLE environment variable, which can be set at
# the group level. In this case, the variable contains the whole content of the certificate,
# which we dump to a temporary file.
#
# Check if the KUBE_CA_PEM_FILE exists. Otherwise, create it from KUBE_CA_BUNDLE
KUBE_CA_TEMP=false
ifndef KUBE_CA_PEM_FILE
KUBE_CA_PEM_FILE:=$$PWD/.ca.crt
CREATED:=$(shell echo -e "$(KUBE_CA_BUNDLE)" > $(KUBE_CA_PEM_FILE))
endif
KUBE_TOKEN?=""
KUBE_NAMESPACE?=$(NAME)
KUBECTL=docker run --rm -v $(KUBE_CA_PEM_FILE):/tmp/ca.pem -i lachlanevenson/k8s-kubectl --server="$(KUBE_URL)" --token="$(KUBE_TOKEN)" --certificate-authority="/tmp/ca.pem" -n $(KUBE_NAMESPACE)
CI_COMMIT_REF_NAME?=master

info:: ## Print variables. Useful for debugging.
	@echo "#KUBERNETES"
	@echo KUBE_URL=$(KUBE_URL)
	@echo KUBE_CA_PEM_FILE=$(KUBE_CA_PEM_FILE)
	@echo KUBE_CA_BUNDLE=$$KUBE_CA_BUNDLE
	@echo KUBE_TOKEN=$(KUBE_TOKEN)
	@echo KUBE_NAMESPACE=$(KUBE_NAMESPACE)
	@echo KUBECTL=$(KUBECTL)

	@echo "#CI"
	@echo CI_PROJECT_NAME=$(CI_PROJECT_NAME)
	@echo CI_REGISTRY=$(CI_REGISTRY)
	@echo CI_REGISTRY_USER=$(CI_REGISTRY_USER)
	@echo CI_COMMIT_REF_NAME=$(CI_COMMIT_REF_NAME)
	@echo "CREATED=$(CREATED)"

#
# Deployment and advanced features
#


deploy: ## Deploy to kubernetes using the credentials in KUBE_CA_PEM_FILE (or KUBE_CA_BUNDLE) and TOKEN
	@ls k8s/*.yaml k8s/*.yml k8s/*.tmpl 2>/dev/null || true
	@cat k8s/*.yaml k8s/*.yml k8s/*.tmpl 2>/dev/null | envsubst | $(KUBECTL) apply -f -

deploy-check: ## Get the deployed configuration.
	@$(KUBECTL) get deploy,pods,svc,ingress

.PHONY:: info deploy deploy-check
17 community-plugins/emotion-wnaffect/.makefiles/makefiles.mk (Normal file)
@ -0,0 +1,17 @@
makefiles-remote:
	@git remote add makefiles ssh://git@lab.cluster.gsi.dit.upm.es:2200/docs/templates/makefiles.git 2>/dev/null || true

makefiles-commit: makefiles-remote
	git add -f .makefiles
	git commit -em "Updated makefiles from ${NAME}"

makefiles-push:
	git subtree push --prefix=.makefiles/ makefiles $(NAME)

makefiles-pull: makefiles-remote
	git subtree pull --prefix=.makefiles/ makefiles master --squash

pull:: makefiles-pull
push:: makefiles-push

.PHONY:: makefiles-remote makefiles-commit makefiles-push makefiles-pull pull push
@ -0,0 +1,5 @@
init: ## Init pre-commit hooks (i.e. enforcing format checking before allowing a commit)
	pip install --user pre-commit
	pre-commit install

.PHONY:: init
100 community-plugins/emotion-wnaffect/.makefiles/python.mk (Normal file)
@ -0,0 +1,100 @@
PYVERSIONS ?= 3.5
PYMAIN ?= $(firstword $(PYVERSIONS))
TARNAME ?= $(NAME)-$(VERSION).tar.gz
VERSIONFILE ?= $(NAME)/VERSION

DEVPORT ?= 6000


.FORCE:

version: .FORCE
	@echo $(VERSION) > $(VERSIONFILE)
	@echo $(VERSION)

yapf: ## Format python code
	yapf -i -r $(NAME)
	yapf -i -r tests

dockerfiles: $(addprefix Dockerfile-,$(PYVERSIONS)) ## Generate dockerfiles for each python version
	@unlink Dockerfile >/dev/null
	ln -s Dockerfile-$(PYMAIN) Dockerfile

Dockerfile-%: Dockerfile.template ## Generate a specific dockerfile (e.g. Dockerfile-2.7)
	sed "s/{{PYVERSION}}/$*/" Dockerfile.template > Dockerfile-$*

quick_build: $(addprefix build-, $(PYMAIN))

build: $(addprefix build-, $(PYVERSIONS)) ## Build all images / python versions

build-%: version Dockerfile-% ## Build a specific version (e.g. build-2.7)
	docker build -t '$(IMAGEWTAG)-python$*' --cache-from $(IMAGENAME):python$* -f Dockerfile-$* .;

dev-%: ## Launch a specific development environment using docker (e.g. dev-2.7)
	@docker start $(NAME)-dev$* || (\
		$(MAKE) build-$*; \
		docker run -d -w /usr/src/app/ -p $(DEVPORT):5000 -v $$PWD:/usr/src/app --entrypoint=/bin/bash -ti --name $(NAME)-dev$* '$(IMAGEWTAG)-python$*'; \
	)\

	docker exec -ti $(NAME)-dev$* bash

dev: dev-$(PYMAIN) ## Launch a development environment using docker, using the default python version

quick_test: test-$(PYMAIN)

test-%: ## Run setup.py in an isolated container, built from the base image. (e.g. test-2.7)
# This speeds tests up because the image has most (if not all) of the dependencies already.
	docker rm $(NAME)-test-$* || true
	docker create -ti --name $(NAME)-test-$* --entrypoint="" -w /usr/src/app/ $(IMAGENAME):python$* python setup.py test
	docker cp . $(NAME)-test-$*:/usr/src/app
	docker start -a $(NAME)-test-$*

test: $(addprefix test-,$(PYVERSIONS)) ## Run the tests for every python version

run-%: build-%
	docker run --rm -p $(DEVPORT):5000 -ti '$(IMAGEWTAG)-python$(PYMAIN)' --default-plugins

run: run-$(PYMAIN)

# PyPI - Upload a package

dist/$(TARNAME): version
	python setup.py sdist;

sdist: dist/$(TARNAME) ## Generate the distribution file (sdist tarball)

pip_test-%: sdist ## Test the distribution file using pip install and a specific python version (e.g. pip_test-2.7)
	docker run --rm -v $$PWD/dist:/dist/ python:$* pip install /dist/$(TARNAME);

pip_test: $(addprefix pip_test-,$(PYVERSIONS)) ## Test pip installation for every python version

pip_upload: pip_test ## Upload the package to PyPI
	python setup.py sdist upload ;

# Pushing to docker

push-latest: $(addprefix push-latest-,$(PYVERSIONS)) ## Push the "latest" tag to dockerhub
	docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGEWTAG)'
	docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGENAME)'
	docker push '$(IMAGENAME):latest'
	docker push '$(IMAGEWTAG)'

push-latest-%: build-% ## Push the latest image for a specific python version
	docker tag $(IMAGENAME):$(VERSION)-python$* $(IMAGENAME):python$*
	docker push $(IMAGENAME):$(VERSION)-python$*
	docker push $(IMAGENAME):python$*

push-%: build-% ## Push the image of the current version (tagged). e.g. push-2.7
	docker push $(IMAGENAME):$(VERSION)-python$*

push:: $(addprefix push-,$(PYVERSIONS)) ## Push an image with the current version for every python version
	docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGEWTAG)'
	docker push $(IMAGENAME):$(VERSION)

clean:: ## Clean older docker images and containers related to this project and dev environments
	@docker stop $(addprefix $(NAME)-dev,$(PYVERSIONS)) 2>/dev/null || true
	@docker rm $(addprefix $(NAME)-dev,$(PYVERSIONS)) 2>/dev/null || true
	@docker ps -a | grep $(IMAGENAME) | awk '{ split($$2, vers, "-"); if(vers[0] != "${VERSION}"){ print $$1;}}' | xargs docker rm -v 2>/dev/null|| true
	@docker images | grep $(IMAGENAME) | awk '{ split($$2, vers, "-"); if(vers[0] != "${VERSION}"){ print $$1":"$$2;}}' | xargs docker rmi 2>/dev/null|| true

.PHONY:: yapf dockerfiles Dockerfile-% quick_build build build-% dev-% quick-dev test quick_test push-latest push-latest-% push-% push version .FORCE
5 community-plugins/emotion-wnaffect/Dockerfile-2.7 (Normal file)
@ -0,0 +1,5 @@
FROM gsiupm/senpy:python2.7

MAINTAINER manuel.garcia-amado.sancho@alumnos.upm.es

COPY data /data
4 community-plugins/emotion-wnaffect/Dockerfile-3.5 (Normal file)
@ -0,0 +1,4 @@
FROM gsiupm/senpy:python3.5

MAINTAINER manuel.garcia-amado.sancho@alumnos.upm.es
COPY data /data
3 community-plugins/emotion-wnaffect/Dockerfile.template (Normal file)
@ -0,0 +1,3 @@
FROM gsiupm/senpy:python{{PYVERSION}}

MAINTAINER manuel.garcia-amado.sancho@alumnos.upm.es
9 community-plugins/emotion-wnaffect/Makefile (Normal file)
@ -0,0 +1,9 @@
NAME:=wnaffect
VERSIONFILE:=VERSION
IMAGENAME:=registry.cluster.gsi.dit.upm.es/senpy/emotion-wnaffect
PYVERSIONS:=2.7 3.5
DEVPORT:=5000

include .makefiles/base.mk
include .makefiles/k8s.mk
include .makefiles/python.mk
62 community-plugins/emotion-wnaffect/README.md (Normal file)
@ -0,0 +1,62 @@
# WordNet-Affect plugin

This plugin uses WordNet-Affect (http://wndomains.fbk.eu/wnaffect.html) to calculate the percentage of each emotion. The plugin classifies among five different emotions: anger, fear, disgust, joy and sadness. An emotion mapping has been used to enlarge the set of labels covered by each emotion:

- anger : general-dislike
- fear : negative-fear
- disgust : shame
- joy : gratitude, affective, enthusiasm, love, joy, liking
- sadness : ingrattitude, daze, humility, compassion, despair, anxiety, sadness

## Installation

* Download
```
git clone https://lab.cluster.gsi.dit.upm.es/senpy/emotion-wnaffect.git
```
* Get data
```
cd emotion-wnaffect
git submodule update --init --recursive
```
* Run
```
docker run -p 5000:5000 -v $PWD:/plugins gsiupm/senpy -f /plugins
```

## Data format

`data/a-hierarchy.xml` is an XML file describing the WordNet-Affect emotion hierarchy (each category and its parent).
`data/a-synsets.xml` is an XML file mapping WordNet synset offsets to emotion categories.
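
The exact schema is not documented here, but the loaders in `emotion-wnaffect.py` suggest the rough structure below. This is a sketch inferred from the parsing code (element and attribute names come from `_load_emotions` and `_load_synsets`), not from an official specification:

```
# Sketch: how the two data files are read, mirroring the plugin's loaders.
# Assumed shapes (inferred from the parser, not from official docs):
#   a-hierarchy.xml:  <categ name="joy" isa="positive-emotion"/>
#   a-synsets.xml:    <noun-syn-list><noun-syn id="n#00001234" categ="joy"/></noun-syn-list>
import xml.etree.ElementTree as ET

hierarchy = {}  # emotion name -> parent name
for categ in ET.parse("data/a-hierarchy.xml").getroot().findall("categ"):
    hierarchy[categ.get("name")] = categ.get("isa")

noun_emotions = {}  # synset offset -> emotion category
for syn in ET.parse("data/a-synsets.xml").getroot().findall(".//noun-syn-list//noun-syn"):
    offset = int(syn.get("id")[2:])  # ids look like "n#00001234"
    if syn.get("categ"):
        noun_emotions[offset] = syn.get("categ")
```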

## Usage

The parameters accepted are:

- Language: English (en).
- Input: Text to analyse.

Example request:
```
http://senpy.cluster.gsi.dit.upm.es/api/?algo=emotion-wnaffect&language=en&input=I%20love%20Madrid
```
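
The same request can be issued programmatically. A minimal sketch using the `requests` library; the endpoint and parameters are exactly the ones shown above:

```
import requests

res = requests.get("http://senpy.cluster.gsi.dit.upm.es/api/",
                   params={"algo": "emotion-wnaffect",
                           "language": "en",
                           "input": "I love Madrid"})
print(res.json())  # JSON-LD document with one emotion set per entry
```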

Example response: this plugin follows the standard for the senpy plugin response. For more information, please visit the [senpy documentation](http://senpy.readthedocs.io), specifically the NIF API section.

The response of this plugin uses the [Onyx ontology](https://www.gsi.dit.upm.es/ontologies/onyx/), developed at GSI UPM for the semantic web.

This plugin uses WNAffect labels for emotion analysis.

The emotion-wnaffect.senpy file can be copied and modified to use different versions of wnaffect with the same python code.

## Known issues

- This plugin runs on **Python2.7** and **Python3.5**
- Wnaffect and corpora files are not included in the repository, but can be easily added either to the docker image (using a volume) or in a new docker image.
- You can download Wordnet 1.6 here: <http://wordnetcode.princeton.edu/1.6/wn16.unix.tar.gz> and extract the dict folder (see the sketch below).
- The hierarchy and synsets files can be found here: <https://github.com/larsmans/wordnet-domains-sentiwords/tree/master/wn-domains/wn-affect-1.1>
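
The download step can also be scripted. A sketch in Python 3 (the layout of the archive is not documented here; after extracting, move the `dict` folder to `data/wordnet1.6/dict`, the path declared in `emotion-wnaffect.py`):

```
import tarfile
import urllib.request

# Fetch WordNet 1.6 and unpack it into a local folder.
url = "http://wordnetcode.princeton.edu/1.6/wn16.unix.tar.gz"
urllib.request.urlretrieve(url, "wn16.unix.tar.gz")
with tarfile.open("wn16.unix.tar.gz") as tar:
    tar.extractall("wn16")
# Then move the extracted dict folder to data/wordnet1.6/dict
```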

![alt GSI Logo][logoGSI]
[logoGSI]: http://www.gsi.dit.upm.es/images/stories/logos/gsi.png "GSI Logo"
278 community-plugins/emotion-wnaffect/emotion-wnaffect.py (Normal file)
@ -0,0 +1,278 @@
# -*- coding: utf-8 -*-
from __future__ import division
import re
import nltk
import os
import string
import xml.etree.ElementTree as ET
from nltk.corpus import stopwords
from nltk.corpus import WordNetCorpusReader
from nltk.stem import wordnet
from emotion import Emotion as Emo
from senpy.plugins import EmotionPlugin, AnalysisPlugin, ShelfMixin
from senpy.models import Results, EmotionSet, Entry, Emotion


class WNAffect(EmotionPlugin, ShelfMixin):
    '''
    Emotion classifier using WordNet-Affect to calculate the percentage
    of each emotion. This plugin classifies among 6 emotions: anger, fear,
    disgust, joy, sadness or neutral. The only available language is English (en).
    '''
    name = 'emotion-wnaffect'
    author = ["@icorcuera", "@balkian"]
    version = '0.2'
    extra_params = {
        'language': {
            "@id": 'lang_wnaffect',
            'description': 'language of the input',
            'aliases': ['language', 'l'],
            'required': True,
            'options': ['en', ]
        }
    }
    synsets_path = "a-synsets.xml"
    hierarchy_path = "a-hierarchy.xml"
    wn16_path = "wordnet1.6/dict"
    onyx__usesEmotionModel = "emoml:big6"
    nltk_resources = ['stopwords', 'averaged_perceptron_tagger', 'wordnet']

    def _load_synsets(self, synsets_path):
        """Returns a dictionary POS tag -> synset offset -> emotion (str -> int -> str)."""
        tree = ET.parse(synsets_path)
        root = tree.getroot()
        pos_map = {"noun": "NN", "adj": "JJ", "verb": "VB", "adv": "RB"}

        synsets = {}
        for pos in ["noun", "adj", "verb", "adv"]:
            tag = pos_map[pos]
            synsets[tag] = {}
            for elem in root.findall(".//{0}-syn-list//{0}-syn".format(pos)):
                offset = int(elem.get("id")[2:])
                if not offset:
                    continue
                if elem.get("categ"):
                    synsets[tag][offset] = Emo.emotions[elem.get(
                        "categ")] if elem.get("categ") in Emo.emotions else None
                elif elem.get("noun-id"):
                    synsets[tag][offset] = synsets[pos_map["noun"]][int(
                        elem.get("noun-id")[2:])]
        return synsets

    def _load_emotions(self, hierarchy_path):
        """Loads the hierarchy of emotions from the WordNet-Affect xml."""

        tree = ET.parse(hierarchy_path)
        root = tree.getroot()
        for elem in root.findall("categ"):
            name = elem.get("name")
            if name == "root":
                Emo.emotions["root"] = Emo("root")
            else:
                Emo.emotions[name] = Emo(name, elem.get("isa"))

    def activate(self, *args, **kwargs):
        self._stopwords = stopwords.words('english')
        self._wnlemma = wordnet.WordNetLemmatizer()
        self._syntactics = {'N': 'n', 'V': 'v', 'J': 'a', 'S': 's', 'R': 'r'}
        local_path = os.environ.get("SENPY_DATA")
        self._categories = {
            'anger': [
                'general-dislike',
            ],
            'fear': [
                'negative-fear',
            ],
            'disgust': [
                'shame',
            ],
            'joy':
            ['gratitude', 'affective', 'enthusiasm', 'love', 'joy', 'liking'],
            'sadness': [
                'ingrattitude', 'daze', 'humility', 'compassion', 'despair',
                'anxiety', 'sadness'
            ]
        }

        self._wnaffect_mappings = {
            'anger': 'anger',
            'fear': 'negative-fear',
            'disgust': 'disgust',
            'joy': 'joy',
            'sadness': 'sadness'
        }

        self._load_emotions(self.find_file(self.hierarchy_path))

        if 'total_synsets' not in self.sh:
            total_synsets = self._load_synsets(self.find_file(self.synsets_path))
            self.sh['total_synsets'] = total_synsets

        self._total_synsets = self.sh['total_synsets']

        self._wn16_path = self.wn16_path
        self._wn16 = WordNetCorpusReader(self.find_file(self._wn16_path),
                                         nltk.data.find(self.find_file(self._wn16_path)))

    def deactivate(self, *args, **kwargs):
        self.save()

    def _my_preprocessor(self, text):
        regHttp = re.compile(
            '(http://)[a-zA-Z0-9]*.[a-zA-Z0-9/]*(.[a-zA-Z0-9]*)?')
        regHttps = re.compile(
            '(https://)[a-zA-Z0-9]*.[a-zA-Z0-9/]*(.[a-zA-Z0-9]*)?')
        regAt = re.compile('@([a-zA-Z0-9]*[*_/&%#@$]*)*[a-zA-Z0-9]*')
        text = re.sub(regHttp, '', text)
        text = re.sub(regAt, '', text)
        text = re.sub('RT : ', '', text)
        text = re.sub(regHttps, '', text)
        text = re.sub('[0-9]', '', text)
        text = self._delete_punctuation(text)
        return text

    def _delete_punctuation(self, text):
        exclude = set(string.punctuation)
        s = ''.join(ch for ch in text if ch not in exclude)
        return s

    def _extract_ngrams(self, text):
        unigrams_lemmas = []
        pos_tagged = []
        unigrams_words = []
        tokens = text.split()
        for token in nltk.pos_tag(tokens):
            unigrams_words.append(token[0])
            pos_tagged.append(token[1])
            if token[1][0] in self._syntactics.keys():
                unigrams_lemmas.append(
                    self._wnlemma.lemmatize(token[0],
                                            self._syntactics[token[1][0]]))
            else:
                unigrams_lemmas.append(token[0])

        return unigrams_words, unigrams_lemmas, pos_tagged

    def _find_ngrams(self, input_list, n):
        return zip(*[input_list[i:] for i in range(n)])

    def _clean_pos(self, pos_tagged):
        pos_tags = {
            'NN': 'NN',
            'NNP': 'NN',
            'NNP-LOC': 'NN',
            'NNS': 'NN',
            'JJ': 'JJ',
            'JJR': 'JJ',
            'JJS': 'JJ',
            'RB': 'RB',
            'RBR': 'RB',
            'RBS': 'RB',
            'VB': 'VB',
            'VBD': 'VB',
            'VGB': 'VB',
            'VBN': 'VB',
            'VBP': 'VB',
            'VBZ': 'VB'
        }

        for i in range(len(pos_tagged)):
            if pos_tagged[i] in pos_tags:
                pos_tagged[i] = pos_tags[pos_tagged[i]]
        return pos_tagged

    def _extract_features(self, text):
        feature_set = {k: 0 for k in self._categories}
        ngrams_words, ngrams_lemmas, pos_tagged = self._extract_ngrams(text)
        matches = 0
        pos_tagged = self._clean_pos(pos_tagged)

        tag_wn = {
            'NN': self._wn16.NOUN,
            'JJ': self._wn16.ADJ,
            'VB': self._wn16.VERB,
            'RB': self._wn16.ADV
        }
        for i in range(len(pos_tagged)):
            if pos_tagged[i] in tag_wn:
                synsets = self._wn16.synsets(ngrams_words[i],
                                             tag_wn[pos_tagged[i]])
                if synsets:
                    offset = synsets[0].offset()
                    if offset in self._total_synsets[pos_tagged[i]]:
                        if self._total_synsets[pos_tagged[i]][offset] is None:
                            continue
                        else:
                            emotion = self._total_synsets[pos_tagged[i]][
                                offset].get_level(5).name
                            matches += 1
                            for i in self._categories:
                                if emotion in self._categories[i]:
                                    feature_set[i] += 1
        if matches == 0:
            matches = 1

        for i in feature_set:
            feature_set[i] = (feature_set[i] / matches)

        return feature_set

    def analyse_entry(self, entry, activity):
        params = activity.params

        text_input = entry['nif:isString']

        text = self._my_preprocessor(text_input)

        feature_text = self._extract_features(text)

        emotionSet = EmotionSet(id="Emotions0")
        emotions = emotionSet.onyx__hasEmotion

        for i in feature_text:
            emotions.append(
                Emotion(
                    onyx__hasEmotionCategory=self._wnaffect_mappings[i],
                    onyx__hasEmotionIntensity=feature_text[i]))

        entry.emotions = [emotionSet]

        yield entry

    def test(self, *args, **kwargs):
        results = list()
        params = {'algo': 'emotion-wnaffect',
                  'intype': 'direct',
                  'expanded-jsonld': 0,
                  'informat': 'text',
                  'prefix': '',
                  'plugin_type': 'analysisPlugin',
                  'urischeme': 'RFC5147String',
                  'outformat': 'json-ld',
                  'i': 'Hello World',
                  'input': 'Hello World',
                  'conversion': 'full',
                  'language': 'en',
                  'algorithm': 'emotion-wnaffect'}

        self.activate()
        texts = {'I hate you': 'anger',
                 'i am sad': 'sadness',
                 'i am happy with my marks': 'joy',
                 'This movie is scary': 'negative-fear'}

        for text in texts:
            response = next(self.analyse_entry(Entry(nif__isString=text),
                                               self.activity(params)))
            expected = texts[text]
            emotionSet = response.emotions[0]
            max_emotion = max(emotionSet['onyx:hasEmotion'],
                              key=lambda x: x['onyx:hasEmotionIntensity'])
            assert max_emotion['onyx:hasEmotionCategory'] == expected
@ -0,0 +1,6 @@
---
module: emotion-wnaffect
requirements:
  - nltk>=3.0.5
  - lxml>=3.4.2
async: false
95 community-plugins/emotion-wnaffect/emotion.py (Normal file)
@ -0,0 +1,95 @@
# -*- coding: utf-8 -*-
"""
Clement Michard (c) 2015
"""


class Emotion:
    """Defines an emotion."""

    emotions = {}  # name to emotion (str -> Emotion)

    def __init__(self, name, parent_name=None):
        """Initializes an Emotion object.
        name -- name of the emotion (str)
        parent_name -- name of the parent emotion (str)
        """

        self.name = name
        self.parent = None
        self.level = 0
        self.children = []

        if parent_name:
            self.parent = Emotion.emotions[parent_name] if parent_name else None
            self.parent.children.append(self)
            self.level = self.parent.level + 1

    def get_level(self, level):
        """Returns the parent of self at the given level.
        level -- level in the hierarchy (int)
        """

        em = self
        while em.level > level and em.level >= 0:
            em = em.parent
        return em

    def __str__(self):
        """Returns the emotion, formatted as a string."""

        return self.name

    def nb_children(self):
        """Returns the number of children of the emotion."""

        return sum(child.nb_children() for child in self.children) + 1

    @staticmethod
    def printTree(emotion=None, indent="", last='updown'):
        """Prints the hierarchy of emotions.
        emotion -- root emotion (Emotion)
        """

        if not emotion:
            emotion = Emotion.emotions["root"]

        size_branch = {child: child.nb_children() for child in emotion.children}
        leaves = sorted(emotion.children, key=lambda emotion: emotion.nb_children())
        up, down = [], []
        if leaves:
            while sum(size_branch[e] for e in down) < sum(size_branch[e] for e in leaves):
                down.append(leaves.pop())
            up = leaves

        for leaf in up:
            next_last = 'up' if up.index(leaf) == 0 else ''
            next_indent = '{0}{1}{2}'.format(indent, ' ' if 'up' in last else '│', " " * len(emotion.name))
            Emotion.printTree(leaf, indent=next_indent, last=next_last)
        if last == 'up':
            start_shape = '┌'
        elif last == 'down':
            start_shape = '└'
        elif last == 'updown':
            start_shape = ' '
        else:
            start_shape = '├'
        if up:
            end_shape = '┤'
        elif down:
            end_shape = '┐'
        else:
            end_shape = ''
        print('{0}{1}{2}{3}'.format(indent, start_shape, emotion.name, end_shape))
        for leaf in down:
            next_last = 'down' if down.index(leaf) == len(down) - 1 else ''
            next_indent = '{0}{1}{2}'.format(indent, ' ' if 'down' in last else '│', " " * len(emotion.name))
            Emotion.printTree(leaf, indent=next_indent, last=next_last)
94 community-plugins/emotion-wnaffect/wnaffect.py (Normal file)
@ -0,0 +1,94 @@
# -*- coding: utf-8 -*-
"""
Clement Michard (c) 2015
"""

import os
import sys
import nltk
from emotion import Emotion
from nltk.corpus import WordNetCorpusReader
import xml.etree.ElementTree as ET


class WNAffect:
    """WordNet-Affect resource."""

    nltk_resources = ['averaged_perceptron_tagger']

    def __init__(self, wordnet16_dir, wn_domains_dir):
        """Initializes the WordNet-Affect object."""

        cwd = os.getcwd()
        nltk.data.path.append(cwd)
        wn16_path = "{0}/dict".format(wordnet16_dir)
        self.wn16 = WordNetCorpusReader(os.path.abspath("{0}/{1}".format(cwd, wn16_path)), nltk.data.find(wn16_path))
        self.flat_pos = {'NN': 'NN', 'NNS': 'NN', 'JJ': 'JJ', 'JJR': 'JJ', 'JJS': 'JJ', 'RB': 'RB', 'RBR': 'RB', 'RBS': 'RB', 'VB': 'VB', 'VBD': 'VB', 'VGB': 'VB', 'VBN': 'VB', 'VBP': 'VB', 'VBZ': 'VB'}
        self.wn_pos = {'NN': self.wn16.NOUN, 'JJ': self.wn16.ADJ, 'VB': self.wn16.VERB, 'RB': self.wn16.ADV}
        self._load_emotions(wn_domains_dir)
        self.synsets = self._load_synsets(wn_domains_dir)

    def _load_synsets(self, wn_domains_dir):
        """Returns a dictionary POS tag -> synset offset -> emotion (str -> int -> str)."""

        tree = ET.parse("{0}/a-synsets.xml".format(wn_domains_dir))
        root = tree.getroot()
        pos_map = {"noun": "NN", "adj": "JJ", "verb": "VB", "adv": "RB"}

        synsets = {}
        for pos in ["noun", "adj", "verb", "adv"]:
            tag = pos_map[pos]
            synsets[tag] = {}
            for elem in root.findall(".//{0}-syn-list//{0}-syn".format(pos)):
                offset = int(elem.get("id")[2:])
                if not offset:
                    continue
                if elem.get("categ"):
                    synsets[tag][offset] = Emotion.emotions[elem.get("categ")] if elem.get("categ") in Emotion.emotions else None
                elif elem.get("noun-id"):
                    synsets[tag][offset] = synsets[pos_map["noun"]][int(elem.get("noun-id")[2:])]

        return synsets

    def _load_emotions(self, wn_domains_dir):
        """Loads the hierarchy of emotions from the WordNet-Affect xml."""

        tree = ET.parse("{0}/a-hierarchy.xml".format(wn_domains_dir))
        root = tree.getroot()
        for elem in root.findall("categ"):
            name = elem.get("name")
            if name == "root":
                Emotion.emotions["root"] = Emotion("root")
            else:
                Emotion.emotions[name] = Emotion(name, elem.get("isa"))

    def get_emotion(self, word, pos):
        """Returns the emotion of the word.
        word -- the word (str)
        pos -- part-of-speech (str)
        """

        if pos in self.flat_pos:
            pos = self.flat_pos[pos]
            synsets = self.wn16.synsets(word, self.wn_pos[pos])
            if synsets:
                offset = synsets[0].offset()
                if offset in self.synsets[pos]:
                    return self.synsets[pos][offset]
        return None


if __name__ == "__main__":
    wordnet16, wndomains32, word, pos = sys.argv[1:5]
    wna = WNAffect(wordnet16, wndomains32)
    print(wna.get_emotion(word, pos))
36 community-plugins/example-plugin/example_plugin.py (Normal file)
@ -0,0 +1,36 @@
from senpy.plugins import AnalysisPlugin
from senpy.models import Response, Entry


class ExamplePlugin(AnalysisPlugin):
    '''A *VERY* simple plugin that exemplifies the development of Senpy Plugins'''
    name = "example-plugin"
    author = "@balkian"
    version = "0.1"
    extra_params = {
        "parameter": {
            "@id": "parameter",
            "description": "this parameter does nothing, it is only an example",
            "aliases": ["parameter", "param"],
            "required": True,
            "default": 42
        }
    }
    custom_attribute = "42"

    def analyse_entry(self, entry, activity):
        params = activity.params
        self.log.debug('Analysing with the example.')
        self.log.debug('The answer to this response is: %s.' % params['parameter'])
        resp = Response()
        entry['example:reversed'] = entry.text[::-1]
        entry['example:the_answer'] = params['parameter']

        yield entry

    test_cases = [{
        'input': 'hello',
        'expected': {
            'example:reversed': 'olleh'
        }
    }]
8 community-plugins/k8s/README.md (Normal file)
@ -0,0 +1,8 @@
Deploy senpy to a kubernetes cluster.
The files are templates, which need to be expanded with something like envsubst.

Example usage:

```
cat k8s/*.ya*ml | envsubst | kubectl apply -n senpy -f -
```
36 community-plugins/k8s/senpy-deployment.yaml (Normal file)
@ -0,0 +1,36 @@
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: senpy-$NAME-latest
spec:
  replicas: 1
  template:
    metadata:
      labels:
        role: $NAME-latest
        app: test
    spec:
      containers:
      - name: senpy-latest
        image: $IMAGEWTAG
        imagePullPolicy: Always
        resources:
          limits:
            memory: "2048Mi"
            cpu: "1000m"
        ports:
        - name: web
          containerPort: 5000
        volumeMounts:
        # name must match the volume name below
        - name: senpy-data
          mountPath: "/data"
          subPath: data
        env:
        - name: SENPY_DATA
          value: '/data'
      volumes:
      - name: senpy-data
        persistentVolumeClaim:
          claimName: senpy-pvc
24 community-plugins/k8s/senpy-ingress.yaml (Normal file)
@ -0,0 +1,24 @@
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: senpy-$NAME-ingress
  annotations:
    ingress.kubernetes.io/proxy-body-size: 0m
    ingress.kubernetes.io/proxy-buffer-size: "256k"
spec:
  rules:
  - host: senpy.cluster.gsi.dit.upm.es
    http:
      paths:
      - path: /
        backend:
          serviceName: senpy-$NAME-latest
          servicePort: 5000
  - host: senpy.gsi.upm.es
    http:
      paths:
      - path: /
        backend:
          serviceName: senpy-$NAME-latest
          servicePort: 5000
12 community-plugins/k8s/senpy-svc.yaml (Normal file)
@ -0,0 +1,12 @@
---
apiVersion: v1
kind: Service
metadata:
  name: senpy-$NAME-latest
spec:
  type: ClusterIP
  ports:
  - port: 5000
    protocol: TCP
  selector:
    role: $NAME-latest
28 community-plugins/sentiment-basic/README.md (Executable file)
@ -0,0 +1,28 @@
# Sentiment basic plugin

This plugin is based on the classifier developed for the TASS 2015 competition. It has been developed for Spanish and English. This is a demo plugin that uses only some features from the TASS 2015 classifier. To use the fully functional classifier, you can use the service at: http://senpy.cluster.gsi.dit.upm.es

There is more information available in:

- Aspect based Sentiment Analysis of Spanish Tweets, Oscar Araque, Ignacio Corcuera-Platas, Constantino Román-Gómez, Carlos A. Iglesias and J. Fernando Sánchez-Rada. http://gsi.dit.upm.es/es/investigacion/publicaciones?view=publication&task=show&id=376

## Usage
Params accepted:

- Language: Spanish (es).
- Input: text to analyse.

Example request:
```
http://senpy.cluster.gsi.dit.upm.es/api/?algo=sentiment-basic&language=es&input=I%20love%20Madrid
```
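
The request can also be made from Python. A minimal sketch using `requests`; the loop at the end assumes the standard senpy JSON-LD layout, where each entry carries a list of sentiments with a `marl:hasPolarity` value:

```
import requests

res = requests.get("http://senpy.cluster.gsi.dit.upm.es/api/",
                   params={"algo": "sentiment-basic",
                           "language": "es",
                           "input": "Odio ir al cine"})
data = res.json()
# Assumed response layout: entries -> sentiments -> marl:hasPolarity
for entry in data.get("entries", []):
    for sentiment in entry.get("sentiments", []):
        print(sentiment.get("marl:hasPolarity"))
```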

Example response: this plugin follows the standard for the senpy plugin response. For more information, please visit the [senpy documentation](http://senpy.readthedocs.io), specifically the NIF API section.

This plugin only supports **python2**

![alt GSI Logo][logoGSI]

[logoGSI]: http://www.gsi.dit.upm.es/images/stories/logos/gsi.png "GSI Logo"
177 community-plugins/sentiment-basic/sentiment-basic.py (Normal file)
@ -0,0 +1,177 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import string
import nltk
import pickle

from sentiwn import SentiWordNet
from nltk.corpus import wordnet as wn
from textblob import TextBlob
from scipy.interpolate import interp1d
from os import path

from senpy.plugins import SentimentBox, SenpyPlugin
from senpy.models import Results, Entry, Sentiment, Error

if sys.version_info[0] >= 3:
    unicode = str


class SentimentBasic(SentimentBox):
    '''
    Sentiment classifier using rule-based classification for Spanish. Based on English-to-Spanish translation and SentiWordNet sentiment knowledge. This is a demo plugin that uses only some features from the TASS 2015 classifier. To use the fully functional classifier you can use the service at: http://senpy.cluster.gsi.dit.upm.es.
    '''
    name = "sentiment-basic"
    author = "github.com/nachtkatze"
    version = "0.1.1"
    extra_params = {
        "language": {
            "description": "language of the text",
            "aliases": ["language", "l"],
            "required": True,
            "options": ["en", "es", "it", "fr"],
            "default": "en"
        }
    }
    sentiword_path = "SentiWordNet_3.0.txt"
    pos_path = "unigram_spanish.pickle"
    maxPolarityValue = 1
    minPolarityValue = -1
    nltk_resources = ['punkt', 'wordnet', 'omw']

    with_polarity = False

    def _load_swn(self):
        self.swn_path = self.find_file(self.sentiword_path)
        swn = SentiWordNet(self.swn_path)
        return swn

    def _load_pos_tagger(self):
        self.pos_path = self.find_file(self.pos_path)
        with open(self.pos_path, 'rb') as f:
            tagger = pickle.load(f)
        return tagger

    def activate(self, *args, **kwargs):
        self._swn = self._load_swn()
        self._pos_tagger = self._load_pos_tagger()

    def _remove_punctuation(self, tokens):
        return [t for t in tokens if t not in string.punctuation]

    def _tokenize(self, text):
        sentence_ = {}
        words = nltk.word_tokenize(text)
        sentence_['sentence'] = text
        tokens_ = [w.lower() for w in words]
        sentence_['tokens'] = self._remove_punctuation(tokens_)
        return sentence_

    def _pos(self, tokens):
        tokens['tokens'] = self._pos_tagger.tag(tokens['tokens'])
        return tokens

    def _compare_synsets(self, synsets, tokens):
        for synset in synsets:
            for word, lemmas in tokens['lemmas'].items():
                for lemma in lemmas:
                    synset_ = lemma.synset()
                    if synset == synset_:
                        return synset
        return None

    def predict_one(self, features, activity):
        language = activity.param("language")
        text = features[0]
        tokens = self._tokenize(text)
        tokens = self._pos(tokens)
        sufixes = {'es': 'spa', 'en': 'eng', 'it': 'ita', 'fr': 'fra'}
        tokens['lemmas'] = {}
        for w in tokens['tokens']:
            lemmas = wn.lemmas(w[0], lang=sufixes[language])
            if len(lemmas) == 0:
                continue
            tokens['lemmas'][w[0]] = lemmas
        if language == "en":
            trans = TextBlob(unicode(text))
        else:
            try:
                trans = TextBlob(unicode(text)).translate(from_lang=language, to='en')
            except Exception as ex:
                raise Error('Could not translate the text from "{}" to "{}": {}'.format(language,
                                                                                        'en',
                                                                                        str(ex)))
        useful_synsets = {}
        for w_i, t_w in enumerate(trans.sentences[0].words):
            synsets = wn.synsets(trans.sentences[0].words[w_i])
            if len(synsets) == 0:
                continue
            eq_synset = self._compare_synsets(synsets, tokens)
            useful_synsets[t_w] = eq_synset
        scores = {}
        if useful_synsets is not None:
            for word in useful_synsets:
                if useful_synsets[word] is None:
                    continue
                temp_scores = self._swn.get_score(useful_synsets[word].name().split('.')[0].replace(' ', ' '))
                for score in temp_scores:
                    if score['synset'] == useful_synsets[word]:
                        t_score = score['pos'] - score['neg']
                        f_score = 'neu'
                        if t_score > 0:
                            f_score = 'pos'
                        elif t_score < 0:
                            f_score = 'neg'
                        score['score'] = f_score
                        scores[word] = score
                        break
        g_score = 0.5

        for i in scores:
            n_pos = 0.0
            n_neg = 0.0
            for w in scores:
                if scores[w]['score'] == 'pos':
                    n_pos += 1.0
                elif scores[w]['score'] == 'neg':
                    n_neg += 1.0
            inter = interp1d([-1.0, 1.0], [0.0, 1.0])

            try:
                g_score = (n_pos - n_neg) / (n_pos + n_neg)
                g_score = float(inter(g_score))
            except ZeroDivisionError:
                if n_pos == 0 and n_neg == 0:
                    g_score = 0.5

        if g_score > 0.5:  # Positive
            return [1, 0, 0]
        elif g_score < 0.5:  # Negative
            return [0, 0, 1]
        else:
            return [0, 1, 0]

    test_cases = [
        {
            'input': 'Odio ir al cine',
            'params': {'language': 'es'},
            'polarity': 'marl:Negative'
        },
        {
            'input': 'El cielo está nublado',
            'params': {'language': 'es'},
            'polarity': 'marl:Neutral'
        },
        {
            'input': 'Esta tarta está muy buena',
            'params': {'language': 'es'},
            'polarity': 'marl:Negative'  # SURPRISINGLY!
        }
    ]
7 community-plugins/sentiment-basic/sentiment-basic.senpy (Normal file)
@ -0,0 +1,7 @@
---
module: sentiment-basic
requirements:
  - nltk>=3.0.5
  - scipy>=0.14.0
  - textblob
70 community-plugins/sentiment-basic/sentiwn.py (Normal file)
@ -0,0 +1,70 @@
#!/usr/bin/env python
"""
Author : Jaganadh Gopinadhan <jaganadhg@gmail.com>
Copyright (C) : Jaganadh Gopinadhan

Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

import sys, os
import re

from nltk.corpus import wordnet


class SentiWordNet(object):
    """
    Interface to SentiWordNet
    """
    def __init__(self, swn_file):
        self.swn_file = swn_file
        self.pos_synset = self.__parse_swn_file()

    def __parse_swn_file(self):
        """
        Parse the SentiWordNet file and populate the POS and SynsetID hash
        """
        pos_synset_hash = {}
        swn_data = open(self.swn_file, 'r').readlines()
        head_less_swn_data = filter((lambda line: not re.search(r"^\s*#", line)), swn_data)

        for data in head_less_swn_data:
            fields = data.strip().split("\t")
            try:
                pos, syn_set_id, pos_score, neg_score, syn_set_score, gloss = fields
            except ValueError:
                # Skip malformed lines instead of falling through with stale values
                print("Found data without all details")
                continue

            if pos and syn_set_score:
                pos_synset_hash[(pos, int(syn_set_id))] = (float(pos_score), float(neg_score))

        return pos_synset_hash

    def get_score(self, word, pos=None):
        """
        Get score for a given word/word pos combination
        """
        senti_scores = []
        synsets = wordnet.synsets(word, pos)
        for synset in synsets:
            if (synset.pos(), synset.offset()) in self.pos_synset:
                pos_val, neg_val = self.pos_synset[(synset.pos(), synset.offset())]
                senti_scores.append({"pos": pos_val, "neg": neg_val,
                                     "obj": 1.0 - (pos_val - neg_val), 'synset': synset})

        return senti_scores
5 community-plugins/sentiment-meaningCloud/.gitignore (vendored, Normal file)
@ -0,0 +1,5 @@
.*
.env
__pycache__
.pyc
VERSION
67 community-plugins/sentiment-meaningCloud/.gitlab-ci.yml (Normal file)
@ -0,0 +1,67 @@
# Uncomment if you want to use docker-in-docker
# image: gsiupm/dockermake:latest
# services:
# - docker:dind
# When using dind, it's wise to use the overlayfs driver for
# improved performance.

stages:
  - test
  - push
  - deploy
  - clean

before_script:
  - make -e login

.test: &test_definition
  stage: test
  script:
    - make -e test-$PYTHON_VERSION

test-3.5:
  <<: *test_definition
  variables:
    PYTHON_VERSION: "3.5"

.image: &image_definition
  stage: push
  script:
    - make -e push-$PYTHON_VERSION
  only:
    - tags
    - triggers

push-3.5:
  <<: *image_definition
  variables:
    PYTHON_VERSION: "3.5"

push-latest:
  <<: *image_definition
  variables:
    PYTHON_VERSION: latest
  only:
    - tags
    - triggers

deploy:
  stage: deploy
  environment: production
  script:
    - make -e deploy
  only:
    - tags
    - triggers

clean:
  stage: clean
  script:
    - make -e clean
  when: manual

cleanup_py:
  stage: clean
  when: always  # this is important; run even if preceding stages failed.
  script:
    - docker logout
@ -0,0 +1,27 @@
These makefiles are recipes for several common tasks in different types of projects.
To add them to your project, simply do:

```
git remote add makefiles ssh://git@lab.cluster.gsi.dit.upm.es:2200/docs/templates/makefiles.git
git subtree add --prefix=.makefiles/ makefiles master
touch Makefile
echo "include .makefiles/base.mk" >> Makefile
```

Now you can take advantage of the recipes.
For instance, to add useful targets for a python project, just add this to your Makefile:

```
include .makefiles/python.mk
```

You may need to set special variables like the name of your project or the python versions you're targeting.
Take a look at each specific `.mk` file for more information, and the `Makefile` in the [senpy](https://lab.cluster.gsi.dit.upm.es/senpy/senpy) project for a real use case.

If you update the makefiles from your repository, make sure to push the changes for review in upstream (this repository):

```
make makefiles-push
```

It will automatically commit all unstaged changes in the .makefiles folder.
36 community-plugins/sentiment-meaningCloud/.makefiles/base.mk (Normal file)
@ -0,0 +1,36 @@
export
NAME ?= $(shell basename $(CURDIR))
VERSION ?= $(shell git describe --tags --dirty 2>/dev/null)

ifeq ($(VERSION),)
  VERSION:="unknown"
endif

# Get the location of this makefile.
MK_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))

-include .env
-include ../.env

help: ## Show this help.
	@fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/\(.*:\)[^#]*##\s*\(.*\)/\1\t\2/' | column -t -s " "

config: ## Load config from the environment. You should run it once in every session before other tasks. Run: eval $(make config)
	@awk '{ print "export " $$0}' ../.env
	@awk '{ print "export " $$0}' .env
	@echo "# Please, run: "
	@echo "# eval \$$(make config)"
# If you need to run a command on the key/value pairs, use this:
# @awk '{ split($$0, a, "="); "echo " a[2] " | base64 -w 0" |& getline b64; print "export " a[1] "=" a[2]; print "export " a[1] "_BASE64=" b64}' .env

ci: ## Run a task using gitlab-runner. Only use to debug problems in the CI pipeline
	gitlab-runner exec shell --builds-dir '.builds' --env CI_PROJECT_NAME=$(NAME) ${action}

include $(MK_DIR)/makefiles.mk
include $(MK_DIR)/docker.mk
include $(MK_DIR)/git.mk

info:: ## List all variables
	env

.PHONY:: config help ci
@ -0,0 +1,29 @@
IMAGENAME?=$(NAME)
IMAGEWTAG?=$(IMAGENAME):$(VERSION)

docker-login: ## Log in to the registry. It will only be used in the server, or when running a CI task locally (if CI_BUILD_TOKEN is set).
ifeq ($(CI_BUILD_TOKEN),)
	@echo "Not logging in to the docker registry" "$(CI_REGISTRY)"
else
	@docker login -u gitlab-ci-token -p $(CI_BUILD_TOKEN) $(CI_REGISTRY)
endif
ifeq ($(HUB_USER),)
	@echo "Not logging in to the global docker registry"
else
	@docker login -u $(HUB_USER) -p $(HUB_PASSWORD)
endif

docker-clean: ## Remove docker credentials
ifeq ($(HUB_USER),)
else
	@docker logout
endif

login:: docker-login

clean:: docker-clean

docker-info:
	@echo IMAGEWTAG=${IMAGEWTAG}

.PHONY:: docker-login docker-clean login clean
28 community-plugins/sentiment-meaningCloud/.makefiles/git.mk (Normal file)
@ -0,0 +1,28 @@
commit:
	git commit -a

tag:
	git tag ${VERSION}

git-push::
	git push --tags -u origin HEAD

git-pull:
	git pull --all

push-github: ## Push the code to github. You need to set up GITHUB_DEPLOY_KEY
ifeq ($(GITHUB_DEPLOY_KEY),)
else
	$(eval KEY_FILE := "$(shell mktemp)")
	@echo "$(GITHUB_DEPLOY_KEY)" > $(KEY_FILE)
	@git remote rm github-deploy || true
	git remote add github-deploy $(GITHUB_REPO)
	-@GIT_SSH_COMMAND="ssh -i $(KEY_FILE)" git fetch github-deploy $(CI_COMMIT_REF_NAME)
	@GIT_SSH_COMMAND="ssh -i $(KEY_FILE)" git push github-deploy HEAD:$(CI_COMMIT_REF_NAME)
	rm $(KEY_FILE)
endif

push:: git-push
pull:: git-pull

.PHONY:: commit tag push git-push git-pull push-github
51 community-plugins/sentiment-meaningCloud/.makefiles/k8s.mk (Normal file)
@ -0,0 +1,51 @@
# Deployment with Kubernetes

# KUBE_CA_PEM_FILE is the path of a certificate file. It is automatically set by GitLab
# if you enable Kubernetes integration in a project.
#
# As of this writing, Kubernetes integration cannot be set on a group level, so it has to
# be manually set in every project.
# Alternatively, we use a custom KUBE_CA_BUNDLE environment variable, which can be set at
# the group level. In this case, the variable contains the whole content of the certificate,
# which we dump to a temporary file.
#
# Check if the KUBE_CA_PEM_FILE exists. Otherwise, create it from KUBE_CA_BUNDLE
KUBE_CA_TEMP=false
ifndef KUBE_CA_PEM_FILE
KUBE_CA_PEM_FILE:=$$PWD/.ca.crt
CREATED:=$(shell echo -e "$(KUBE_CA_BUNDLE)" > $(KUBE_CA_PEM_FILE))
endif
KUBE_TOKEN?=""
KUBE_NAMESPACE?=$(NAME)
KUBECTL=docker run --rm -v $(KUBE_CA_PEM_FILE):/tmp/ca.pem -i lachlanevenson/k8s-kubectl --server="$(KUBE_URL)" --token="$(KUBE_TOKEN)" --certificate-authority="/tmp/ca.pem" -n $(KUBE_NAMESPACE)
CI_COMMIT_REF_NAME?=master

info:: ## Print variables. Useful for debugging.
	@echo "#KUBERNETES"
	@echo KUBE_URL=$(KUBE_URL)
	@echo KUBE_CA_PEM_FILE=$(KUBE_CA_PEM_FILE)
	@echo KUBE_CA_BUNDLE=$$KUBE_CA_BUNDLE
	@echo KUBE_TOKEN=$(KUBE_TOKEN)
	@echo KUBE_NAMESPACE=$(KUBE_NAMESPACE)
	@echo KUBECTL=$(KUBECTL)

	@echo "#CI"
	@echo CI_PROJECT_NAME=$(CI_PROJECT_NAME)
	@echo CI_REGISTRY=$(CI_REGISTRY)
	@echo CI_REGISTRY_USER=$(CI_REGISTRY_USER)
	@echo CI_COMMIT_REF_NAME=$(CI_COMMIT_REF_NAME)
	@echo "CREATED=$(CREATED)"

#
# Deployment and advanced features
#


deploy: ## Deploy to kubernetes using the credentials in KUBE_CA_PEM_FILE (or KUBE_CA_BUNDLE) and TOKEN
	@ls k8s/*.yaml k8s/*.yml k8s/*.tmpl 2>/dev/null || true
	@cat k8s/*.yaml k8s/*.yml k8s/*.tmpl 2>/dev/null | envsubst | $(KUBECTL) apply -f -

deploy-check: ## Get the deployed configuration.
	@$(KUBECTL) get deploy,pods,svc,ingress

.PHONY:: info deploy deploy-check
@ -0,0 +1,17 @@
makefiles-remote:
	@git remote add makefiles ssh://git@lab.cluster.gsi.dit.upm.es:2200/docs/templates/makefiles.git 2>/dev/null || true

makefiles-commit: makefiles-remote
	git add -f .makefiles
	git commit -em "Updated makefiles from ${NAME}"

makefiles-push:
	git subtree push --prefix=.makefiles/ makefiles $(NAME)

makefiles-pull: makefiles-remote
	git subtree pull --prefix=.makefiles/ makefiles master --squash

pull:: makefiles-pull
push:: makefiles-push

.PHONY:: makefiles-remote makefiles-commit makefiles-push makefiles-pull pull push
@ -0,0 +1,5 @@
init: ## Init pre-commit hooks (i.e. enforcing format checking before allowing a commit)
	pip install --user pre-commit
	pre-commit install

.PHONY:: init
100
community-plugins/sentiment-meaningCloud/.makefiles/python.mk
Normal file
100
community-plugins/sentiment-meaningCloud/.makefiles/python.mk
Normal file
@ -0,0 +1,100 @@
|
||||
PYVERSIONS ?= 3.5
|
||||
PYMAIN ?= $(firstword $(PYVERSIONS))
|
||||
TARNAME ?= $(NAME)-$(VERSION).tar.gz
|
||||
VERSIONFILE ?= $(NAME)/VERSION
|
||||
|
||||
DEVPORT ?= 6000
|
||||
|
||||
|
||||
.FORCE:
|
||||
|
||||
version: .FORCE
|
||||
@echo $(VERSION) > $(VERSIONFILE)
|
||||
	@echo $(VERSION)

yapf: ## Format python code
	yapf -i -r $(NAME)
	yapf -i -r tests

dockerfiles: $(addprefix Dockerfile-,$(PYVERSIONS)) ## Generate dockerfiles for each python version
	@unlink Dockerfile >/dev/null || true
	ln -s Dockerfile-$(PYMAIN) Dockerfile

Dockerfile-%: Dockerfile.template ## Generate a specific dockerfile (e.g. Dockerfile-2.7)
	sed "s/{{PYVERSION}}/$*/" Dockerfile.template > Dockerfile-$*

quick_build: $(addprefix build-, $(PYMAIN))

build: $(addprefix build-, $(PYVERSIONS)) ## Build all images / python versions

build-%: version Dockerfile-% ## Build a specific version (e.g. build-2.7)
	docker build -t '$(IMAGEWTAG)-python$*' --cache-from $(IMAGENAME):python$* -f Dockerfile-$* .

dev-%: ## Launch a specific development environment using docker (e.g. dev-2.7)
	@docker start $(NAME)-dev$* || (\
		$(MAKE) build-$*; \
		docker run -d -w /usr/src/app/ -p $(DEVPORT):5000 -v $$PWD:/usr/src/app --entrypoint=/bin/bash -ti --name $(NAME)-dev$* '$(IMAGEWTAG)-python$*'; \
	)
	docker exec -ti $(NAME)-dev$* bash

dev: dev-$(PYMAIN) ## Launch a development environment using docker, using the default python version

quick_test: test-$(PYMAIN)

test-%: ## Run setup.py in an isolated container, built from the base image (e.g. test-2.7)
	# This speeds tests up because the image has most (if not all) of the dependencies already.
	docker rm $(NAME)-test-$* || true
	docker create -ti --name $(NAME)-test-$* --entrypoint="" -w /usr/src/app/ $(IMAGENAME):python$* python setup.py test
	docker cp . $(NAME)-test-$*:/usr/src/app
	docker start -a $(NAME)-test-$*

test: $(addprefix test-,$(PYVERSIONS)) ## Run the tests for every python version

run-%: build-%
	docker run --rm -p $(DEVPORT):5000 -ti '$(IMAGEWTAG)-python$*' --default-plugins

run: run-$(PYMAIN)

# PyPI - Upload a package

dist/$(TARNAME): version
	python setup.py sdist

sdist: dist/$(TARNAME) ## Generate the source distribution file (sdist tarball)

pip_test-%: sdist ## Test the distribution file using pip install and a specific python version (e.g. pip_test-2.7)
	docker run --rm -v $$PWD/dist:/dist/ python:$* pip install /dist/$(TARNAME)

pip_test: $(addprefix pip_test-,$(PYVERSIONS)) ## Test pip installation with every python version

pip_upload: pip_test ## Upload package to pip
	python setup.py sdist upload

# Pushing to docker

push-latest: $(addprefix push-latest-,$(PYVERSIONS)) ## Push the "latest" tag to dockerhub
	docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGEWTAG)'
	docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGENAME)'
	docker push '$(IMAGENAME):latest'
	docker push '$(IMAGEWTAG)'

push-latest-%: build-% ## Push the latest image for a specific python version
	docker tag $(IMAGENAME):$(VERSION)-python$* $(IMAGENAME):python$*
	docker push $(IMAGENAME):$(VERSION)-python$*
	docker push $(IMAGENAME):python$*

push-%: build-% ## Push the image of the current version (tagged), e.g. push-2.7
	docker push $(IMAGENAME):$(VERSION)-python$*

push:: $(addprefix push-,$(PYVERSIONS)) ## Push an image with the current version for every python version
	docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGEWTAG)'
	docker push $(IMAGENAME):$(VERSION)

clean:: ## Clean older docker images and containers related to this project and dev environments
	@docker stop $(addprefix $(NAME)-dev,$(PYVERSIONS)) 2>/dev/null || true
	@docker rm $(addprefix $(NAME)-dev,$(PYVERSIONS)) 2>/dev/null || true
	@docker ps -a | grep $(IMAGENAME) | awk '{ split($$2, vers, "-"); if(vers[0] != "${VERSION}"){ print $$1;}}' | xargs docker rm -v 2>/dev/null || true
	@docker images | grep $(IMAGENAME) | awk '{ split($$2, vers, "-"); if(vers[0] != "${VERSION}"){ print $$1":"$$2;}}' | xargs docker rmi 2>/dev/null || true

.PHONY:: yapf dockerfiles Dockerfile-% quick_build build build-% dev dev-% quick_test test test-% push-latest push-latest-% push-% push version .FORCE
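
# A sketch of typical invocations of the targets above, assuming PYVERSIONS
# and PYMAIN are set by the including Makefile (as in the plugin Makefiles
# below, e.g. PYVERSIONS=3.5):
#
#   make dev-3.5     # build the python 3.5 image and open a shell inside it
#   make test        # run setup.py test once per python version
#   make push-latest # tag and push the latest images to the registry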
1
community-plugins/sentiment-meaningCloud/Dockerfile
Symbolic link
@@ -0,0 +1 @@
Dockerfile-3.5

4
community-plugins/sentiment-meaningCloud/Dockerfile-3.5
Normal file
@@ -0,0 +1,4 @@
FROM gsiupm/senpy:0.10.4-python3.5


MAINTAINER manuel.garcia-amado.sancho@alumnos.upm.es

4
community-plugins/sentiment-meaningCloud/Dockerfile.template
Normal file
@@ -0,0 +1,4 @@
FROM gsiupm/senpy:0.10.4-python{{PYVERSION}}


MAINTAINER manuel.garcia-amado.sancho@alumnos.upm.es
9
community-plugins/sentiment-meaningCloud/Makefile
Normal file
@@ -0,0 +1,9 @@
NAME:=meaningcloud
VERSIONFILE:=VERSION
IMAGENAME:=registry.cluster.gsi.dit.upm.es/senpy/sentiment-meaningcloud
PYVERSIONS:= 3.5
DEVPORT:=5000

include .makefiles/base.mk
include .makefiles/k8s.mk
include .makefiles/python.mk
34
community-plugins/sentiment-meaningCloud/README.md
Normal file
@@ -0,0 +1,34 @@
# Senpy Plugin MeaningCloud

The MeaningCloud plugin uses the MeaningCloud API to perform sentiment analysis.

For more information about MeaningCloud and its services, please visit: https://www.meaningcloud.com/developer/apis

## Usage

To use this plugin, you need to obtain an API key from MeaningCloud by signing up here: https://www.meaningcloud.com/developer/login

Once you have obtained the MeaningCloud API key, you have to provide it to the plugin using the **apiKey** parameter.

To use this plugin, send a GET request with the following parameters:

- Language: language of the input: en, es, ca, it, pt, fr or auto. (default: auto)
- API Key: the API key from MeaningCloud. Aliases: ["apiKey", "meaningCloud-key"]. (required)
- Input: text to analyse. (required)
- Model: model provided to the MeaningCloud API (for general domain). (default: general)

## Example of Usage

Example request (see the Python sketch below for an equivalent programmatic call):

```
http://senpy.cluster.gsi.dit.upm.es/api/?algo=meaningCloud&language=en&apiKey=<put here your API key>&input=I%20love%20Madrid
```
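
A minimal sketch of the same request from Python, using the `requests` library. This is illustrative only: the endpoint and parameter names are taken from the example above, and the API key value is a placeholder you must replace with your own.

```python
import requests

# Same call as the example URL above, expressed with the requests library.
resp = requests.get(
    "http://senpy.cluster.gsi.dit.upm.es/api/",
    params={
        "algo": "meaningCloud",
        "language": "en",
        "apiKey": "<put here your API key>",  # placeholder, not a real key
        "input": "I love Madrid",
    },
)
print(resp.json())  # JSON-LD response following the senpy/NIF schema
```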

Example response: this plugin follows the standard senpy plugin response format. For more information, please visit the [senpy documentation](http://senpy.readthedocs.io), specifically the NIF API section.

This plugin supports **python2.7** and **python3**.

![alt GSI Logo][logoGSI]

[logoGSI]: http://www.gsi.dit.upm.es/images/stories/logos/gsi.png "GSI Logo"
28
community-plugins/sentiment-meaningCloud/docker-compose.yml
Normal file
@@ -0,0 +1,28 @@
version: '3'
services:
  dev:
    image: gsiupm/senpy:latest
    entrypoint: ["/bin/bash"]
    working_dir: "/senpy-plugins"
    tty: true
    ports:
      - "127.0.0.1:5005:5000"
    volumes:
      - ".:/senpy-plugins"
  test:
    image: gsiupm/senpy:latest
    entrypoint: ["py.test"]
    working_dir: "/usr/src/app/"
    volumes:
      - ".:/senpy-plugins/"
    command:
      []
  meaningcloud:
    image: "${IMAGENAME-gsiupm/meaningcloud}:${VERSION-dev}"
    build:
      context: .
      dockerfile: Dockerfile-3.5
    ports:
      - 5001:5000
    volumes:
      - "./data:/data"
7
community-plugins/sentiment-meaningCloud/k8s/README.md
Normal file
@@ -0,0 +1,7 @@
Deploy senpy to a kubernetes cluster.

Usage:

```
kubectl apply -f . -n senpy
```
@@ -0,0 +1,27 @@
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: ${NAME}
spec:
  replicas: 1
  template:
    metadata:
      labels:
        role: senpy-plugin
        app: ${NAME}
    spec:
      containers:
        - name: senpy-latest
          image: ${CI_REGISTRY_IMAGE}:${VERSION}
          imagePullPolicy: Always
          args:
            - "-f"
            - "/senpy-plugins"
          resources:
            limits:
              memory: "512Mi"
              cpu: "1000m"
          ports:
            - name: web
              containerPort: 5000
@@ -0,0 +1,14 @@
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: ${NAME}
spec:
  rules:
    - host: ${NAME}.senpy.cluster.gsi.dit.upm.es
      http:
        paths:
          - path: /
            backend:
              serviceName: ${NAME}
              servicePort: 5000
12
community-plugins/sentiment-meaningCloud/k8s/senpy-svc.yaml
Normal file
@@ -0,0 +1,12 @@
---
apiVersion: v1
kind: Service
metadata:
  name: ${NAME}
spec:
  type: ClusterIP
  ports:
    - port: 5000
      protocol: TCP
  selector:
    app: ${NAME}
254
community-plugins/sentiment-meaningCloud/meaningcloud_plugin.py
Normal file
@@ -0,0 +1,254 @@
# -*- coding: utf-8 -*-

import time
import requests
import json
import string
import os
from os import path

from senpy.plugins import SentimentPlugin
from senpy.models import Results, Entry, Entity, Topic, Sentiment, Error
from senpy.utils import check_template


class MeaningCloudPlugin(SentimentPlugin):
    '''
    Sentiment analysis with the MeaningCloud service.
    To use this plugin, you need to obtain an API key from MeaningCloud by signing up here:
    https://www.meaningcloud.com/developer/login

    Once you have obtained the MeaningCloud API key, you have to provide it to the plugin, using the param apiKey.
    Example request:

    http://senpy.cluster.gsi.dit.upm.es/api/?algo=meaningCloud&language=en&apiKey=YOUR_API_KEY&input=I%20love%20Madrid.
    '''
    name = 'sentiment-meaningcloud'
    author = 'GSI UPM'
    version = "1.1"
    maxPolarityValue = 1
    minPolarityValue = -1

    extra_params = {
        "language": {
            "description": "language of the input",
            "aliases": ["language", "l"],
            "required": True,
            "options": ["en", "es", "ca", "it", "pt", "fr", "auto"],
            "default": "auto"
        },
        "apikey": {
            "description": "API key for the meaningcloud service. See https://www.meaningcloud.com/developer/login",
            "aliases": ["apiKey", "meaningcloud-key", "meaningcloud-apikey"],
            "required": True
        }
    }

    def _polarity(self, value):
        # Map MeaningCloud score tags (NONE, N+, N, P, P+) to marl polarities.
        # Defaults to neutral when the tag is missing or unrecognised.
        polarity = 'marl:Neutral'
        polarityValue = 0
        if not value or 'NONE' in value:
            pass
        elif 'N' in value:
            polarity = 'marl:Negative'
            polarityValue = -1
        elif 'P' in value:
            polarity = 'marl:Positive'
            polarityValue = 1
        return polarity, polarityValue

    def analyse_entry(self, entry, activity):
        params = activity.params

        txt = entry['nif:isString']
        api = 'http://api.meaningcloud.com/'
        lang = params.get("language")
        model = "general"
        key = params["apikey"]
        parameters = {
            'key': key,
            'model': model,
            'lang': lang,
            'of': 'json',
            'txt': txt,
            'tt': 'a'
        }
        try:
            r = requests.post(
                api + "sentiment-2.1", params=parameters, timeout=3)
            parameters['lang'] = r.json()['model'].split('_')[1]
            lang = parameters['lang']
            r2 = requests.post(
                api + "topics-2.0", params=parameters, timeout=3)
        except requests.exceptions.Timeout:
            raise Error("The MeaningCloud API did not respond in time")

        api_response = r.json()
        api_response_topics = r2.json()
        if not api_response.get('score_tag'):
            raise Error(r.json())
        entry['language_detected'] = lang
        self.log.debug(api_response)
        agg_polarity, agg_polarityValue = self._polarity(
            api_response.get('score_tag', None))
        agg_opinion = Sentiment(
            id="Opinion0",
            marl__hasPolarity=agg_polarity,
            marl__polarityValue=agg_polarityValue,
            marl__opinionCount=len(api_response['sentence_list']))
        agg_opinion.prov(self)
        entry.sentiments.append(agg_opinion)
        self.log.debug(api_response['sentence_list'])
        count = 1

        for sentence in api_response['sentence_list']:
            for nopinion in sentence['segment_list']:
                self.log.debug(nopinion)
                polarity, polarityValue = self._polarity(
                    nopinion.get('score_tag', None))
                opinion = Sentiment(
                    id="Opinion{}".format(count),
                    marl__hasPolarity=polarity,
                    marl__polarityValue=polarityValue,
                    marl__aggregatesOpinion=agg_opinion.get('id'),
                    nif__anchorOf=nopinion.get('text', None),
                    nif__beginIndex=int(nopinion.get('inip', None)),
                    nif__endIndex=int(nopinion.get('endp', None)))
                count += 1
                opinion.prov(self)
                entry.sentiments.append(opinion)

        # Prefix for the language-specific DBpedia instance of each supported language.
        mapper = {'es': 'es.', 'en': '', 'ca': 'es.', 'it': 'it.', 'fr': 'fr.', 'pt': 'pt.'}

        for sent_entity in api_response_topics['entity_list']:
            resource = "_".join(sent_entity.get('form', None).split())
            entity = Entity(
                id="Entity{}".format(sent_entity.get('id')),
                itsrdf__taIdentRef="http://{}dbpedia.org/resource/{}".format(
                    mapper[lang], resource),
                nif__anchorOf=sent_entity.get('form', None),
                nif__beginIndex=int(sent_entity['variant_list'][0].get('inip', None)),
                nif__endIndex=int(sent_entity['variant_list'][0].get('endp', None)))
            sementity = sent_entity['sementity'].get('type', None).split(">")[-1]
            entity['@type'] = "ODENTITY_{}".format(sementity)
            entity.prov(self)
            if 'senpy:hasEntity' not in entry:
                entry['senpy:hasEntity'] = []
            entry['senpy:hasEntity'].append(entity)

        for topic in api_response_topics['concept_list']:
            if 'semtheme_list' in topic:
                for theme in topic['semtheme_list']:
                    concept = Topic()
                    concept.id = "Topic{}".format(topic.get('id'))
                    concept['@type'] = "ODTHEME_{}".format(theme['type'].split(">")[-1])
                    concept['fam:topic-reference'] = "http://dbpedia.org/resource/{}".format(theme['type'].split('>')[-1])
                    concept.prov(self)
                    if 'senpy:hasTopic' not in entry:
                        entry['senpy:hasTopic'] = []
                    entry['senpy:hasTopic'].append(concept)
        yield entry

    test_cases = [
        {
            'params': {
                'algo': 'sentiment-meaningCloud',
                'intype': 'direct',
                'expanded-jsonld': 0,
                'informat': 'text',
                'prefix': '',
                'plugin_type': 'analysisPlugin',
                'urischeme': 'RFC5147String',
                'outformat': 'json-ld',
                'conversion': 'full',
                'language': 'en',
                'apikey': '00000',
                'algorithm': 'sentiment-meaningCloud'
            },
            'input': 'Hello World Obama',
            'expected': {
                'marl:hasOpinion': [
                    {'marl:hasPolarity': 'marl:Neutral'}],
                'senpy:hasEntity': [
                    {'itsrdf:taIdentRef': 'http://dbpedia.org/resource/Obama'}],
                'senpy:hasTopic': [
                    {'fam:topic-reference': 'http://dbpedia.org/resource/Astronomy'}]
            },
            'responses': [
                {
                    'url': 'http://api.meaningcloud.com/sentiment-2.1',
                    'method': 'POST',
                    'json': {
                        'model': 'general_en',
                        'sentence_list': [{
                            'text': 'Hello World',
                            'endp': '10',
                            'inip': '0',
                            'segment_list': [{
                                'text': 'Hello World',
                                'segment_type': 'secondary',
                                'confidence': '100',
                                'inip': '0',
                                'agreement': 'AGREEMENT',
                                'endp': '10',
                                'polarity_term_list': [],
                                'score_tag': 'NONE'
                            }],
                            'score_tag': 'NONE',
                        }],
                        'score_tag': 'NONE'
                    }
                }, {
                    'url': 'http://api.meaningcloud.com/topics-2.0',
                    'method': 'POST',
                    'json': {
                        'entity_list': [{
                            'form': 'Obama',
                            'id': '__1265958475430276310',
                            'variant_list': [{
                                'endp': '16',
                                'form': 'Obama',
                                'inip': '12'
                            }],
                            'sementity': {
                                'fiction': 'nonfiction',
                                'confidence': 'uncertain',
                                'class': 'instance',
                                'type': 'Top>Person'
                            }
                        }],
                        'concept_list': [{
                            'form': 'world',
                            'id': '5c053cd39d',
                            'relevance': '100',
                            'semtheme_list': [{
                                'id': 'ODTHEME_ASTRONOMY',
                                'type': 'Top>NaturalSciences>Astronomy'
                            }]
                        }],
                    }
                }]
        }
    ]


if __name__ == '__main__':
    from senpy import easy_test
    easy_test()
87
community-plugins/sentiment-senticnet/senticnet_plugin.py
Normal file
@@ -0,0 +1,87 @@
#
# Copyright 2014 Grupo de Sistemas Inteligentes (GSI) DIT, UPM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import requests
import json

from senpy.plugins import SentimentBox
from senpy import easy_test

ENDPOINT = 'http://sentiment.gelbukh.com/sentiment/run.php'


class Senticnet(SentimentBox):
    '''Connects to the SenticNet free polarity detection API: http://sentiment.gelbukh.com/sentiment/'''

    author = "@balkian"
    version = '0.1'
    url = "https://github.com/gsi-upm/senpy-plugins-community"
    extra_params = {}

    classes = ['marl:Positive', 'marl:Neutral', 'marl:Negative']
    binary = True

    def predict_one(self, features, activity):
        text = ' '.join(features)

        res = requests.post(ENDPOINT, data={'input': text})

        if '-' not in res.text:
            raise Exception('Invalid response from server: {}'.format(res.text))

        label = res.text.split('-')[1].lower()

        if 'positive' in label:
            return [1, 0, 0]
        elif 'negative' in label:
            return [0, 0, 1]

        return [0, 1, 0]

    test_cases = [
        {
            'entry': {
                'nif:isString': 'I love Titanic'
            },
            'params': {},
            'expected': {
                "nif:isString": "I love Titanic",
                'marl:hasOpinion': [
                    {
                        'marl:hasPolarity': 'marl:Positive',
                    }
                ]
            },
        }, {
            'entry': {
                'nif:isString': 'I hate my life'
            },
            'params': {},
            'expected': {
                "nif:isString": "I hate my life",
                'marl:hasOpinion': [
                    {
                        'marl:hasPolarity': 'marl:Negative',
                    }
                ]
            },
        },
    ]


if __name__ == '__main__':
    easy_test()
42
community-plugins/sentiment-vader/README.md
Normal file
@@ -0,0 +1,42 @@
# Sentiment-vader plugin

Vader is a plugin developed at GSI UPM for sentiment analysis.
The response of this plugin uses the [Marl ontology](https://www.gsi.dit.upm.es/ontologies/marl/), developed at GSI UPM for the semantic web.

## Acknowledgements

This plugin uses the vaderSentiment module underneath, which is described in the paper:

VADER: A Parsimonious Rule-based Model for Sentiment Analysis of Social Media Text
C.J. Hutto and Eric Gilbert
Eighth International Conference on Weblogs and Social Media (ICWSM-14). Ann Arbor, MI, June 2014.

If you use this plugin in your research, please cite the above paper.

For more information about the functionality, check the official repository:

https://github.com/cjhutto/vaderSentiment

## Usage

Parameters:

- Language: es (Spanish), en (English).
- Input: Text to analyse.

Example request (see the Python sketch below for a programmatic alternative):

```
http://senpy.cluster.gsi.dit.upm.es/api/?algo=sentiment-vader&language=en&input=I%20love%20Madrid
```
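
The plugin delegates the scoring to the bundled vaderSentiment module (included below in this directory). A short sketch of calling that module's `sentiment()` function directly, which can be useful for debugging outside senpy; the example sentence is illustrative:

```python
from vaderSentiment import sentiment

# Returns a dict with 'neg', 'neu', 'pos' ratios and a normalized 'compound' score.
scores = sentiment("VADER is smart, handsome, and funny!")
print(scores)
```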

Example response: this plugin follows the standard senpy plugin response format. For more information, please visit the [senpy documentation](http://senpy.readthedocs.io), specifically the NIF API section.

This plugin supports **python3**.

![alt GSI Logo][logoGSI]

[logoGSI]: http://www.gsi.dit.upm.es/images/stories/logos/gsi.png "GSI Logo"
368
community-plugins/sentiment-vader/vaderSentiment.py
Normal file
@@ -0,0 +1,368 @@
#!/usr/bin/python
# coding: utf-8
'''
Created on July 04, 2013
@author: C.J. Hutto

Citation Information

If you use any of the VADER sentiment analysis tools
(VADER sentiment lexicon or Python code for rule-based sentiment
analysis engine) in your work or research, please cite the paper.
For example:

Hutto, C.J. & Gilbert, E.E. (2014). VADER: A Parsimonious Rule-based Model for
Sentiment Analysis of Social Media Text. Eighth International Conference on
Weblogs and Social Media (ICWSM-14). Ann Arbor, MI, June 2014.
'''

import os, math, re, sys, fnmatch, string
import codecs


def make_lex_dict(f):
    maps = {}
    with codecs.open(f, encoding='iso-8859-1') as f:
        for wmsr in f:
            w, m = wmsr.strip().split('\t')[:2]
            maps[w] = m
    return maps


f = 'vader_sentiment_lexicon.txt'  # empirically derived valence ratings for words, emoticons, slang, swear words, acronyms/initialisms
try:
    word_valence_dict = make_lex_dict(f)
except IOError:
    # fall back to the lexicon file shipped next to this module
    f = os.path.join(os.path.dirname(__file__), 'vader_sentiment_lexicon.txt')
    word_valence_dict = make_lex_dict(f)

# for removing punctuation
regex_remove_punctuation = re.compile('[%s]' % re.escape(string.punctuation))


def sentiment(text):
    """
    Returns a float for sentiment strength based on the input text.
    Positive values are positive valence, negative values are negative valence.
    """
    wordsAndEmoticons = str(text).split()  # doesn't separate words from adjacent punctuation (keeps emoticons & contractions)
    text_mod = regex_remove_punctuation.sub('', text)  # removes punctuation (but loses emoticons & contractions)
    wordsOnly = str(text_mod).split()
    # get rid of empty items or single letter "words" like 'a' and 'I' from wordsOnly
    for word in wordsOnly:
        if len(word) <= 1:
            wordsOnly.remove(word)
    # now remove adjacent & redundant punctuation from [wordsAndEmoticons] while keeping emoticons and contractions
    puncList = [".", "!", "?", ",", ";", ":", "-", "'", "\"",
                "!!", "!!!", "??", "???", "?!?", "!?!", "?!?!", "!?!?"]
    for word in wordsOnly:
        for p in puncList:
            pword = p + word
            x1 = wordsAndEmoticons.count(pword)
            while x1 > 0:
                i = wordsAndEmoticons.index(pword)
                wordsAndEmoticons.remove(pword)
                wordsAndEmoticons.insert(i, word)
                x1 = wordsAndEmoticons.count(pword)

            wordp = word + p
            x2 = wordsAndEmoticons.count(wordp)
            while x2 > 0:
                i = wordsAndEmoticons.index(wordp)
                wordsAndEmoticons.remove(wordp)
                wordsAndEmoticons.insert(i, word)
                x2 = wordsAndEmoticons.count(wordp)
    # get rid of residual empty items or single letter "words" like 'a' and 'I' from wordsAndEmoticons
    for word in wordsAndEmoticons:
        if len(word) <= 1:
            wordsAndEmoticons.remove(word)

    # remove stopwords from [wordsAndEmoticons]
    #stopwords = [str(word).strip() for word in open('stopwords.txt')]
    #for word in wordsAndEmoticons:
    #    if word in stopwords:
    #        wordsAndEmoticons.remove(word)

    # check for negation
    negate = ["aint", "arent", "cannot", "cant", "couldnt", "darent", "didnt", "doesnt",
              "ain't", "aren't", "can't", "couldn't", "daren't", "didn't", "doesn't",
              "dont", "hadnt", "hasnt", "havent", "isnt", "mightnt", "mustnt", "neither",
              "don't", "hadn't", "hasn't", "haven't", "isn't", "mightn't", "mustn't",
              "neednt", "needn't", "never", "none", "nope", "nor", "not", "nothing", "nowhere",
              "oughtnt", "shant", "shouldnt", "uhuh", "wasnt", "werent",
              "oughtn't", "shan't", "shouldn't", "uh-uh", "wasn't", "weren't",
              "without", "wont", "wouldnt", "won't", "wouldn't", "rarely", "seldom", "despite"]

    def negated(list, nWords=[], includeNT=True):
        nWords.extend(negate)
        for word in nWords:
            if word in list:
                return True
        if includeNT:
            for word in list:
                if "n't" in word:
                    return True
        if "least" in list:
            i = list.index("least")
            if i > 0 and list[i-1] != "at":
                return True
        return False

    def normalize(score, alpha=15):
        # normalize the score to be between -1 and 1 using an alpha that approximates the max expected value
        normScore = score/math.sqrt(((score*score) + alpha))
        return normScore

    def wildCardMatch(patternWithWildcard, listOfStringsToMatchAgainst):
        listOfMatches = fnmatch.filter(listOfStringsToMatchAgainst, patternWithWildcard)
        return listOfMatches

    def isALLCAP_differential(wordList):
        countALLCAPS = 0
        for w in wordList:
            if str(w).isupper():
                countALLCAPS += 1
        cap_differential = len(wordList) - countALLCAPS
        if cap_differential > 0 and cap_differential < len(wordList):
            isDiff = True
        else:
            isDiff = False
        return isDiff
    isCap_diff = isALLCAP_differential(wordsAndEmoticons)

    b_incr = 0.293  # (empirically derived mean sentiment intensity rating increase for booster words)
    b_decr = -0.293
    # booster/dampener 'intensifiers' or 'degree adverbs' http://en.wiktionary.org/wiki/Category:English_degree_adverbs
    booster_dict = {"absolutely": b_incr, "amazingly": b_incr, "awfully": b_incr, "completely": b_incr, "considerably": b_incr,
                    "decidedly": b_incr, "deeply": b_incr, "effing": b_incr, "enormously": b_incr,
                    "entirely": b_incr, "especially": b_incr, "exceptionally": b_incr, "extremely": b_incr,
                    "fabulously": b_incr, "flipping": b_incr, "flippin": b_incr,
                    "fricking": b_incr, "frickin": b_incr, "frigging": b_incr, "friggin": b_incr, "fully": b_incr, "fucking": b_incr,
                    "greatly": b_incr, "hella": b_incr, "highly": b_incr, "hugely": b_incr, "incredibly": b_incr,
                    "intensely": b_incr, "majorly": b_incr, "more": b_incr, "most": b_incr, "particularly": b_incr,
                    "purely": b_incr, "quite": b_incr, "really": b_incr, "remarkably": b_incr,
                    "so": b_incr, "substantially": b_incr,
                    "thoroughly": b_incr, "totally": b_incr, "tremendously": b_incr,
                    "uber": b_incr, "unbelievably": b_incr, "unusually": b_incr, "utterly": b_incr,
                    "very": b_incr,

                    "almost": b_decr, "barely": b_decr, "hardly": b_decr, "just enough": b_decr,
                    "kind of": b_decr, "kinda": b_decr, "kindof": b_decr, "kind-of": b_decr,
                    "less": b_decr, "little": b_decr, "marginally": b_decr, "occasionally": b_decr, "partly": b_decr,
                    "scarcely": b_decr, "slightly": b_decr, "somewhat": b_decr,
                    "sort of": b_decr, "sorta": b_decr, "sortof": b_decr, "sort-of": b_decr}
    sentiments = []
    for item in wordsAndEmoticons:
        v = 0
        i = wordsAndEmoticons.index(item)
        if (i < len(wordsAndEmoticons)-1 and str(item).lower() == "kind" and
                str(wordsAndEmoticons[i+1]).lower() == "of") or str(item).lower() in booster_dict:
            sentiments.append(v)
            continue
        item_lowercase = str(item).lower()
        if item_lowercase in word_valence_dict:
            # get the sentiment valence
            v = float(word_valence_dict[item_lowercase])

            # check if sentiment laden word is in ALLCAPS (while others aren't)
            c_incr = 0.733  # (empirically derived mean sentiment intensity rating increase for using ALLCAPs to emphasize a word)
            if str(item).isupper() and isCap_diff:
                if v > 0:
                    v += c_incr
                else:
                    v -= c_incr

            # check if the preceding words increase, decrease, or negate/nullify the valence
            def scalar_inc_dec(word, valence):
                scalar = 0.0
                word_lower = str(word).lower()
                if word_lower in booster_dict:
                    scalar = booster_dict[word_lower]
                    if valence < 0:
                        scalar *= -1
                    # check if booster/dampener word is in ALLCAPS (while others aren't)
                    if str(word).isupper() and isCap_diff:
                        if valence > 0:
                            scalar += c_incr
                        else:
                            scalar -= c_incr
                return scalar
            n_scalar = -0.74
            if i > 0 and str(wordsAndEmoticons[i-1]).lower() not in word_valence_dict:
                s1 = scalar_inc_dec(wordsAndEmoticons[i-1], v)
                v = v+s1
                if negated([wordsAndEmoticons[i-1]]):
                    v = v*n_scalar
            if i > 1 and str(wordsAndEmoticons[i-2]).lower() not in word_valence_dict:
                s2 = scalar_inc_dec(wordsAndEmoticons[i-2], v)
                if s2 != 0:
                    s2 = s2*0.95
                v = v+s2
                # check for special use of 'never' as valence modifier instead of negation
                if wordsAndEmoticons[i-2] == "never" and (wordsAndEmoticons[i-1] == "so" or wordsAndEmoticons[i-1] == "this"):
                    v = v*1.5
                # otherwise, check for negation/nullification
                elif negated([wordsAndEmoticons[i-2]]):
                    v = v*n_scalar
            if i > 2 and str(wordsAndEmoticons[i-3]).lower() not in word_valence_dict:
                s3 = scalar_inc_dec(wordsAndEmoticons[i-3], v)
                if s3 != 0:
                    s3 = s3*0.9
                v = v+s3
                # check for special use of 'never' as valence modifier instead of negation
                if wordsAndEmoticons[i-3] == "never" and \
                   (wordsAndEmoticons[i-2] == "so" or wordsAndEmoticons[i-2] == "this") or \
                   (wordsAndEmoticons[i-1] == "so" or wordsAndEmoticons[i-1] == "this"):
                    v = v*1.25
                # otherwise, check for negation/nullification
                elif negated([wordsAndEmoticons[i-3]]):
                    v = v*n_scalar

                # check for special case idioms using a sentiment-laden keyword known to SAGE
                special_case_idioms = {"the shit": 3, "the bomb": 3, "bad ass": 1.5, "yeah right": -2,
                                       "cut the mustard": 2, "kiss of death": -1.5, "hand to mouth": -2}
                # future work: consider other sentiment-laden idioms
                #other_idioms = {"back handed": -2, "blow smoke": -2, "blowing smoke": -2, "upper hand": 1, "break a leg": 2,
                #                "cooking with gas": 2, "in the black": 2, "in the red": -2, "on the ball": 2, "under the weather": -2}
                onezero = "{} {}".format(str(wordsAndEmoticons[i-1]), str(wordsAndEmoticons[i]))
                twoonezero = "{} {} {}".format(str(wordsAndEmoticons[i-2]), str(wordsAndEmoticons[i-1]), str(wordsAndEmoticons[i]))
                twoone = "{} {}".format(str(wordsAndEmoticons[i-2]), str(wordsAndEmoticons[i-1]))
                threetwoone = "{} {} {}".format(str(wordsAndEmoticons[i-3]), str(wordsAndEmoticons[i-2]), str(wordsAndEmoticons[i-1]))
                threetwo = "{} {}".format(str(wordsAndEmoticons[i-3]), str(wordsAndEmoticons[i-2]))
                if onezero in special_case_idioms:
                    v = special_case_idioms[onezero]
                elif twoonezero in special_case_idioms:
                    v = special_case_idioms[twoonezero]
                elif twoone in special_case_idioms:
                    v = special_case_idioms[twoone]
                elif threetwoone in special_case_idioms:
                    v = special_case_idioms[threetwoone]
                elif threetwo in special_case_idioms:
                    v = special_case_idioms[threetwo]
                if len(wordsAndEmoticons)-1 > i:
                    zeroone = "{} {}".format(str(wordsAndEmoticons[i]), str(wordsAndEmoticons[i+1]))
                    if zeroone in special_case_idioms:
                        v = special_case_idioms[zeroone]
                if len(wordsAndEmoticons)-1 > i+1:
                    # note: the original format string dropped the third word; fixed to include it
                    zeroonetwo = "{} {} {}".format(str(wordsAndEmoticons[i]), str(wordsAndEmoticons[i+1]), str(wordsAndEmoticons[i+2]))
                    if zeroonetwo in special_case_idioms:
                        v = special_case_idioms[zeroonetwo]

                # check for booster/dampener bi-grams such as 'sort of' or 'kind of'
                if threetwo in booster_dict or twoone in booster_dict:
                    v = v+b_decr

            # check for negation case using "least"
            if i > 1 and str(wordsAndEmoticons[i-1]).lower() not in word_valence_dict \
               and str(wordsAndEmoticons[i-1]).lower() == "least":
                if (str(wordsAndEmoticons[i-2]).lower() != "at" and str(wordsAndEmoticons[i-2]).lower() != "very"):
                    v = v*n_scalar
            elif i > 0 and str(wordsAndEmoticons[i-1]).lower() not in word_valence_dict \
                 and str(wordsAndEmoticons[i-1]).lower() == "least":
                v = v*n_scalar
        sentiments.append(v)

    # check for modification in sentiment due to contrastive conjunction 'but'
    if 'but' in wordsAndEmoticons or 'BUT' in wordsAndEmoticons:
        try:
            bi = wordsAndEmoticons.index('but')
        except ValueError:
            bi = wordsAndEmoticons.index('BUT')
        for s in sentiments:
            si = sentiments.index(s)
            if si < bi:
                sentiments.pop(si)
                sentiments.insert(si, s*0.5)
            elif si > bi:
                sentiments.pop(si)
                sentiments.insert(si, s*1.5)

    if sentiments:
        sum_s = float(sum(sentiments))
        #print sentiments, sum_s

        # check for added emphasis resulting from exclamation points (up to 4 of them)
        ep_count = str(text).count("!")
        if ep_count > 4:
            ep_count = 4
        ep_amplifier = ep_count*0.292  # (empirically derived mean sentiment intensity rating increase for exclamation points)
        if sum_s > 0:
            sum_s += ep_amplifier
        elif sum_s < 0:
            sum_s -= ep_amplifier

        # check for added emphasis resulting from question marks (2 or 3+)
        qm_count = str(text).count("?")
        qm_amplifier = 0
        if qm_count > 1:
            if qm_count <= 3:
                qm_amplifier = qm_count*0.18
            else:
                qm_amplifier = 0.96
        if sum_s > 0:
            sum_s += qm_amplifier
        elif sum_s < 0:
            sum_s -= qm_amplifier

        compound = normalize(sum_s)

        # want separate positive versus negative sentiment scores
        pos_sum = 0.0
        neg_sum = 0.0
        neu_count = 0
        for sentiment_score in sentiments:
            if sentiment_score > 0:
                pos_sum += (float(sentiment_score) + 1)  # compensates for neutral words that are counted as 1
            if sentiment_score < 0:
                neg_sum += (float(sentiment_score) - 1)  # when used with math.fabs(), compensates for neutrals
            if sentiment_score == 0:
                neu_count += 1

        if pos_sum > math.fabs(neg_sum):
            pos_sum += (ep_amplifier+qm_amplifier)
        elif pos_sum < math.fabs(neg_sum):
            neg_sum -= (ep_amplifier+qm_amplifier)

        total = pos_sum + math.fabs(neg_sum) + neu_count
        pos = math.fabs(pos_sum / total)
        neg = math.fabs(neg_sum / total)
        neu = math.fabs(neu_count / total)

    else:
        compound = 0.0
        pos = 0.0
        neg = 0.0
        neu = 0.0

    s = {"neg": round(neg, 3),
         "neu": round(neu, 3),
         "pos": round(pos, 3),
         "compound": round(compound, 4)}
    return s


if __name__ == '__main__':
    # --- examples -------
    sentences = [
        "VADER is smart, handsome, and funny.",       # positive sentence example
        "VADER is smart, handsome, and funny!",       # punctuation emphasis handled correctly (sentiment intensity adjusted)
        "VADER is very smart, handsome, and funny.",  # booster words handled correctly (sentiment intensity adjusted)
        "VADER is VERY SMART, handsome, and FUNNY.",  # emphasis for ALLCAPS handled
        "VADER is VERY SMART, handsome, and FUNNY!!!",  # combination of signals - VADER appropriately adjusts intensity
        "VADER is VERY SMART, really handsome, and INCREDIBLY FUNNY!!!",  # booster words & punctuation make this close to ceiling for score
        "The book was good.",          # positive sentence
        "The book was kind of good.",  # qualified positive sentence is handled correctly (intensity adjusted)
        "The plot was good, but the characters are uncompelling and the dialog is not great.",  # mixed negation sentence
        "A really bad, horrible book.",        # negative sentence with booster words
        "At least it isn't a horrible book.",  # negated negative sentence with contraction
        ":) and :D",   # emoticons handled
        "",            # an empty string is correctly handled
        "Today sux",   # negative slang handled
        "Today sux!",  # negative slang with punctuation emphasis handled
        "Today SUX!",  # negative slang with capitalization emphasis
        "Today kinda sux! But I'll get by, lol"  # mixed sentiment example with slang and contrastive conjunction "but"
    ]
    paragraph = "It was one of the worst movies I've seen, despite good reviews. \
        Unbelievably bad acting!! Poor direction. VERY poor production. \
        The movie was bad. Very bad movie. VERY bad movie. VERY BAD movie. VERY BAD movie!"

    from nltk import tokenize
    lines_list = tokenize.sent_tokenize(paragraph)
    sentences.extend(lines_list)

    tricky_sentences = [
        "Most automated sentiment analysis tools are shit.",
        "VADER sentiment analysis is the shit.",
        "Sentiment analysis has never been good.",
        "Sentiment analysis with VADER has never been this good.",
        "Warren Beatty has never been so entertaining.",
        "I won't say that the movie is astounding and I wouldn't claim that the movie is too banal either.",
        "I like to hate Michael Bay films, but I couldn't fault this one",
        "It's one thing to watch an Uwe Boll film, but another thing entirely to pay for it",
        "The movie was too good",
        "This movie was actually neither that funny, nor super witty.",
        "This movie doesn't care about cleverness, wit or any other kind of intelligent humor.",
        "Those who find ugly meanings in beautiful things are corrupt without being charming.",
        "There are slow and repetitive parts, BUT it has just enough spice to keep it interesting.",
        "The script is not fantastic, but the acting is decent and the cinematography is EXCELLENT!",
        "Roger Dodger is one of the most compelling variations on this theme.",
        "Roger Dodger is one of the least compelling variations on this theme.",
        "Roger Dodger is at least compelling as a variation on the theme.",
        "they fall in love with the product",
        "but then it breaks",
        "usually around the time the 90 day warranty expires",
        "the twin towers collapsed today",
        "However, Mr. Carter solemnly argues, his client carried out the kidnapping under orders and in the ''least offensive way possible.''"
    ]
    sentences.extend(tricky_sentences)
    for sentence in sentences:
        print(sentence)
        ss = sentiment(sentence)
        print("\t" + str(ss))

    print("\n\n Done!")
74
community-plugins/sentiment-vader/vader_plugin.py
Normal file
@@ -0,0 +1,74 @@
# -*- coding: utf-8 -*-

from vaderSentiment import sentiment
from senpy.plugins import SentimentBox, SenpyPlugin
from senpy.models import Results, Sentiment, Entry
import logging


class VaderSentimentPlugin(SentimentBox):
    '''
    Sentiment classifier using the vaderSentiment module. Params accepted: Language: {en, es}. The output uses the Marl ontology developed at GSI UPM for the semantic web.
    '''
    name = "sentiment-vader"
    module = "sentiment-vader"
    author = "@icorcuera"
    version = "0.1.1"
    extra_params = {
        "language": {
            "description": "language of the input",
            "@id": "lang_rand",
            "aliases": ["language", "l"],
            "default": "auto",
            "options": ["es", "en", "auto"]
        },

        "aggregate": {
            "description": "Show only the strongest sentiment (aggregate) or all sentiments",
            "aliases": ["aggregate", "agg"],
            "options": [True, False],
            "default": False
        }

    }
    requirements = {}

    _VADER_KEYS = ['pos', 'neu', 'neg']
    binary = False

    def predict_one(self, features, activity):
        text_input = ' '.join(features)
        scores = sentiment(text_input)

        # Collect the vader scores in the order expected by SentimentBox.
        sentiments = []
        for k in self._VADER_KEYS:
            sentiments.append(scores[k])

        if activity.param('aggregate'):
            # Keep only the strongest sentiment and mask the others.
            m = max(sentiments)
            sentiments = [k if k == m else None for k in sentiments]

        return sentiments

    test_cases = [
        {
            'input': 'I am tired :(',
            'polarity': 'marl:Negative'
        },
        {
            'input': 'I love pizza :(',
            'polarity': 'marl:Positive'
        },
        {
            'input': 'I enjoy going to the cinema :)',
            'polarity': 'marl:Positive'
        },
        {
            'input': 'This cake is disgusting',
            'polarity': 'marl:Negative'
        },
    ]
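# Editorial note (sketch): unlike senticnet_plugin.py and meaningcloud_plugin.py,
# this file ends without a __main__ guard. Mirroring those plugins, the test
# cases above could be run through senpy's easy_test helper like this:
if __name__ == '__main__':
    from senpy import easy_test
    easy_test()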

7517
community-plugins/sentiment-vader/vader_sentiment_lexicon.txt
Normal file
File diff suppressed because it is too large