mirror of
https://github.com/gsi-upm/senpy
synced 2025-09-17 03:52:22 +00:00
Compare commits: 0.8.6 ... 0.8.8-fix1 (26 commits)

Commits (SHA1):
e31bca7016
7956d54c35
0f8d1dff69
236183593c
7637498517
ce83fb3981
28f29d159a
c803f60fd4
12eae16e37
f3372c27b6
b6de72a143
0f89b92457
ea91e3e4a4
f76b777b9f
e112dd55ce
60ef304108
7927cf1587
13cefbedfb
4ba9535d56
e582ef07d4
ef40bdb545
e0b4c76238
14c86ec38c
d3d05b3218
eababcadb0
7efece0224

.gitlab-ci.yml

@@ -1,18 +1,22 @@
image: gsiupm/dockermake:latest

# Uncomment if you want to use docker-in-docker
# image: gsiupm/dockermake:latest
# services:
# - docker:dind
# When using dind, it's wise to use the overlayfs driver for
# improved performance.

variables:
  DOCKER_DRIVER: overlay
  DOCKERFILE: Dockerfile
  IMAGENAME: $CI_REGISTRY_IMAGE

stages:
  - test
  - push
  - deploy
  - clean

before_script:
  - docker login -u $HUB_USER -p $HUB_PASSWORD

.test: &test_definition
  stage: test
  script:

@@ -28,11 +32,8 @@ test-2.7:
  variables:
    PYTHON_VERSION: "2.7"

.image: &image_definition
  stage: push
  before_script:
    - docker login -u gitlab-ci-token -p $CI_BUILD_TOKEN $CI_REGISTRY
  script:
    - make -e push-$PYTHON_VERSION
  only:

@@ -57,9 +58,44 @@ push-latest:
    - master
    - triggers

clean :
push-github:
  stage: deploy
  script:
    - make -e push-github
  only:
    - master
    - triggers

deploy_pypi:
  stage: deploy
  script: # Configure the PyPI credentials, then push the package, and cleanup the creds.
    - echo "[server-login]" >> ~/.pypirc
    - echo "username=" ${PYPI_USER} >> ~/.pypirc
    - echo "password=" ${PYPI_PASSWORD} >> ~/.pypirc
    - make pip_upload
    - echo "" > ~/.pypirc && rm ~/.pypirc # If the above fails, this won't run.
  only:
    - /^v\d+\.\d+\.\d+([abc]\d*)?$/ # PEP-440 compliant version (tags)
  except:
    - branches

deploy:
  stage: deploy
  environment: test
  script:
    - make -e deploy
  only:
    - master

clean_docker:
  stage: clean
  script:
    - make -e clean
  only:
    - master
  when: manual

cleanup_pypirc:
  stage: clean
  when: always # this is important; run even if preceding stages failed.
  script:
    - rm -vf ~/.pypirc # we don't want to leave these around, but GitLab may clean up anyway.
    - docker logout

Dockerfile template

@@ -17,6 +17,6 @@ WORKDIR /usr/src/app
COPY test-requirements.txt requirements.txt /usr/src/app/
RUN pip install --use-wheel -r test-requirements.txt -r requirements.txt
COPY . /usr/src/app/
RUN pip install --no-deps --no-index .
RUN pip install --no-index --no-deps --editable .

ENTRYPOINT ["python", "-m", "senpy", "-f", "/senpy-plugins/", "--host", "0.0.0.0"]

Makefile (73 changed lines)
@@ -1,14 +1,30 @@
NAME=senpy
VERSION=$(shell git describe --tags --dirty 2>/dev/null)
GITHUB_REPO=git@github.com:gsi-upm/senpy.git

IMAGENAME=gsiupm/senpy
IMAGEWTAG=$(IMAGENAME):$(VERSION)

PYVERSIONS=3.5 2.7
PYMAIN=$(firstword $(PYVERSIONS))
NAME=senpy
REPO=gsiupm
VERSION=$(shell git describe --tags --dirty 2>/dev/null)
TARNAME=$(NAME)-$(VERSION).tar.gz
IMAGENAME=$(REPO)/$(NAME)
IMAGEWTAG=$(IMAGENAME):$(VERSION)

DEVPORT=5000

TARNAME=$(NAME)-$(VERSION).tar.gz
action="test-${PYMAIN}"

KUBE_CA_PEM_FILE=""
KUBE_URL=""
KUBE_TOKEN=""
KUBE_NAMESPACE=$(NAME)
KUBECTL=docker run --rm -v $(KUBE_CA_PEM_FILE):/tmp/ca.pem -v $$PWD:/tmp/cwd/ -i lachlanevenson/k8s-kubectl --server="$(KUBE_URL)" --token="$(KUBE_TOKEN)" --certificate-authority="/tmp/ca.pem" -n $(KUBE_NAMESPACE)
CI_REGISTRY=docker.io
CI_REGISTRY_USER=gitlab
CI_BUILD_TOKEN=""
CI_COMMIT_REF_NAME=master


all: build run

.FORCE:

@@ -18,7 +34,7 @@ version: .FORCE
    @echo $(VERSION)

yapf:
    yapf -i -r senpy
    yapf -i -r $(NAME)
    yapf -i -r tests

init:

@@ -37,7 +53,7 @@ quick_build: $(addprefix build-, $(PYMAIN))
build: $(addprefix build-, $(PYVERSIONS))

build-%: version Dockerfile-%
    docker build -t '$(IMAGEWTAG)-python$*' -f Dockerfile-$* .;
    docker build -t '$(IMAGEWTAG)-python$*' --cache-from $(IMAGENAME):python$* -f Dockerfile-$* .;

quick_test: $(addprefix test-,$(PYMAIN))

@@ -53,8 +69,8 @@ dev: dev-$(PYMAIN)
test-all: $(addprefix test-,$(PYVERSIONS))

test-%: build-%
    docker run --rm --entrypoint /usr/local/bin/python -w /usr/src/app $(IMAGEWTAG)-python$* setup.py test
test-%:
    docker run --rm --entrypoint /usr/local/bin/python -w /usr/src/app $(IMAGENAME):python$* setup.py test

test: test-$(PYMAIN)

@@ -70,11 +86,14 @@ pip_test-%: sdist
pip_test: $(addprefix pip_test-,$(PYVERSIONS))

clean:
    @docker ps -a | awk '/$(REPO)\/$(NAME)/{ split($$2, vers, "-"); if(vers[0] != "${VERSION}"){ print $$1;}}' | xargs docker rm -v 2>/dev/null|| true
    @docker images | awk '/$(REPO)\/$(NAME)/{ split($$2, vers, "-"); if(vers[0] != "${VERSION}"){ print $$1":"$$2;}}' | xargs docker rmi 2>/dev/null|| true
    @docker rmi $(NAME)-dev 2>/dev/null || true
pip_upload: pip_test
    python setup.py sdist upload ;

clean:
    @docker ps -a | grep $(IMAGENAME) | awk '{ split($$2, vers, "-"); if(vers[0] != "${VERSION}"){ print $$1;}}' | xargs docker rm -v 2>/dev/null|| true
    @docker images | grep $(IMAGENAME) | awk '{ split($$2, vers, "-"); if(vers[0] != "${VERSION}"){ print $$1":"$$2;}}' | xargs docker rmi 2>/dev/null|| true
    @docker stop $(addprefix $(NAME)-dev,$(PYVERSIONS)) 2>/dev/null || true
    @docker rm $(addprefix $(NAME)-dev,$(PYVERSIONS)) 2>/dev/null || true

git_commit:
    git commit -a

@@ -85,20 +104,22 @@ git_tag:
git_push:
    git push --tags origin master

pip_upload: pip_test
    python setup.py sdist upload ;

run-%: build-%
    docker run --rm -p $(DEVPORT):5000 -ti '$(IMAGEWTAG)-python$(PYMAIN)' --default-plugins

run: run-$(PYMAIN)

push-latest: build-$(PYMAIN)
push-latest: $(addprefix push-latest-,$(PYVERSIONS))
    docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGEWTAG)'
    docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGENAME)'
    docker push '$(IMAGENAME):latest'
    docker push '$(IMAGEWTAG)'

push-latest-%: build-%
    docker tag $(IMAGENAME):$(VERSION)-python$* $(IMAGENAME):python$*
    docker push $(IMAGENAME):$(VERSION)-python$*
    docker push $(IMAGENAME):python$*

push-%: build-%
    docker push $(IMAGENAME):$(VERSION)-python$*

@@ -106,7 +127,21 @@ push: $(addprefix push-,$(PYVERSIONS))
    docker tag '$(IMAGEWTAG)-python$(PYMAIN)' '$(IMAGEWTAG)'
    docker push $(IMAGENAME):$(VERSION)

push-github:
    $(eval KEY_FILE := $(shell mktemp))
    @echo "$$GITHUB_DEPLOY_KEY" > $(KEY_FILE)
    @git remote rm github-deploy || true
    git remote add github-deploy $(GITHUB_REPO)
    @GIT_SSH_COMMAND="ssh -i $(KEY_FILE)" git push github-deploy $(CI_COMMIT_REF_NAME)
    rm $(KEY_FILE)

ci:
    gitlab-runner exec docker --docker-volumes /var/run/docker.sock:/var/run/docker.sock --env CI_PROJECT_NAME=$(NAME) ${action}

.PHONY: test test-% test-all build-% build test pip_test run yapf push-main push-% dev ci version .FORCE
deploy:
    @$(KUBECTL) delete secret $(CI_REGISTRY) || true
    @$(KUBECTL) create secret docker-registry $(CI_REGISTRY) --docker-server=$(CI_REGISTRY) --docker-username=$(CI_REGISTRY_USER) --docker-email=$(CI_REGISTRY_USER) --docker-password=$(CI_BUILD_TOKEN)
    @$(KUBECTL) apply -f /tmp/cwd/k8s/


.PHONY: test test-% test-all build-% build test pip_test run yapf push-main push-% dev ci version .FORCE deploy

docs/bad-examples/results/example-analysis-as-id-FAIL.json (new file, 78 lines)
@@ -0,0 +1,78 @@
{
  "@context": "http://mixedemotions-project.eu/ns/context.jsonld",
  "@id": "me:Result1",
  "@type": "results",
  "analysis": [
    "me:SAnalysis1",
    "me:SgAnalysis1",
    "me:EmotionAnalysis1",
    "me:NER1",
    {
      "@type": "analysis",
      "@id": "wrong"
    }
  ],
  "entries": [
    {
      "@id": "http://micro.blog/status1",
      "@type": [
        "nif:RFC5147String",
        "nif:Context"
      ],
      "nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
      "entities": [
        {
          "@id": "http://micro.blog/status1#char=5,13",
          "nif:beginIndex": 5,
          "nif:endIndex": 13,
          "nif:anchorOf": "Microsoft",
          "me:references": "http://dbpedia.org/page/Microsoft",
          "prov:wasGeneratedBy": "me:NER1"
        },
        {
          "@id": "http://micro.blog/status1#char=25,37",
          "nif:beginIndex": 25,
          "nif:endIndex": 37,
          "nif:anchorOf": "Windows Phone",
          "me:references": "http://dbpedia.org/page/Windows_Phone",
          "prov:wasGeneratedBy": "me:NER1"
        }
      ],
      "suggestions": [
        {
          "@id": "http://micro.blog/status1#char=16,77",
          "nif:beginIndex": 16,
          "nif:endIndex": 77,
          "nif:anchorOf": "put your Windows Phone on your newest #open technology program",
          "prov:wasGeneratedBy": "me:SgAnalysis1"
        }
      ],
      "sentiments": [
        {
          "@id": "http://micro.blog/status1#char=80,97",
          "nif:beginIndex": 80,
          "nif:endIndex": 97,
          "nif:anchorOf": "You'll be awesome.",
          "marl:hasPolarity": "marl:Positive",
          "marl:polarityValue": 0.9,
          "prov:wasGeneratedBy": "me:SAnalysis1"
        }
      ],
      "emotions": [
        {
          "@id": "http://micro.blog/status1#char=0,109",
          "nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
          "prov:wasGeneratedBy": "me:EAnalysis1",
          "onyx:hasEmotion": [
            {
              "onyx:hasEmotionCategory": "wna:liking"
            },
            {
              "onyx:hasEmotionCategory": "wna:excitement"
            }
          ]
        }
      ]
    }
  ]
}

docs/examples/results/example-analysis-as-id.json (new file, 74 lines)
@@ -0,0 +1,74 @@
{
  "@context": "http://mixedemotions-project.eu/ns/context.jsonld",
  "@id": "me:Result1",
  "@type": "results",
  "analysis": [
    "me:SAnalysis1",
    "me:SgAnalysis1",
    "me:EmotionAnalysis1",
    "me:NER1"
  ],
  "entries": [
    {
      "@id": "http://micro.blog/status1",
      "@type": [
        "nif:RFC5147String",
        "nif:Context"
      ],
      "nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
      "entities": [
        {
          "@id": "http://micro.blog/status1#char=5,13",
          "nif:beginIndex": 5,
          "nif:endIndex": 13,
          "nif:anchorOf": "Microsoft",
          "me:references": "http://dbpedia.org/page/Microsoft",
          "prov:wasGeneratedBy": "me:NER1"
        },
        {
          "@id": "http://micro.blog/status1#char=25,37",
          "nif:beginIndex": 25,
          "nif:endIndex": 37,
          "nif:anchorOf": "Windows Phone",
          "me:references": "http://dbpedia.org/page/Windows_Phone",
          "prov:wasGeneratedBy": "me:NER1"
        }
      ],
      "suggestions": [
        {
          "@id": "http://micro.blog/status1#char=16,77",
          "nif:beginIndex": 16,
          "nif:endIndex": 77,
          "nif:anchorOf": "put your Windows Phone on your newest #open technology program",
          "prov:wasGeneratedBy": "me:SgAnalysis1"
        }
      ],
      "sentiments": [
        {
          "@id": "http://micro.blog/status1#char=80,97",
          "nif:beginIndex": 80,
          "nif:endIndex": 97,
          "nif:anchorOf": "You'll be awesome.",
          "marl:hasPolarity": "marl:Positive",
          "marl:polarityValue": 0.9,
          "prov:wasGeneratedBy": "me:SAnalysis1"
        }
      ],
      "emotions": [
        {
          "@id": "http://micro.blog/status1#char=0,109",
          "nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
          "prov:wasGeneratedBy": "me:EAnalysis1",
          "onyx:hasEmotion": [
            {
              "onyx:hasEmotionCategory": "wna:liking"
            },
            {
              "onyx:hasEmotionCategory": "wna:excitement"
            }
          ]
        }
      ]
    }
  ]
}

docs/plugins.rst

@@ -183,7 +183,11 @@ Training a classifier can be time consuming. To avoid running the training
    def deactivate(self):
        self.close()

You can speficy a 'shelf_file' in your .senpy file. By default the ShelfMixin creates a file based on the plugin name and stores it in that plugin's folder.
You can specify a 'shelf_file' in your .senpy file. By default the ShelfMixin creates a file based on the plugin name and stores it in that plugin's folder.

Shelves may get corrupted if the plugin exits unexpectedly.
A corrupt shelf prevents the plugin from loading.
If you do not care about the pickle, you can force your plugin to remove the corrupted file and load anyway by setting 'force_shelf' to True in your .senpy file.
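
To make the shelf workflow concrete, here is a minimal, hypothetical plugin sketch; the class name and the 'count' field are invented for illustration, and 'shelf_file'/'force_shelf' would normally be set in the plugin's .senpy file:

```python
from senpy.plugins import AnalysisPlugin, ShelfMixin


class CountingPlugin(ShelfMixin, AnalysisPlugin):
    '''Hypothetical plugin that keeps a counter across restarts via its shelf.'''

    def activate(self):
        # self.sh behaves like a dict; it is unpickled from self.shelf_file on
        # first access (or raises if the file is corrupt and force_shelf is unset)
        self.sh.setdefault('count', 0)

    def analyse_entry(self, entry, params):
        self.sh['count'] += 1
        entry.times_analysed = self.sh['count']
        yield entry

    def deactivate(self):
        self.save()  # pickle the shelf back to self.shelf_file
```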

I want to implement my service as a plugin. How can I do it?
????????????????????????????????????????????????????????????

k8s/README.md (new file, 7 lines)
@@ -0,0 +1,7 @@
Deploy senpy to a kubernetes cluster.

Usage:

```
kubectl apply -f . -n senpy
```

k8s/senpy-deployment.yaml (new file, 25 lines)
@@ -0,0 +1,25 @@
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: senpy-latest
spec:
  replicas: 1
  template:
    metadata:
      labels:
        role: senpy-latest
        app: test
    spec:
      containers:
      - name: senpy-latest
        image: registry.cluster.gsi.dit.upm.es/senpy/senpy:latest
        args:
        - "--default-plugins"
        resources:
          limits:
            memory: "512Mi"
            cpu: "1000m"
        ports:
        - name: web
          containerPort: 5000

k8s/senpy-ingress.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: senpy-ingress
spec:
  rules:
  - host: latest.senpy.cluster.gsi.dit.upm.es
    http:
      paths:
      - path: /
        backend:
          serviceName: senpy-latest
          servicePort: 5000

k8s/senpy-svc.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
---
apiVersion: v1
kind: Service
metadata:
  name: senpy-latest
spec:
  type: ClusterIP
  ports:
  - port: 5000
    protocol: TCP
  selector:
    role: senpy-latest

requirements.txt

@@ -1,6 +1,6 @@
Flask>=0.10.1
requests>=2.4.1
gevent>=1.1rc4
tornado>=4.4.3
PyLD>=0.6.5
six
future

senpy/__main__.py

@@ -22,15 +22,16 @@ the server.
from flask import Flask
from senpy.extensions import Senpy
from gevent.wsgi import WSGIServer
from gevent.monkey import patch_all
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop


import logging
import os
import argparse
import senpy

patch_all(thread=False)

SERVER_PORT = os.environ.get("PORT", 5000)


@@ -77,7 +78,16 @@ def main():
        action='store_true',
        default=False,
        help='Do not run a server, only install plugin dependencies')
    parser.add_argument(
        '--version',
        '-v',
        action='store_true',
        default=False,
        help='Output the senpy version and exit')
    args = parser.parse_args()
    if args.version:
        print('Senpy version {}'.format(senpy.__version__))
        exit(1)
    logging.basicConfig()
    rl = logging.getLogger()
    rl.setLevel(getattr(logging, args.level))

@@ -92,9 +102,10 @@ def main():
    print('Server running on port %s:%d. Ctrl+C to quit' % (args.host,
                                                            args.port))
    if not app.debug:
        http_server = WSGIServer((args.host, args.port), app)
        http_server = HTTPServer(WSGIContainer(app))
        http_server.listen(args.port, address=args.host)
        try:
            http_server.serve_forever()
            IOLoop.instance().start()
        except KeyboardInterrupt:
            print('Bye!')
            http_server.stop()

senpy/client.py

@@ -1,6 +1,7 @@
import requests
import logging
from . import models
from .plugins import default_plugin_type

logger = logging.getLogger(__name__)


@@ -12,6 +13,10 @@ class Client(object):
    def analyse(self, input, method='GET', **kwargs):
        return self.request('/', method=method, input=input, **kwargs)

    def plugins(self, ptype=default_plugin_type):
        resp = self.request(path='/plugins', plugin_type=ptype).plugins
        return {p.name: p for p in resp}

    def request(self, path=None, method='GET', **params):
        url = '{}{}'.format(self.endpoint, path)
        response = requests.request(method=method, url=url, params=params)

senpy/extensions.py

@@ -178,12 +178,12 @@ class Senpy(object):
        except (Error, Exception) as ex:
            if not isinstance(ex, Error):
                ex = Error(message=str(ex), status=500)
            logger.error('Error returning analysis result')
            logger.exception('Error returning analysis result')
            raise ex
        return resp

    def _conversion_candidates(self, fromModel, toModel):
        candidates = self.filter_plugins(**{'@type': 'emotionConversionPlugin'})
        candidates = self.filter_plugins(plugin_type='emotionConversionPlugin')
        for name, candidate in candidates.items():
            for pair in candidate.onyx__doesConversion:
                logging.debug(pair)

@@ -195,7 +195,7 @@ class Senpy(object):
    def convert_emotions(self, resp, plugins, params):
        """
        Conversion of all emotions in a response.
        Conversion of all emotions in a response **in place**.
        In addition to converting from one model to another, it has
        to include the conversion plugin to the analysis list.
        Needless to say, this is far from an elegant solution, but it works.

@@ -220,7 +220,6 @@ class Senpy(object):
            e.parameters = params
            raise e
        newentries = []
        resp.analysis = set(resp.analysis)
        for i in resp.entries:
            if output == "full":
                newemotions = copy.deepcopy(i.emotions)

@@ -229,7 +228,7 @@ class Senpy(object):
            for j in i.emotions:
                plugname = j['prov:wasGeneratedBy']
                candidate = candidates[plugname]
                resp.analysis.add(candidate.id)
                resp.analysis.append(candidate.id)
                for k in candidate.convert(j, fromModel, toModel, params):
                    k.prov__wasGeneratedBy = candidate.id
                    if output == 'nested':

@@ -238,6 +237,7 @@ class Senpy(object):
            i.emotions = newemotions
            newentries.append(i)
        resp.entries = newentries
        resp.analysis = list(set(resp.analysis))

    @property
    def default_plugin(self):

@@ -303,6 +303,7 @@ class Senpy(object):
        else:
            th = Thread(target=act)
            th.start()
            return th

    def deactivate_plugin(self, plugin_name, sync=False):
        try:

@@ -327,6 +328,7 @@ class Senpy(object):
        else:
            th = Thread(target=deact)
            th.start()
            return th

    @classmethod
    def validate_info(cls, info):

@@ -417,33 +419,7 @@ class Senpy(object):
        return self._plugin_list

    def filter_plugins(self, **kwargs):
        """ Filter plugins by different criteria """
        ptype = kwargs.pop('plugin_type', None)
        logger.debug('#' * 100)
        logger.debug('ptype {}'.format(ptype))
        if ptype:
            try:
                ptype = ptype[0].upper() + ptype[1:]
                pclass = getattr(plugins, ptype)
                logger.debug('Class: {}'.format(pclass))
                candidates = filter(lambda x: isinstance(x, pclass),
                                    self.plugins.values())
            except AttributeError:
                raise Error('{} is not a valid type'.format(ptype))
        else:
            candidates = self.plugins.values()

        logger.debug(candidates)

        def matches(plug):
            res = all(getattr(plug, k, None) == v for (k, v) in kwargs.items())
            logger.debug(
                "matching {} with {}: {}".format(plug.name, kwargs, res))
            return res

        if kwargs:
            candidates = filter(matches, candidates)
        return {p.name: p for p in candidates}
        return plugins.pfilter(self.plugins, **kwargs)

    @property
    def analysis_plugins(self):

senpy/models.py

@@ -237,7 +237,10 @@ class BaseModel(SenpyMixin, dict):
        self.__setitem__(self._get_key(key), value)

    def __delattr__(self, key):
        self.__delitem__(self._get_key(key))
        try:
            object.__delattr__(self, key)
        except AttributeError:
            self.__delitem__(self._get_key(key))

    def _plain_dict(self):
        d = {k: v for (k, v) in self.items() if k[0] != "_"}
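
A small, hypothetical snippet to illustrate the fixed __delattr__: deletion now tries a real instance attribute first and only falls back to removing the underlying JSON-LD key (double underscores in attribute names map to colons in keys):

```python
from senpy.models import Entry

e = Entry()
e.nif__isString = 'hello'            # stored under the 'nif:isString' key
assert e['nif:isString'] == 'hello'
del e.nif__isString                  # not a real attribute, so the key is removed
assert 'nif:isString' not in e
```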

senpy/plugins/__init__.py

@@ -9,6 +9,7 @@ import logging
import tempfile
import copy
from .. import models
from ..api import API_PARAMS

logger = logging.getLogger(__name__)


@@ -95,7 +96,12 @@ class ShelfMixin(object):
        if not hasattr(self, '_sh') or self._sh is None:
            self.__dict__['_sh'] = {}
            if os.path.isfile(self.shelf_file):
                self.__dict__['_sh'] = pickle.load(open(self.shelf_file, 'rb'))
                try:
                    self.__dict__['_sh'] = pickle.load(open(self.shelf_file, 'rb'))
                except (IndexError, EOFError, pickle.UnpicklingError):
                    logger.warning('{} has a corrupted shelf file!'.format(self.id))
                    if not self.get('force_shelf', False):
                        raise
        return self._sh

    @sh.deleter

@@ -117,3 +123,40 @@ class ShelfMixin(object):
        if hasattr(self, '_sh') and self._sh is not None:
            with open(self.shelf_file, 'wb') as f:
                pickle.dump(self._sh, f)


default_plugin_type = API_PARAMS['plugin_type']['default']


def pfilter(plugins, **kwargs):
    """ Filter plugins by different criteria """
    if isinstance(plugins, models.Plugins):
        plugins = plugins.plugins
    elif isinstance(plugins, dict):
        plugins = plugins.values()
    ptype = kwargs.pop('plugin_type', default_plugin_type)
    logger.debug('#' * 100)
    logger.debug('ptype {}'.format(ptype))
    if ptype:
        try:
            ptype = ptype[0].upper() + ptype[1:]
            pclass = globals()[ptype]
            logger.debug('Class: {}'.format(pclass))
            candidates = filter(lambda x: isinstance(x, pclass),
                                plugins)
        except KeyError:
            raise models.Error('{} is not a valid type'.format(ptype))
    else:
        candidates = plugins

    logger.debug(candidates)

    def matches(plug):
        res = all(getattr(plug, k, None) == v for (k, v) in kwargs.items())
        logger.debug(
            "matching {} with {}: {}".format(plug.name, kwargs, res))
        return res

    if kwargs:
        candidates = filter(matches, candidates)
    return {p.name: p for p in candidates}
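
A hypothetical sketch of using the extracted pfilter helper on its own; the Senpy constructor arguments and the plugin folder are assumptions for illustration:

```python
from senpy.extensions import Senpy
from senpy.plugins import pfilter

sp = Senpy(plugin_folder='plugins')  # assumed local plugin folder

# Filter by plugin class (the first letter is upper-cased internally),
# then narrow down by arbitrary attributes such as the version.
sentiment = pfilter(sp.plugins, plugin_type='sentimentPlugin')
pinned = pfilter(sp.plugins, plugin_type='analysisPlugin', version='0.1')
print(list(sentiment), list(pinned))
```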

JSON-LD context

@@ -37,12 +37,12 @@
    "@type": "@id",
    "@container": "@set"
},
"plugins": {
    "@container": "@list"
},
"options": {
    "@container": "@set"
},
"plugins": {
    "@container": "@set"
},
"prov:wasGeneratedBy": {
    "@type": "@id"
},

plugins JSON schema

@@ -10,8 +10,6 @@
    "items": {
        "$ref": "plugin.json"
    }
},
"@type": {
}
}
}

results JSON schema

@@ -18,10 +18,16 @@
    "type": "string"
},
"analysis": {
    "type": "array",
    "default": [],
    "type": "array",
    "items": {
        "$ref": "analysis.json"
        "anyOf": [
            {
                "$ref": "analysis.json"
            },{
                "type": "string"
            }
        ]
    }
},
"entries": {

tests/plugins/async_plugin/asyncplugin.py (new file, 23 lines)
@@ -0,0 +1,23 @@
from senpy.plugins import AnalysisPlugin

import multiprocessing


def _train(process_number):
    return process_number


class AsyncPlugin(AnalysisPlugin):
    def _do_async(self, num_processes):
        pool = multiprocessing.Pool(processes=num_processes)
        values = pool.map(_train, range(num_processes))

        return values

    def activate(self):
        self.value = self._do_async(4)

    def analyse_entry(self, entry, params):
        values = self._do_async(2)
        entry.async_values = values
        yield entry

tests/plugins/async_plugin/asyncplugin.senpy (new file, 8 lines)
@@ -0,0 +1,8 @@
---
name: Async
module: asyncplugin
description: I am async
author: "@balkian"
version: '0.1'
async: true
extra_params: {}

tests/test_client.py

@@ -4,18 +4,21 @@ try:
except ImportError:
    from mock import patch

import json

from senpy.client import Client
from senpy.models import Results, Error
from senpy.models import Results, Plugins, Error
from senpy.plugins import AnalysisPlugin, default_plugin_type


class Call(dict):
    def __init__(self, obj):
        self.obj = obj.jsonld()
        self.obj = obj.serialize()
        self.status_code = 200
        self.content = self.json()

    def json(self):
        return self.obj
        return json.loads(self.obj)


class ModelsTest(TestCase):

@@ -44,3 +47,19 @@ class ModelsTest(TestCase):
            method='GET',
            params={'input': 'hello',
                    'algorithm': 'NONEXISTENT'})

    def test_plugins(self):
        endpoint = 'http://dummy/'
        client = Client(endpoint)
        plugins = Plugins()
        p1 = AnalysisPlugin({'name': 'AnalysisP1', 'version': 0, 'description': 'No'})
        plugins.plugins = [p1, ]
        success = Call(plugins)
        with patch('requests.request', return_value=success) as patched:
            response = client.plugins()
        assert isinstance(response, dict)
        assert len(response) == 1
        assert "AnalysisP1" in response
        patched.assert_called_with(
            url=endpoint + '/plugins', method='GET',
            params={'plugin_type': default_plugin_type})

tests/test_extensions.py

@@ -167,7 +167,7 @@ class ExtensionsTest(TestCase):
        assert len(senpy.plugins) > 1

    def test_convert_emotions(self):
        self.senpy.activate_all()
        self.senpy.activate_all(sync=True)
        plugin = Plugin({
            'id': 'imaginary',
            'onyx:usesEmotionModel': 'emoml:fsre-dimensions'

@@ -205,3 +205,15 @@ class ExtensionsTest(TestCase):
            [plugin, ],
            params)
        assert len(r3.entries[0].emotions) == 1
        r3.jsonld()

    # def test_async_plugin(self):
    #     """ We should accept multiprocessing plugins with async=False"""
    #     thread1 = self.senpy.activate_plugin("Async", sync=False)
    #     thread1.join(timeout=1)
    #     assert len(self.senpy.plugins['Async'].value) == 4

    #     resp = self.senpy.analyse(input='nothing', algorithm='Async')

    #     assert len(resp.entries[0].async_values) == 2
    #     self.senpy.activate_plugin("Async", sync=True)

tests/test_models.py

@@ -109,13 +109,15 @@ class ModelsTest(TestCase):
            }
        }})
        c = p.jsonld()
        assert "info" not in c
        assert "repo" not in c
        assert "extra_params" in c
        logging.debug("Framed:")
        assert '@type' in c
        assert c['@type'] == 'plugin'
        assert 'info' not in c
        assert 'repo' not in c
        assert 'extra_params' in c
        logging.debug('Framed:')
        logging.debug(c)
        p.validate()
        assert "es" in c['extra_params']['none']['options']
        assert 'es' in c['extra_params']['none']['options']
        assert isinstance(c['extra_params']['none']['options'], list)

    def test_str(self):

@@ -158,14 +160,20 @@ class ModelsTest(TestCase):
        g = rdflib.Graph().parse(data=t, format='turtle')
        assert len(g) == len(triples)

    def test_plugin_list(self):
        """The plugin list should be of type \"plugins\""""
        plugs = Plugins()
        c = plugs.jsonld()
        assert '@type' in c
        assert c['@type'] == 'plugins'

    def test_single_plugin(self):
        """A response with a single plugin should still return a list"""
        plugs = Plugins()
        for i in range(10):
            p = Plugin({'id': str(i),
                        'version': 0,
                        'description': 'dummy'})
            plugs.plugins.append(p)
        p = Plugin({'id': str(1),
                    'version': 0,
                    'description': 'dummy'})
        plugs.plugins.append(p)
        assert isinstance(plugs.plugins, list)
        js = plugs.jsonld()
        assert isinstance(js['plugins'], list)

tests/test_plugins.py

@@ -83,7 +83,39 @@ class PluginsTest(TestCase):
        res2 = a.analyse(input=1)
        assert res2.entries[0].nif__isString == 2

    def test_two(self):
    def test_corrupt_shelf(self):
        ''' Deal with a corrupted shelf file '''

        emptyfile = os.path.join(self.shelf_dir, "emptyfile")
        invalidfile = os.path.join(self.shelf_dir, "invalid_file")
        with open(emptyfile, 'w+b'), open(invalidfile, 'w+b') as inf:
            inf.write(b'ohno')

        files = {emptyfile: ['empty file', (EOFError, IndexError)],
                 invalidfile: ['invalid file', (pickle.UnpicklingError, IndexError)]}

        for fn in files:
            with open(fn, 'rb') as f:
                msg, error = files[fn]
                a = ShelfDummyPlugin(info={
                    'name': 'shelve',
                    'version': 'test',
                    'shelf_file': f.name
                })
                assert os.path.isfile(a.shelf_file)
                print('Shelf file: %s' % a.shelf_file)
                with self.assertRaises(error):
                    a.sh['a'] = 'fromA'
                    a.save()

                del a._sh
                assert os.path.isfile(a.shelf_file)
                a.force_shelf = True
                a.sh['a'] = 'fromA'
                a.save()
                b = pickle.load(f)
                assert b['a'] == 'fromA'

    def test_reuse_shelf(self):
        ''' Reusing the values of a previous shelf '''
        a = ShelfDummyPlugin(info={
            'name': 'shelve',