mirror of https://github.com/gsi-upm/senpy synced 2025-10-18 17:28:28 +00:00

Compare commits


13 Commits

Author SHA1 Message Date
J. Fernando Sánchez
45421f4613 Small tweaks in docs 2019-04-04 18:54:15 +02:00
J. Fernando Sánchez
7aa69e3d02 restore hash function in js 2019-04-04 17:32:54 +02:00
J. Fernando Sánchez
a20252e4bd Update docs + notebooks 2019-04-04 17:32:38 +02:00
J. Fernando Sánchez
9758a2977f Release 0.20 2019-04-04 12:36:35 +02:00
J. Fernando Sánchez
8a516d927e Multiple changes in the API, schemas and UI
Check out the CHANGELOG.md file for more information
2019-04-04 10:00:24 +02:00
J. Fernando Sánchez
4ba30304a4 New schema for parameters
* Improve extra requirement handling
* New mechanism to handle parameters beforehand in chained
  calls, and the ability to get help on available parameters in chained
  calls (through `?help`).
* Redefined Analysis, to reflect the new ontology
* Add parameters as an entity in the schema
* Update examples to include analyses and parameters
* Add processing plugins, with an interface similar to analysis plugins
* Update tests
* Avoid duplication in split plugin

Closes #51

Squashed commit of the following:

commit d145a852e7
commit 6a1069780b
commit ca69bddc17
commit aa35e62a27
2018-12-07 18:30:05 +01:00
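A hedged sketch of the new `?help` mechanism on a chained call, using `requests` (the endpoint and the particular plugin chain are assumptions for illustration, not part of this commit):

```python
# Sketch only: ask a chained (pipeline) call which parameters it accepts.
# The endpoint and the plugin chain (split + sentiment140) are illustrative assumptions.
import requests

res = requests.get('http://senpy.gsi.upm.es/api/split/sentiment140?help')
print(res.json())
```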
J. Fernando Sánchez
41aa142ce0 Refactored conversion and postprocessing 2018-11-22 17:27:43 +01:00
J. Fernando Sánchez
b48730137d Remove makefiles from auto push/pull 2018-11-06 17:12:54 +01:00
J. Fernando Sánchez
f1ec057b16 Add fetch to makefiles push 2018-11-06 17:02:59 +01:00
J. Fernando Sánchez
f6ca82cac8 Merge branch '56-exception-when-using-post' into 'master'
Replace algorithm list with a tuple

Closes #56

See merge request senpy/senpy!25
2018-11-06 14:56:00 +00:00
J. Fernando Sánchez
318acd5a71 Replace algorithm list with a tuple 2018-11-06 15:52:05 +01:00
J. Fernando Sánchez
c8f6f5613d Change CI to include make push
This replaces the makes for each python version with a simple `make push`.
It will also add a "main image" for each version, i.e. `gsiupm/senpy:1.0.0` in
addition to `gsiupm/senpy:1.0.0-python2.7` and `gsiupm/senpy:1.0.0-python3.5`.
2018-10-30 17:45:44 +01:00
J. Fernando Sánchez
748d1a00bd Fix bug in POST 2018-10-30 16:35:17 +01:00
126 changed files with 10072 additions and 46557 deletions

View File

@@ -21,39 +21,30 @@ before_script:
except:
- tags # Avoid unnecessary double testing
test-3.5:
test-3.6:
<<: *test_definition
variables:
PYTHON_VERSION: "3.5"
PYTHON_VERSION: "3.6"
test-2.7:
test-3.7:
<<: *test_definition
allow_failure: true
variables:
PYTHON_VERSION: "2.7"
PYTHON_VERSION: "3.7"
.image: &image_definition
push:
stage: push
script:
- make -e push-$PYTHON_VERSION
- make -e push
only:
- tags
- triggers
- fix-makefiles
push-3.5:
<<: *image_definition
variables:
PYTHON_VERSION: "3.5"
push-2.7:
<<: *image_definition
variables:
PYTHON_VERSION: "2.7"
push-latest:
<<: *image_definition
variables:
PYTHON_VERSION: latest
stage: push
script:
- make -e push-latest
only:
- master
- triggers
@@ -110,4 +101,3 @@ cleanup_py:
when: always # this is important; run even if preceding stages failed.
script:
- rm -vf ~/.pypirc # we don't want to leave these around, but GitLab may clean up anyway.
- docker logout

View File

@@ -22,7 +22,4 @@ else
rm $(KEY_FILE)
endif
push:: git-push
pull:: git-pull
.PHONY:: commit tag push git-push git-pull push-github
.PHONY:: commit tag git-push git-pull push-github

View File

@@ -1,17 +1,15 @@
makefiles-remote:
@git remote add makefiles ssh://git@lab.cluster.gsi.dit.upm.es:2200/docs/templates/makefiles.git 2>/dev/null || true
git ls-remote --exit-code makefiles 2> /dev/null || git remote add makefiles ssh://git@lab.cluster.gsi.dit.upm.es:2200/docs/templates/makefiles.git
makefiles-commit: makefiles-remote
git add -f .makefiles
git commit -em "Updated makefiles from ${NAME}"
makefiles-push:
git fetch makefiles $(NAME)
git subtree push --prefix=.makefiles/ makefiles $(NAME)
makefiles-pull: makefiles-remote
git subtree pull --prefix=.makefiles/ makefiles master --squash
pull:: makefiles-pull
push:: makefiles-push
.PHONY:: makefiles-remote makefiles-commit makefiles-push makefiles-pull pull push
.PHONY:: makefiles-remote makefiles-commit makefiles-push makefiles-pull

View File

@@ -29,7 +29,7 @@ build: $(addprefix build-, $(PYVERSIONS)) ## Build all images / python versions
docker tag $(IMAGEWTAG)-python$(PYMAIN) $(IMAGEWTAG)
build-%: version Dockerfile-% ## Build a specific version (e.g. build-2.7)
docker build -t '$(IMAGEWTAG)-python$*' -f Dockerfile-$* .;
docker build --pull -t '$(IMAGEWTAG)-python$*' -f Dockerfile-$* .;
dev-%: ## Launch a specific development environment using docker (e.g. dev-2.7)
@docker start $(NAME)-dev$* || (\

View File

@@ -6,7 +6,10 @@ services:
language: python
env:
- PYV=2.7
- PYV=3.4
- PYV=3.5
- PYV=3.6
- PYV=3.7
# - PYV=3.3 # Apt fails in this docker image
# run nosetests - Tests
script: make test-$PYV

59
CHANGELOG.md Normal file
View File

@@ -0,0 +1,59 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
### Fixed
* Restored hash changing function in `main.js`
## 0.20
### Added
* Objects can control the keys that will be used in `serialize`/`jsonld`/`as_dict` by specifying a list of keys in `terse_keys`.
e.g.
```python
>>> class MyModel(senpy.models.BaseModel):
... _terse_keys = ['visible']
... invisible = 5
... visible = 1
...
>>> m = MyModel(id='testing')
>>> m.jsonld()
{'invisible': 5, 'visible': 1, '@id': 'testing'}
>>> m.jsonld(verbose=False)
{'visible': 1}
```
* Configurable logging format.
* Added default terse keys for the most common classes (entry, sentiment, emotion...).
* Flag parameters (boolean) are set to true even when no value is added (e.g. `&verbose` is the same as `&verbose=true`).
* Plugin and parameter descriptions are now formatted with [showdown](https://github.com/showdownjs/showdown).
* The web UI requests extra_parameters from the server. This is useful for pipelines. See #52
* First batch of semantic tests (using SPARQL)
* `Plugin.path()` method to get a file path from a relative path (using the senpy data folder)
### Changed
* `install_deps` now checks what requirements are already met before installing with pip.
* Help is now provided verbosely by default
* Other outputs are terse by default. This means some properties are now hidden unless verbose is set.
* `sentiments` and `emotions` are now `marl:hasOpinion` and `onyx:hasEmotionSet`, respectively.
* Nicer logging format
* Context aliases (e.g. `sentiments` and `emotions` properties) have been replaced with the original properties (e.g. `marl:hasOpinion` and `onyx:hasEmotionSet`). To use the aliases, pass the `aliases` parameter.
* Several UI improvements
* Dedicated tab to show the list of plugins
* URLs in plugin descriptions are shown as links
* The format of the response is selected by clicking on a tab instead of selecting from a drop-down
  * List of examples
* Bootstrap v4
* RandEmotion and RandSentiment are no longer included in the base set of plugins
* The `--plugin-folder` option can be used more than once, and every folder will be added to the app.
### Deprecated
### Removed
* Python 2.7 is no longer tested or officially supported
### Fixed
* Plugin descriptions are now dedented when they are extracted from the docstring.
### Security
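A small sketch of the flag-parameter and verbosity behaviour described above, using `requests` against the public demo endpoint (the endpoint and plugin name are assumptions for illustration):

```python
# Sketch only: '&verbose' with no value behaves like '&verbose=true'.
# The endpoint and plugin name are illustrative assumptions.
import requests

url = 'http://senpy.gsi.upm.es/api/sentiment140'
terse = requests.get(url, params={'input': 'Senpy is awesome'}).json()
full = requests.get(url + '?verbose', params={'input': 'Senpy is awesome'}).json()

# Terse responses hide some properties unless verbose is set.
print(sorted(terse.keys()))
print(sorted(full.keys()))
```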

View File

@@ -5,7 +5,7 @@ IMAGENAME=gsiupm/senpy
# The first version is the main one (used for quick builds)
# See .makefiles/python.mk for more info
PYVERSIONS=3.5 2.7
PYVERSIONS=3.6 3.7
DEVPORT=5000

View File

@@ -1 +1 @@
web: python -m senpy --host 0.0.0.0 --port $PORT --default-plugins
web: python -m senpy --host 0.0.0.0 --port $PORT

View File

@@ -1,10 +1,19 @@
.. image:: img/header.png
:width: 100%
:target: http://demos.gsi.dit.upm.es/senpy
:target: http://senpy.gsi.upm.es
.. image:: https://travis-ci.org/gsi-upm/senpy.svg?branch=master
:target: https://travis-ci.org/gsi-upm/senpy
.. image:: https://lab.gsi.upm.es/senpy/senpy/badges/master/pipeline.svg
:target: https://lab.gsi.upm.es/senpy/senpy/commits/master
.. image:: https://lab.gsi.upm.es/senpy/senpy/badges/master/coverage.svg
:target: https://lab.gsi.upm.es/senpy/senpy/commits/master
.. image:: https://img.shields.io/pypi/l/requests.svg
:target: https://lab.gsi.upm.es/senpy/senpy/
Senpy lets you create sentiment analysis web services easily and quickly, using a well-known API.
As a bonus, senpy services use semantic vocabularies (e.g. `NIF <http://persistence.uni-leipzig.org/nlp2rdf/>`_, `Marl <http://www.gsi.dit.upm.es/ontologies/marl>`_, `Onyx <http://www.gsi.dit.upm.es/ontologies/onyx>`_) and formats (turtle, JSON-LD, xml-rdf).
@@ -12,7 +21,7 @@ Have you ever wanted to turn your sentiment analysis algorithms into a service?
With senpy, now you can.
It provides all the tools so you just have to worry about improving your algorithms:
`See it in action. <http://senpy.cluster.gsi.dit.upm.es/>`_
`See it in action. <http://senpy.gsi.upm.es/>`_
Installation
------------
@@ -38,9 +47,9 @@ If you want to install senpy globally, use sudo instead of the ``--user`` flag.
Docker Image
************
Build the image or use the pre-built one: ``docker run -ti -p 5000:5000 gsiupm/senpy --default-plugins``.
Build the image or use the pre-built one: ``docker run -ti -p 5000:5000 gsiupm/senpy``.
To add custom plugins, add a volume and tell senpy where to find the plugins: ``docker run -ti -p 5000:5000 -v <PATH OF PLUGINS>:/plugins gsiupm/senpy --default-plugins -f /plugins``
To add custom plugins, add a volume and tell senpy where to find the plugins: ``docker run -ti -p 5000:5000 -v <PATH OF PLUGINS>:/plugins gsiupm/senpy -f /plugins``
Developing
@@ -125,6 +134,16 @@ For more information, check out the `documentation <http://senpy.readthedocs.org
------------------------------------------------------------------------------------
Python 2.x compatibility
------------------------
Keeping compatibility between python 2.7 and 3.x is not always easy, especially for a framework that deals both with text and web requests.
Hence, starting February 2019, this project will no longer make efforts to support python 2.7, which will reach its end of life in 2020.
Most of the functionality should still work, and the compatibility shims will remain for now, but we cannot make any guarantees at this point.
Instead, the maintainers will focus their efforts on keeping the codebase compatible across different Python 3.3+ versions, including upcoming ones.
We apologize for the inconvenience.
Acknowledgement
---------------
This development has been partially funded by the European Union through the MixedEmotions Project (project number H2020 655632), as part of the `RIA ICT 15 Big data and Open Data Innovation and take-up` programme.

View File

@@ -1,4 +0,0 @@
import os
SERVER_PORT = os.environ.get("SERVER_PORT", 5000)
DEBUG = os.environ.get("DEBUG", True)

View File

@@ -24,6 +24,7 @@ I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " entr to watch for changes and continuously make HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@@ -49,6 +50,9 @@ help:
clean:
rm -rf $(BUILDDIR)/*
entr:
while true; do ag -g rst | entr -d make html; done
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,152 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Senpy in 1 minute\n",
"\n",
"This mini-tutorial only shows how to annotate with a service.\n",
"We will use the [demo server](http://senpy.gsi.upm.es), which runs some open source plugins for sentiment and emotion analysis."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Annotating with senpy is as simple as issuing an HTTP request to the API using your favourite tool.\n",
"This is just an example using curl:"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\r\n",
" \"@context\": \"http://senpy.gsi.upm.es/api/contexts/YXBpL3NlbnRpbWVudDE0MD8j\",\r\n",
" \"@type\": \"Results\",\r\n",
" \"entries\": [\r\n",
" {\r\n",
" \"@id\": \"prefix:\",\r\n",
" \"@type\": \"Entry\",\r\n",
" \"marl:hasOpinion\": [\r\n",
" {\r\n",
" \"@type\": \"Sentiment\",\r\n",
" \"marl:hasPolarity\": \"marl:Positive\",\r\n",
" \"prov:wasGeneratedBy\": \"prefix:Analysis_1554389334.6431913\"\r\n",
" }\r\n",
" ],\r\n",
" \"nif:isString\": \"Senpy is awesome\",\r\n",
" \"onyx:hasEmotionSet\": []\r\n",
" }\r\n",
" ]\r\n",
"}"
]
}
],
"source": [
"!curl \"http://senpy.gsi.upm.es/api/sentiment140\" --data-urlencode \"input=Senpy is awesome\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Congratulations**, you've used your first senpy service!"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Here is the equivalent using the `requests` library:"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"@context\": \"http://senpy.gsi.upm.es/api/contexts/YXBpL3NlbnRpbWVudDE0MD9pbnB1dD1TZW5weStpcythd2Vzb21lIw%3D%3D\",\n",
" \"@type\": \"Results\",\n",
" \"entries\": [\n",
" {\n",
" \"@id\": \"prefix:\",\n",
" \"@type\": \"Entry\",\n",
" \"marl:hasOpinion\": [\n",
" {\n",
" \"@type\": \"Sentiment\",\n",
" \"marl:hasPolarity\": \"marl:Positive\",\n",
" \"prov:wasGeneratedBy\": \"prefix:Analysis_1554389335.9803226\"\n",
" }\n",
" ],\n",
" \"nif:isString\": \"Senpy is awesome\",\n",
" \"onyx:hasEmotionSet\": []\n",
" }\n",
" ]\n",
"}\n"
]
}
],
"source": [
"import requests\n",
"res = requests.get('http://senpy.gsi.upm.es/api/sentiment140',\n",
" params={\"input\": \"Senpy is awesome\",})\n",
"print(res.text)"
]
}
],
"metadata": {
"anaconda-cloud": {},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
},
"toc": {
"colors": {
"hover_highlight": "#DAA520",
"running_highlight": "#FF0000",
"selected_highlight": "#FFD700"
},
"moveMenuLeft": true,
"nav_menu": {
"height": "68px",
"width": "252px"
},
"navigate_menu": true,
"number_sections": true,
"sideBar": true,
"threshold": 4,
"toc_cell": false,
"toc_section_display": "block",
"toc_window_display": false
}
},
"nbformat": 4,
"nbformat_minor": 1
}

File diff suppressed because it is too large Load Diff

81
docs/Quickstart.ipynb Normal file
View File

@@ -0,0 +1,81 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Quickstart\n",
"\n",
"This short tutorial will teach you how to consume senpy services for several tasks, and how to take advantage of the features of the framework.\n",
"\n",
"In particular, it covers:\n",
"\n",
"* Annotating text with sentiment and emotion using interoperable services\n",
"* Switching to different services (service interoperability)\n",
"* Getting results in different formats (Turtle, XML, text...)\n",
"* Asking for specific emotion models (automatic model conversion)\n",
"* Listing available services in an endpoint\n",
"* Calling multiple services in the same request (Pipelines)\n",
"\n",
"These topics are split into two separate tutorials.\n",
"\n",
"Reading all the sections is not necessary, although it is encouraged in order to get a glimpse of all the features."
]
},
{
"cell_type": "markdown",
"metadata": {
"nbsphinx-toctree": {
"maxdepth": 2
}
},
"source": [
"* [Senpy in 1 minute](./Quickstart-1minute.ipynb) shows how to query the API.\n",
"* [Senpy in 10 minutes](./Quickstart-10minutes.ipynb) introduces basic sentiment and emotion analysis.\n",
"* [Senpy in 30 minutes](./Quickstart-30minutes.ipynb) builds on the previous and adds more advanced functionalities, such as emotion conversion, field selection and pipelines."
]
}
],
"metadata": {
"anaconda-cloud": {},
"celltoolbar": "Edit Metadata",
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
},
"toc": {
"colors": {
"hover_highlight": "#DAA520",
"running_highlight": "#FF0000",
"selected_highlight": "#FFD700"
},
"moveMenuLeft": true,
"nav_menu": {
"height": "68px",
"width": "252px"
},
"navigate_menu": true,
"number_sections": true,
"sideBar": true,
"threshold": 4,
"toc_cell": false,
"toc_section_display": "block",
"toc_window_display": false
}
},
"nbformat": 4,
"nbformat_minor": 1
}

View File

@@ -1,317 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:05:31.465571Z",
"start_time": "2017-04-10T19:05:31.458282+02:00"
},
"deletable": true,
"editable": true
},
"source": [
"# Client"
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": true,
"deletable": true,
"editable": true
},
"source": [
"The built-in senpy client allows you to query any Senpy endpoint. We will illustrate how to use it with the public demo endpoint, and then show you how to spin up your own endpoint using docker."
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"Demo Endpoint\n",
"-------------"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"To start using senpy, simply create a new Client and point it to your endpoint. In this case, the latest version of Senpy at GSI."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:12.827640Z",
"start_time": "2017-04-10T19:29:12.818617+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"from senpy.client import Client\n",
"\n",
"c = Client('http://latest.senpy.cluster.gsi.dit.upm.es/api')\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"Now, let's use that client analyse some queries:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:14.011657Z",
"start_time": "2017-04-10T19:29:13.701808+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"r = c.analyse('I like sugar!!', algorithm='sentiment140')\n",
"r"
]
},
{
"cell_type": "markdown",
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:08:19.616754Z",
"start_time": "2017-04-10T19:08:19.610767+02:00"
},
"deletable": true,
"editable": true
},
"source": [
"As you can see, that gave us the full JSON result. A more concise way to print it would be:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:14.854213Z",
"start_time": "2017-04-10T19:29:14.842068+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"for entry in r.entries:\n",
" print('{} -> {}'.format(entry['text'], entry['sentiments'][0]['marl:hasPolarity']))"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"We can also obtain a list of available plugins with the client:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:16.245198Z",
"start_time": "2017-04-10T19:29:16.056545+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"c.plugins()"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"Or, more concisely:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:17.663275Z",
"start_time": "2017-04-10T19:29:17.484623+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"c.plugins().keys()"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"Local Endpoint\n",
"--------------"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"To run your own instance of senpy, just create a docker container with the latest Senpy image. Using `--default-plugins` you will get some extra plugins to start playing with the API."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:20.637539Z",
"start_time": "2017-04-10T19:29:19.938322+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"!docker run -ti --name 'SenpyEndpoint' -d -p 6000:5000 gsiupm/senpy:0.8.6 --host 0.0.0.0 --default-plugins"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"To use this endpoint:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:21.263976Z",
"start_time": "2017-04-10T19:29:21.260595+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"c_local = Client('http://127.0.0.1:6000/api')"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"That's all! After you are done with your analysis, stop the docker container:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2017-04-10T17:29:33.226686Z",
"start_time": "2017-04-10T19:29:22.392121+02:00"
},
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"!docker stop SenpyEndpoint\n",
"!docker rm SenpyEndpoint"
]
}
],
"metadata": {
"anaconda-cloud": {},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.0"
},
"toc": {
"colors": {
"hover_highlight": "#DAA520",
"running_highlight": "#FF0000",
"selected_highlight": "#FFD700"
},
"moveMenuLeft": true,
"nav_menu": {
"height": "68px",
"width": "252px"
},
"navigate_menu": true,
"number_sections": true,
"sideBar": true,
"threshold": 4,
"toc_cell": false,
"toc_section_display": "block",
"toc_window_display": false
}
},
"nbformat": 4,
"nbformat_minor": 1
}

View File

@@ -1,106 +0,0 @@
Client
======
Demo Endpoint
-------------
Import Client and send a request
.. code:: python
from senpy.client import Client
c = Client('http://latest.senpy.cluster.gsi.dit.upm.es/api')
r = c.analyse('I like Pizza', algorithm='sentiment140')
Print response
.. code:: python
for entry in r.entries:
print('{} -> {}'.format(entry['text'], entry['sentiments'][0]['marl:hasPolarity']))
.. parsed-literal::
I like Pizza -> marl:Positive
Obtain a list of available plugins
.. code:: python
for plugin in c.request('/plugins')['plugins']:
print(plugin['name'])
.. parsed-literal::
emoRand
rand
sentiment140
Local Endpoint
--------------
Run a docker container with Senpy image and default plugins
.. code::
docker run -ti --name 'SenpyEndpoint' -d -p 5000:5000 gsiupm/senpy:0.8.6 --host 0.0.0.0 --default-plugins
.. parsed-literal::
a0157cd98057072388bfebeed78a830da7cf0a796f4f1a3fd9188f9f2e5fe562
Import client and send a request to localhost
.. code:: python
c_local = Client('http://127.0.0.1:5000/api')
r = c_local.analyse('Hello world', algorithm='sentiment140')
Print response
.. code:: python
for entry in r.entries:
print('{} -> {}'.format(entry['text'], entry['sentiments'][0]['marl:hasPolarity']))
.. parsed-literal::
Hello world -> marl:Neutral
Obtain a list of available plugins deployed locally
.. code:: python
c_local.plugins().keys()
.. parsed-literal::
rand
sentiment140
emoRand
Stop the docker container
.. code:: python
!docker stop SenpyEndpoint
!docker rm SenpyEndpoint
.. parsed-literal::
SenpyEndpoint
SenpyEndpoint

View File

@@ -1,11 +0,0 @@
About
--------
If you use Senpy in your research, please cite `Senpy: A Pragmatic Linked Sentiment Analysis Framework <http://gsi.dit.upm.es/index.php/es/investigacion/publicaciones?view=publication&task=show&id=417>`__ (`BibTex <http://gsi.dit.upm.es/index.php/es/investigacion/publicaciones?controller=publications&task=export&format=bibtex&id=417>`__):
.. code-block:: text
Sánchez-Rada, J. F., Iglesias, C. A., Corcuera, I., & Araque, Ó. (2016, October).
Senpy: A Pragmatic Linked Sentiment Analysis Framework.
In Data Science and Advanced Analytics (DSAA),
2016 IEEE International Conference on (pp. 735-742). IEEE.

10
docs/advanced.rst Normal file
View File

@@ -0,0 +1,10 @@
Advanced usage
--------------
.. toctree::
:maxdepth: 1
server-cli
conversion
commandline
development

View File

@@ -25,7 +25,7 @@ NIF API
"@context":"http://127.0.0.1/api/contexts/Results.jsonld",
"@id":"_:Results_11241245.22",
"@type":"results"
"analysis": [
"activities": [
"plugins/sentiment-140_0.1"
],
"entries": [
@@ -73,7 +73,7 @@ NIF API
.. http:get:: /api/plugins
Returns a list of installed plugins.
**Example request**:
**Example request and response**:
.. sourcecode:: http
@@ -82,10 +82,6 @@ NIF API
Accept: application/json, text/javascript
**Example response**:
.. sourcecode:: http
{
"@id": "plugins/sentiment-140_0.1",
"@type": "sentimentPlugin",
@@ -143,19 +139,14 @@ NIF API
.. http:get:: /api/plugins/<pluginname>
Returns the information of a specific plugin.
**Example request**:
**Example request and response**:
.. sourcecode:: http
GET /api/plugins/rand/ HTTP/1.1
GET /api/plugins/sentiment-random/ HTTP/1.1
Host: localhost
Accept: application/json, text/javascript
**Example response**:
.. sourcecode:: http
{
"@context": "http://127.0.0.1/api/contexts/ExamplePlugin.jsonld",
"@id": "plugins/ExamplePlugin_0.1",

View File

@@ -1,5 +1,6 @@
API and Examples
################
API and vocabularies
####################
.. toctree::
vocabularies.rst

View File

@@ -2,7 +2,7 @@
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "me:Result1",
"@type": "results",
"analysis": [
"activities": [
"me:SAnalysis1",
"me:SgAnalysis1",
"me:EmotionAnalysis1",

View File

@@ -2,17 +2,13 @@
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "http://example.com#NIFExample",
"@type": "results",
"analysis": [
"activities": [
],
"entries": [
{
"@type": [
"nif:RFC5147String",
"nif:Context"
],
"nif:beginIndex": 0,
"nif:endIndex": 40,
"nif:isString": "My favourite actress is Natalie Portman"
"text": "An entry should have a nif:isString key"
}
]
}

View File

@@ -1,7 +1,8 @@
Command line
============
This video shows how to analyse text directly on the command line using the senpy tool.
Although the main use of senpy is to publish services, the tool can also be used locally to analyse text on the command line.
This is a short video demonstration:
.. image:: https://asciinema.org/a/9uwef1ghkjk062cw2t4mhzpyk.png
:width: 100%

View File

@@ -38,6 +38,8 @@ extensions = [
'sphinxcontrib.httpdomain',
'sphinx.ext.coverage',
'sphinx.ext.autosectionlabel',
'nbsphinx',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
@@ -54,7 +56,7 @@ master_doc = 'index'
# General information about the project.
project = u'Senpy'
copyright = u'2016, J. Fernando Sánchez'
copyright = u'2019, J. Fernando Sánchez'
description = u'A framework for sentiment and emotion analysis services'
# The version info for the project you're documenting, acts as replacement for
@@ -79,7 +81,9 @@ language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
exclude_patterns = ['_build', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
@@ -286,3 +290,13 @@ texinfo_documents = [
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
nbsphinx_prolog = """
.. note:: This page has been auto-generated from a Jupyter notebook using nbsphinx_.
The original source is available at: https://github.com/gsi-upm/senpy/tree/master/docs//{{ env.doc2path(env.docname, base=None) }}
.. _nbsphinx: https://nbsphinx.readthedocs.io/
----
"""

View File

@@ -7,9 +7,9 @@ Senpy includes experimental support for emotion/sentiment conversion plugins.
Use
===
Consider the original query: http://127.0.0.1:5000/api/?i=hello&algo=emoRand
Consider the original query: http://127.0.0.1:5000/api/?i=hello&algo=emotion-random
The requested plugin (emoRand) returns emotions using Ekman's model (or big6 in EmotionML):
The requested plugin (emotion-random) returns emotions using Ekman's model (or big6 in EmotionML):
.. code:: json
@@ -21,14 +21,14 @@ The requested plugin (emoRand) returns emotions using Ekman's model (or big6 in
"@type": "emotion",
"onyx:hasEmotionCategory": "emoml:big6anger"
},
"prov:wasGeneratedBy": "plugins/emoRand_0.1"
"prov:wasGeneratedBy": "plugins/emotion-random_0.1"
}
To get these emotions in VAD space (FSRE dimensions in EmotionML), we'd do this:
http://127.0.0.1:5000/api/?i=hello&algo=emoRand&emotionModel=emoml:fsre-dimensions
http://127.0.0.1:5000/api/?i=hello&algo=emotion-random&emotionModel=emoml:fsre-dimensions
This call, provided there is a valid conversion plugin from Ekman's to VAD, would return something like this:
@@ -42,7 +42,7 @@ This call, provided there is a valid conversion plugin from Ekman's to VAD, woul
"@type": "emotion",
"onyx:hasEmotionCategory": "emoml:big6anger"
},
"prov:wasGeneratedBy": "plugins/emoRand_0.1"
"prov:wasGeneratedBy": "plugins/emotion-random.1"
}, {
"@type": "emotionSet",
"onyx:hasEmotion": {
@@ -69,7 +69,7 @@ It is also possible to get the original emotion nested within the new converted
"@type": "emotion",
"onyx:hasEmotionCategory": "emoml:big6anger"
},
"prov:wasGeneratedBy": "plugins/emoRand_0.1"
"prov:wasGeneratedBy": "plugins/emotion-random.1"
"onyx:wasDerivedFrom": {
"@type": "emotionSet",
"onyx:hasEmotion": {

View File

@@ -1,16 +1,13 @@
Demo
----
There is a demo available on http://senpy.cluster.gsi.dit.upm.es/, where you can test a serie of different plugins.
There is a demo available on http://senpy.gsi.upm.es/, where you can test a live instance of Senpy, with several open source plugins.
You can use the playground (a web interface) or make HTTP requests to the service API.
.. image:: senpy-playground.png
:height: 400px
.. image:: playground-0.20.png
:target: http://senpy.gsi.upm.es
:width: 800px
:scale: 100 %
:align: center
Plugins Demo
============
The source code and description of the plugins used in the demo is available here: https://lab.cluster.gsi.dit.upm.es/senpy/senpy-plugins-community/.
The source code and description of the plugins used in the demo are available here: https://github.com/gsi-upm/senpy-plugins-community/.

27
docs/development.rst Normal file
View File

@@ -0,0 +1,27 @@
Developing new services
-----------------------
Developing web services can be hard.
To illustrate it, the figure below summarizes the typical features in a text analysis service.
.. image:: senpy-framework.png
:width: 60%
:align: center
Senpy implements all the common blocks, so developers can focus on what really matters: great analysis algorithms that solve real problems.
Among other things, Senpy takes care of these tasks:
* Interfacing with the user: parameter validation, error handling.
* Formatting: JSON-LD, Turtle/n-triples input and output, or simple text input
* Linked Data: senpy results are semantically annotated, using a series of well established vocabularies, and sane default URIs.
* User interface: a web UI where users can explore your service and test different settings
* A client to interact with the service. Currently only available in Python.
You only need to provide the algorithm to turn a piece of text into an annotation (a minimal sketch is shown below).
Sharing your sentiment analysis with the world has never been easier!
.. toctree::
:maxdepth: 1
plugins-quickstart
plugins-faq
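To give a feel for what "providing the algorithm" means in practice, here is a minimal, hedged sketch of an analysis plugin. The class name, author and polarity rule are invented for illustration; the real requirements are described in the plugin quickstart.

.. code:: python

    # Minimal illustrative plugin sketch (not the repository's example plugin).
    # The class name and the naive polarity rule are invented for this example.
    from senpy.plugins import SentimentPlugin
    from senpy.models import Sentiment

    class HelloSentiment(SentimentPlugin):
        '''Toy example: mark entries containing the word "good" as positive.'''
        author = 'example author'
        version = '0.1'

        def analyse_entry(self, entry, params):
            sentiment = Sentiment()
            sentiment['marl:hasPolarity'] = ('marl:Positive'
                                             if 'good' in entry['nif:isString'].lower()
                                             else 'marl:Neutral')
            entry.sentiments.append(sentiment)
            yield entry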

BIN
docs/evaluation-results.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 76 KiB

View File

@@ -1,5 +1,6 @@
Examples
------
--------
All the examples in this page use the :download:`the main schema <_static/schemas/definitions.json>`.
Simple NIF annotation
@@ -17,6 +18,7 @@ Sentiment Analysis
.....................
Description
,,,,,,,,,,,
This annotation corresponds to the sentiment analysis of an input. The example shows the sentiment represented according to Marl format.
The sentiments detected are contained in the Sentiments array with their related part of the text.

View File

@@ -2,11 +2,22 @@
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "me:Result1",
"@type": "results",
"analysis": [
"me:SAnalysis1",
"me:SgAnalysis1",
"me:EmotionAnalysis1",
"me:NER1"
"activities": [
{
"@id": "_:SAnalysis1_Activity",
"@type": "marl:SentimentAnalysis",
"prov:wasAssociatedWith": "me:SAnalysis1"
},
{
"@id": "_:EmotionAnalysis1_Activity",
"@type": "onyx:EmotionAnalysis",
"prov:wasAssociatedWith": "me:EmotionAnalysis1"
},
{
"@id": "_:NER1_Activity",
"@type": "me:NER",
"prov:wasAssociatedWith": "me:NER1"
}
],
"entries": [
{
@@ -23,7 +34,7 @@
"nif:endIndex": 13,
"nif:anchorOf": "Microsoft",
"me:references": "http://dbpedia.org/page/Microsoft",
"prov:wasGeneratedBy": "me:NER1"
"prov:wasGeneratedBy": "_:NER1_Activity"
},
{
"@id": "http://micro.blog/status1#char=25,37",
@@ -31,7 +42,7 @@
"nif:endIndex": 37,
"nif:anchorOf": "Windows Phone",
"me:references": "http://dbpedia.org/page/Windows_Phone",
"prov:wasGeneratedBy": "me:NER1"
"prov:wasGeneratedBy": "_:NER1_Activity"
}
],
"suggestions": [
@@ -40,7 +51,7 @@
"nif:beginIndex": 16,
"nif:endIndex": 77,
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
"prov:wasGeneratedBy": "me:SgAnalysis1"
"prov:wasGeneratedBy": "_:SgAnalysis1_Activity"
}
],
"sentiments": [
@@ -51,14 +62,14 @@
"nif:anchorOf": "You'll be awesome.",
"marl:hasPolarity": "marl:Positive",
"marl:polarityValue": 0.9,
"prov:wasGeneratedBy": "me:SAnalysis1"
"prov:wasGeneratedBy": "_:SgAnalysis1_Activity"
}
],
"emotions": [
{
"@id": "http://micro.blog/status1#char=0,109",
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"prov:wasGeneratedBy": "me:EAnalysis1",
"prov:wasGeneratedBy": "_:EmotionAnalysis1_Activity",
"onyx:hasEmotion": [
{
"onyx:hasEmotionCategory": "wna:liking"

View File

@@ -1,78 +0,0 @@
{
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "me:Result1",
"@type": "results",
"analysis": [
"me:SAnalysis1",
"me:SgAnalysis1",
"me:EmotionAnalysis1",
"me:NER1",
{
"@type": "analysis",
"@id": "anonymous"
}
],
"entries": [
{
"@id": "http://micro.blog/status1",
"@type": [
"nif:RFC5147String",
"nif:Context"
],
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"entities": [
{
"@id": "http://micro.blog/status1#char=5,13",
"nif:beginIndex": 5,
"nif:endIndex": 13,
"nif:anchorOf": "Microsoft",
"me:references": "http://dbpedia.org/page/Microsoft",
"prov:wasGeneratedBy": "me:NER1"
},
{
"@id": "http://micro.blog/status1#char=25,37",
"nif:beginIndex": 25,
"nif:endIndex": 37,
"nif:anchorOf": "Windows Phone",
"me:references": "http://dbpedia.org/page/Windows_Phone",
"prov:wasGeneratedBy": "me:NER1"
}
],
"suggestions": [
{
"@id": "http://micro.blog/status1#char=16,77",
"nif:beginIndex": 16,
"nif:endIndex": 77,
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
"prov:wasGeneratedBy": "me:SgAnalysis1"
}
],
"sentiments": [
{
"@id": "http://micro.blog/status1#char=80,97",
"nif:beginIndex": 80,
"nif:endIndex": 97,
"nif:anchorOf": "You'll be awesome.",
"marl:hasPolarity": "marl:Positive",
"marl:polarityValue": 0.9,
"prov:wasGeneratedBy": "me:SAnalysis1"
}
],
"emotions": [
{
"@id": "http://micro.blog/status1#char=0,109",
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"prov:wasGeneratedBy": "me:EAnalysis1",
"onyx:hasEmotion": [
{
"onyx:hasEmotionCategory": "wna:liking"
},
{
"onyx:hasEmotionCategory": "wna:excitement"
}
]
}
]
}
]
}

View File

@@ -1,19 +1,18 @@
{
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "http://example.com#NIFExample",
"@type": "results",
"analysis": [
],
"entries": [
{
"@id": "http://example.org#char=0,40",
"@type": [
"nif:RFC5147String",
"nif:Context"
],
"nif:beginIndex": 0,
"nif:endIndex": 40,
"nif:isString": "My favourite actress is Natalie Portman"
}
]
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "me:Result1",
"@type": "results",
"activities": [ ],
"entries": [
{
"@id": "http://example.org#char=0,40",
"@type": [
"nif:RFC5147String",
"nif:Context"
],
"nif:beginIndex": 0,
"nif:endIndex": 40,
"nif:isString": "My favourite actress is Natalie Portman"
}
]
}

View File

@@ -1,88 +1,100 @@
{
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "me:Result1",
"@type": "results",
"analysis": [
{
"@id": "me:SAnalysis1",
"@type": "marl:SentimentAnalysis",
"marl:maxPolarityValue": 1,
"marl:minPolarityValue": 0
},
{
"@id": "me:SgAnalysis1",
"@type": "me:SuggestionAnalysis"
},
{
"@id": "me:EmotionAnalysis1",
"@type": "me:EmotionAnalysis"
},
{
"@id": "me:NER1",
"@type": "me:NER"
}
],
"entries": [
{
"@id": "http://micro.blog/status1",
"@type": [
"nif:RFC5147String",
"nif:Context"
],
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"entities": [
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "me:Result1",
"@type": "results",
"activities": [
{
"@id": "http://micro.blog/status1#char=5,13",
"nif:beginIndex": 5,
"nif:endIndex": 13,
"nif:anchorOf": "Microsoft",
"me:references": "http://dbpedia.org/page/Microsoft",
"prov:wasGeneratedBy": "me:NER1"
"@id": "_:SAnalysis1_Activity",
"@type": "marl:SentimentAnalysis",
"prov:wasAssociatedWith": "me:SentimentAnalysis",
"prov:used": [
{
"name": "marl:maxPolarityValue",
"prov:value": "1"
},
{
"name": "marl:minPolarityValue",
"prov:value": "0"
}
]
},
{
"@id": "http://micro.blog/status1#char=25,37",
"nif:beginIndex": 25,
"nif:endIndex": 37,
"nif:anchorOf": "Windows Phone",
"me:references": "http://dbpedia.org/page/Windows_Phone",
"prov:wasGeneratedBy": "me:NER1"
}
],
"suggestions": [
"@id": "_:SgAnalysis1_Activity",
"prov:wasAssociatedWith": "me:SgAnalysis1",
"@type": "me:SuggestionAnalysis"
},
{
"@id": "http://micro.blog/status1#char=16,77",
"nif:beginIndex": 16,
"nif:endIndex": 77,
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
"prov:wasGeneratedBy": "me:SgAnalysis1"
}
],
"sentiments": [
"@id": "_:EmotionAnalysis1_Activity",
"@type": "me:EmotionAnalysis",
"prov:wasAssociatedWith": "me:EmotionAnalysis1"
},
{
"@id": "http://micro.blog/status1#char=80,97",
"nif:beginIndex": 80,
"nif:endIndex": 97,
"nif:anchorOf": "You'll be awesome.",
"marl:hasPolarity": "marl:Positive",
"marl:polarityValue": 0.9,
"prov:wasGeneratedBy": "me:SAnalysis1"
"@id": "_:NER1_Activity",
"@type": "me:NER",
"prov:wasAssociatedWith": "me:EmotionNER1"
}
],
"emotions": [
],
"entries": [
{
"@id": "http://micro.blog/status1#char=0,109",
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"prov:wasGeneratedBy": "me:EAnalysis1",
"onyx:hasEmotion": [
{
"onyx:hasEmotionCategory": "wna:liking"
},
{
"onyx:hasEmotionCategory": "wna:excitement"
}
]
"@id": "http://micro.blog/status1",
"@type": [
"nif:RFC5147String",
"nif:Context"
],
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"entities": [
{
"@id": "http://micro.blog/status1#char=5,13",
"nif:beginIndex": 5,
"nif:endIndex": 13,
"nif:anchorOf": "Microsoft",
"me:references": "http://dbpedia.org/page/Microsoft",
"prov:wasGeneratedBy": "me:NER1"
},
{
"@id": "http://micro.blog/status1#char=25,37",
"nif:beginIndex": 25,
"nif:endIndex": 37,
"nif:anchorOf": "Windows Phone",
"me:references": "http://dbpedia.org/page/Windows_Phone",
"prov:wasGeneratedBy": "me:NER1"
}
],
"suggestions": [
{
"@id": "http://micro.blog/status1#char=16,77",
"nif:beginIndex": 16,
"nif:endIndex": 77,
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
"prov:wasGeneratedBy": "me:SgAnalysis1"
}
],
"sentiments": [
{
"@id": "http://micro.blog/status1#char=80,97",
"nif:beginIndex": 80,
"nif:endIndex": 97,
"nif:anchorOf": "You'll be awesome.",
"marl:hasPolarity": "marl:Positive",
"marl:polarityValue": 0.9,
"prov:wasGeneratedBy": "me:SAnalysis1"
}
],
"emotions": [
{
"@id": "http://micro.blog/status1#char=0,109",
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"prov:wasGeneratedBy": "me:EAnalysis1",
"onyx:hasEmotion": [
{
"onyx:hasEmotionCategory": "wna:liking"
},
{
"onyx:hasEmotionCategory": "wna:excitement"
}
]
}
]
}
]
}
]
]
}

View File

@@ -2,10 +2,11 @@
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "me:Result1",
"@type": "results",
"analysis": [
"activities": [
{
"@id": "me:EmotionAnalysis1",
"@type": "onyx:EmotionAnalysis"
"@id": "me:EmotionAnalysis1_Activity",
"@type": "me:EmotionAnalysis1",
"prov:wasAssociatedWith": "me:EmotionAnalysis1"
}
],
"entries": [
@@ -26,7 +27,7 @@
{
"@id": "http://micro.blog/status1#char=0,109",
"nif:anchorOf": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"prov:wasGeneratedBy": "me:EmotionAnalysis1",
"prov:wasGeneratedBy": "_:EmotionAnalysis1_Activity",
"onyx:hasEmotion": [
{
"onyx:hasEmotionCategory": "wna:liking"

View File

@@ -2,10 +2,11 @@
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "me:Result1",
"@type": "results",
"analysis": [
"activities": [
{
"@id": "me:NER1",
"@type": "me:NERAnalysis"
"@id": "_:NER1_Activity",
"@type": "me:NERAnalysis",
"prov:wasAssociatedWith": "me:NER1"
}
],
"entries": [

View File

@@ -7,11 +7,17 @@
],
"@id": "me:Result1",
"@type": "results",
"analysis": [
"activities": [
{
"@id": "me:HesamsAnalysis",
"@id": "me:HesamsAnalysis_Activity",
"@type": "onyx:EmotionAnalysis",
"onyx:usesEmotionModel": "emovoc:pad-dimensions"
"prov:wasAssociatedWith": "me:HesamsAnalysis",
"prov:used": [
{
"name": "emotion-model",
"prov:value": "emovoc:pad-dimensions"
}
]
}
],
"entries": [
@@ -32,7 +38,7 @@
{
"@id": "Entry1#char=0,21",
"nif:anchorOf": "This is a test string",
"prov:wasGeneratedBy": "me:HesamAnalysis",
"prov:wasGeneratedBy": "_:HesamAnalysis_Activity",
"onyx:hasEmotion": [
{
"emovoc:pleasure": 0.5,

View File

@@ -2,12 +2,11 @@
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "me:Result1",
"@type": "results",
"analysis": [
"activities": [
{
"@id": "me:SAnalysis1",
"@id": "_:SAnalysis1_Activity",
"@type": "marl:SentimentAnalysis",
"marl:maxPolarityValue": 1,
"marl:minPolarityValue": 0
"prov:wasAssociatedWith": "me:SAnalysis1"
}
],
"entries": [
@@ -30,7 +29,7 @@
"nif:anchorOf": "You'll be awesome.",
"marl:hasPolarity": "marl:Positive",
"marl:polarityValue": 0.9,
"prov:wasGeneratedBy": "me:SAnalysis1"
"prov:wasGeneratedBy": "_:SAnalysis1_Activity"
}
],
"emotionSets": [

View File

@@ -2,8 +2,12 @@
"@context": "http://mixedemotions-project.eu/ns/context.jsonld",
"@id": "me:Result1",
"@type": "results",
"analysis": [
"me:SgAnalysis1"
"activities": [
{
"@id": "_:SgAnalysis1_Activity",
"@type": "me:SuggestionAnalysis",
"prov:wasAssociatedWith": "me:SgAnalysis1"
}
],
"entries": [
{
@@ -12,7 +16,6 @@
"nif:RFC5147String",
"nif:Context"
],
"prov:wasGeneratedBy": "me:SAnalysis1",
"nif:isString": "Dear Microsoft, put your Windows Phone on your newest #open technology program. You'll be awesome. #opensource",
"entities": [
],
@@ -22,7 +25,7 @@
"nif:beginIndex": 16,
"nif:endIndex": 77,
"nif:anchorOf": "put your Windows Phone on your newest #open technology program",
"prov:wasGeneratedBy": "me:SgAnalysis1"
"prov:wasGeneratedBy": "_:SgAnalysis1_Activity"
}
],
"sentiments": [

View File

@@ -1,35 +1,35 @@
Welcome to Senpy's documentation!
=================================
.. image:: https://readthedocs.org/projects/senpy/badge/?version=latest
:target: http://senpy.readthedocs.io/en/latest/
.. image:: https://badge.fury.io/py/senpy.svg
:target: https://badge.fury.io/py/senpy
.. image:: https://lab.cluster.gsi.dit.upm.es/senpy/senpy/badges/master/build.svg
:target: https://lab.cluster.gsi.dit.upm.es/senpy/senpy/commits/master
.. image:: https://lab.cluster.gsi.dit.upm.es/senpy/senpy/badges/master/coverage.svg
:target: https://lab.cluster.gsi.dit.upm.es/senpy/senpy/commits/master
.. image:: https://img.shields.io/pypi/l/requests.svg
:target: https://lab.cluster.gsi.dit.upm.es/senpy/senpy/
.. image:: https://readthedocs.org/projects/senpy/badge/?version=latest
:target: http://senpy.readthedocs.io/en/latest/
.. image:: https://badge.fury.io/py/senpy.svg
:target: https://badge.fury.io/py/senpy
.. image:: https://lab.gsi.upm.es/senpy/senpy/badges/master/build.svg
:target: https://lab.gsi.upm.es/senpy/senpy/commits/master
.. image:: https://lab.gsi.upm.es/senpy/senpy/badges/master/coverage.svg
:target: https://lab.gsi.upm.es/senpy/senpy/commits/master
.. image:: https://img.shields.io/pypi/l/requests.svg
:target: https://lab.gsi.upm.es/senpy/senpy/
Senpy is a framework for sentiment and emotion analysis services.
Services built with senpy are interchangeable and easy to use because they share a common :doc:`apischema`.
It also simplifies service development.
Senpy services are interchangeable and easy to use because they share a common semantic :doc:`apischema`.
If you are interested in consuming Senpy services, read :doc:`Quickstart`.
To get familiar with the concepts behind Senpy, and what it can offer for service developers, check out :doc:`development`.
:doc:`apischema` contains information about the semantic models and vocabularies used by Senpy.
.. image:: senpy-architecture.png
:width: 100%
:align: center
.. toctree::
:caption: Learn more about senpy:
:maxdepth: 2
:caption: Learn more about senpy:
:maxdepth: 2
senpy
demo
Quickstart.ipynb
installation
apischema
advanced
publications
senpy
installation
demo
usage
apischema
plugins
conversion
about

View File

@@ -32,27 +32,25 @@ If you want to install senpy globally, use sudo instead of the ``--user`` flag.
Docker Image
************
Build the image or use the pre-built one:
The base image of senpy comes with some builtin plugins that you can use:
.. code:: bash
docker run -ti -p 5000:5000 gsiupm/senpy --host 0.0.0.0 --default-plugins
docker run -ti -p 5000:5000 gsiupm/senpy --host 0.0.0.0
To add custom plugins, use a docker volume:
To add your custom plugins, you can use a docker volume:
.. code:: bash
docker run -ti -p 5000:5000 -v <PATH OF PLUGINS>:/plugins gsiupm/senpy --host 0.0.0.0 --default-plugins -f /plugins
docker run -ti -p 5000:5000 -v <PATH OF PLUGINS>:/plugins gsiupm/senpy --host 0.0.0.0 --plugins -f /plugins
Python 2
........
There is a Senpy version for python2 too:
There is a Senpy image for **python 2**, too:
.. code:: bash
docker run -ti -p 5000:5000 gsiupm/senpy:python2.7 --host 0.0.0.0 --default-plugins
docker run -ti -p 5000:5000 gsiupm/senpy:python2.7 --host 0.0.0.0
Alias
@@ -62,7 +60,7 @@ If you are using the docker approach regularly, it is advisable to use a script
.. code:: bash
alias senpy='docker run --rm -ti -p 5000:5000 -v $PWD:/senpy-plugins gsiupm/senpy --default-plugins'
alias senpy='docker run --rm -ti -p 5000:5000 -v $PWD:/senpy-plugins gsiupm/senpy'
Now, you may run senpy from any folder in your computer like so:

BIN
docs/playground-0.20.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 68 KiB

View File

@@ -110,4 +110,4 @@ Now, in a file named ``helloworld.py``:
entry.sentiments.append(sentiment)
yield entry
The complete code of the example plugin is available `here <https://lab.cluster.gsi.dit.upm.es/senpy/plugin-prueba>`__.
The complete code of the example plugin is available `here <https://lab.gsi.upm.es/senpy/plugin-prueba>`__.

View File

@@ -1,61 +1,18 @@
Developing new plugins
----------------------
This document contains the minimum to get you started with developing new analysis plugin.
For an example of conversion plugins, see :doc:`conversion`.
For a description of definition files, see :doc:`plugins-definition`.
A more step-by-step tutorial with slides is available `here <https://lab.cluster.gsi.dit.upm.es/senpy/senpy-tutorial>`__
F.A.Q.
======
.. contents:: :local:
What is a plugin?
=================
A plugin is a python object that can process entries. Given an entry, it will modify it, add annotations to it, or generate new entries.
What is an entry?
=================
Entries are objects that can be annotated.
In general, they will be a piece of text.
By default, entries are `NIF contexts <http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core/nif-core.html>`_ represented in JSON-LD format.
It is a dictionary/JSON object that looks like this:
.. code:: python
{
"@id": "<unique identifier or blank node name>",
"nif:isString": "input text",
"sentiments": [ {
...
}
],
...
}
Annotations are added to the object like this:
.. code:: python
entry = Entry()
entry.vocabulary__annotationName = 'myvalue'
entry['vocabulary:annotationName'] = 'myvalue'
entry['annotationNameURI'] = 'myvalue'
Where vocabulary is one of the prefixes defined in the default senpy context, and annotationURI is a full URI.
The value may be any valid JSON-LD dictionary.
For simplicity, senpy includes a series of models by default in the ``senpy.models`` module.
What are annotations?
=====================
#####################
They are objects just like entries.
Senpy ships with several default annotations, including: ``Sentiment``, ``Emotion``, ``EmotionSet``...
What's a plugin made of?
========================
########################
When receiving a query, senpy selects what plugin or plugins should process each entry, and in what order.
It also makes sure that every entry and the parameters provided by the user meet the plugin requirements.
@@ -65,7 +22,7 @@ Hence, two parts are necessary: 1) the code that will process the entry, and 2)
In practice, this is what a plugin looks like, tests included:
.. literalinclude:: ../senpy/plugins/example/rand_plugin.py
.. literalinclude:: ../example-plugins/rand_plugin.py
:emphasize-lines: 5-11
:language: python
@@ -73,37 +30,25 @@ In practice, this is what a plugin looks like, tests included:
The lines highlighted contain some information about the plugin.
In particular, the following information is mandatory:
* A unique name for the class. In our example, Rand.
* A unique name for the class. In our example, sentiment-random.
* The subclass/type of plugin. This is typically either `SentimentPlugin` or `EmotionPlugin`. However, new types of plugin can be created for different annotations. The only requirement is that these new types inherit from `senpy.Analysis`
* A description of the plugin. This can be done simply by adding a doc to the class.
* A version, which should get updated.
* An author name.
Plugins Code
============
The basic methods in a plugin are:
* analyse_entry: called in every user requests. It takes two parameters: ``Entry``, the entry object, and ``params``, the parameters supplied by the user. It should yield one or more ``Entry`` objects.
* activate: used to load memory-hungry resources. For instance, to train a classifier.
* deactivate: used to free up resources when the plugin is no longer needed.
Plugins are loaded asynchronously, so don't worry if the activate method takes too long. The plugin will be marked as activated once it has finished executing the method.
How does senpy find modules?
============================
############################
Senpy looks for files of two types:
* Python files of the form `senpy_<NAME>.py` or `<NAME>_plugin.py`. In these files, it will look for: 1) Instances that inherit from `senpy.Plugin`, or subclasses of `senpy.Plugin` that can be initialized without a configuration file. i.e. classes that contain all the required attributes for a plugin.
* Plugin definition files (see :doc:`advanced-plugins`)
* Plugin definition files (see :doc:`plugins-definition`)
Defining additional parameters
==============================
How can I define additional parameters for my plugin?
#####################################################
Your plugin may ask for additional parameters from the users of the service by using the attribute ``extra_params`` in your plugin definition.
Your plugin may ask for additional parameters from users by using the attribute ``extra_params`` in your plugin definition.
It takes a dictionary, where the keys are the name of the argument/parameter, and the value has the following fields:
* aliases: the different names which can be used in the request to use the parameter.
@@ -124,15 +69,16 @@ It takes a dictionary, where the keys are the name of the argument/parameter, an
Loading data and files
======================
How should I load external data and files
#########################################
Most plugins will need access to files (dictionaries, lexicons, etc.).
These files are usually heavy or under a license that does not allow redistribution.
For this reason, senpy has a `data_folder` that is separated from the source files.
The location of this folder is controlled programmatically or by setting the `SENPY_DATA` environment variable.
You can use the `self.path(filepath)` function to get the path of a given `filepath` within the data folder.
Plugins have a convenience function `self.open` which will automatically prepend the data folder to relative paths:
Plugins have a convenience function `self.open` which will automatically look for the file if it exists, or open a new one if it doesn't:
.. code:: python
@@ -144,7 +90,7 @@ Plugins have a convenience function `self.open` which will automatically prepend
file_in_data = <FILE PATH>
file_in_sources = <FILE PATH>
def activate(self):
def on activate(self):
with self.open(self.file_in_data) as f:
self._classifier = train_from_file(f)
file_in_source = os.path.join(self.get_folder(), self.file_in_sources)
@@ -155,8 +101,8 @@ Plugins have a convenience function `self.open` which will automatically prepend
It is good practice to specify the paths of these files in the plugin configuration, so the same code can be reused with different resources.
Docker image
============
Can I build a docker image for my plugin?
#########################################
Add the following dockerfile to your project to generate a docker image with your plugin:
@@ -187,7 +133,7 @@ And you can run it with:
docker run -p 5000:5000 gsiupm/exampleplugin
If the plugin uses non-source files (:ref:`loading data and files`), the recommended way is to use `SENPY_DATA` folder.
If the plugin uses non-source files (:ref:`How should I load external data and files`), the recommended way is to use `SENPY_DATA` folder.
Data can then be mounted in the container or added to the image.
The former is recommended for open source plugins with licensed resources, whereas the latter is the most convenient and can be used for private images.
@@ -204,17 +150,15 @@ Adding data to the image:
FROM gsiupm/senpy:1.0.1
COPY data /
F.A.Q.
======
What annotations can I use?
???????????????????????????
###########################
You can add almost any annotation to an entry.
The most common use cases are covered in the :doc:`apischema`.
Why does the analyse function yield instead of return?
??????????????????????????????????????????????????????
######################################################
This is so that plugins may add new entries to the response or filter some of them.
For instance, a chunker may split one entry into several.
@@ -222,7 +166,7 @@ On the other hand, a conversion plugin may leave out those entries that do not c
If I'm using a classifier, where should I train it?
???????????????????????????????????????????????????
###################################################
Training a classifier can be time consuming. To avoid running the training unnecessarily, you can use ShelfMixin to store the classifier. For instance:
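A minimal sketch, assuming the shelf is exposed as ``self.sh`` and with an invented training helper:

.. code:: python

    # Illustrative sketch only: reuse a trained model across restarts via the shelf.
    # The training helper is invented; the shelf attribute is assumed to be self.sh.
    from senpy.plugins import SentimentPlugin, ShelfMixin

    class CachedClassifier(ShelfMixin, SentimentPlugin):
        '''Sketch of a plugin that trains once and stores the result in its shelf.'''
        author = 'example author'
        version = '0.1'

        def train(self):
            return 'a trained classifier would be built here'

        def activate(self):
            if 'classifier' not in self.sh:
                self.sh['classifier'] = self.train()
            self._classifier = self.sh['classifier']

        def analyse_entry(self, entry, params):
            yield entry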
@@ -256,7 +200,7 @@ A corrupt shelf prevents the plugin from loading.
If you do not care about the data in the shelf, you can force your plugin to remove the corrupted file and load anyway: set 'force_shelf' to True in your plugin and start it again.
How can I turn an external service into a plugin?
?????????????????????????????????????????????????
#################################################
This example illustrates how to implement a plugin that accesses the Sentiment140 service.
@@ -292,8 +236,8 @@ This example ilustrate how to implement a plugin that accesses the Sentiment140
yield entry
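The full example is longer; a hedged sketch of the general pattern, using a hypothetical JSON endpoint instead of the real Sentiment140 API, looks like this:

.. code:: python

    import requests

    from senpy import models, plugins

    class ExternalService(plugins.SentimentPlugin):
        '''Sketch: delegate the analysis to an external HTTP service'''
        author = '@example'
        version = '0.1'
        endpoint = 'http://example.com/api/classify'  # hypothetical service

        def analyse_entry(self, entry, activity):
            # Assumes the service returns a JSON object with a 'polarity' field
            res = requests.post(self.endpoint, json={'text': entry.text}).json()
            s = models.Sentiment(marl__hasPolarity=res.get('polarity', 'marl:Neutral'))
            s.prov(activity)
            entry.sentiments.append(s)
            yield entry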
Can I activate a DEBUG mode for my plugin?
???????????????????????????????????????????
How can I activate a DEBUG mode for my plugin?
###############################################
You can activate DEBUG mode in the command-line tool using the -d option.
@@ -309,6 +253,6 @@ Additionally, with the ``--pdb`` option you will be dropped into a pdb post mort
python -m pdb yourplugin.py
Where can I find more code examples?
????????????????????????????????????
####################################
See: `<http://github.com/gsi-upm/senpy-plugins-community>`_.
@@ -0,0 +1,86 @@
Quickstart for service developers
=================================
This document contains the minimum to get you started with developing new services using Senpy.
For an example of conversion plugins, see :doc:`conversion`.
For a description of definition files, see :doc:`plugins-definition`.
A more step-by-step tutorial with slides is available `here <https://lab.gsi.upm.es/senpy/senpy-tutorial>`__
.. contents:: :local:
Installation
############
First of all, you need to install the package.
See :doc:`installation` for instructions.
Once installed, the `senpy` command should be available.
Architecture
############
The main component of a sentiment analysis service is the algorithm itself. However, for the algorithm to work, it needs to get the appropriate parameters from the user, format the results according to the defined API, interact with the user when errors occur or more information is needed, etc.
Senpy proposes a modular and dynamic architecture that allows:
* Implementing different algorithms in an extensible way, yet offering a common interface.
* Offering common services that facilitate development, so developers can focus on implementing new and better algorithms.
The framework consists of two main modules: Senpy core, which is the building block of the service, and Senpy plugins, which consist of the analysis algorithm. The next figure depicts a simplified version of the processes involved in an analysis with the Senpy framework.
.. image:: senpy-architecture.png
:width: 100%
:align: center
What is a plugin?
#################
A plugin is a python object that can process entries. Given an entry, it will modify it, add annotations to it, or generate new entries.
What is an entry?
#################
Entries are objects that can be annotated.
In general, they will be a piece of text.
By default, entries are `NIF contexts <http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core/nif-core.html>`_ represented in JSON-LD format.
It is a dictionary/JSON object that looks like this:
.. code:: python
{
"@id": "<unique identifier or blank node name>",
"nif:isString": "input text",
"sentiments": [ {
...
}
],
...
}
Annotations are added to the object like this:
.. code:: python
entry = Entry()
entry.vocabulary__annotationName = 'myvalue'
entry['vocabulary:annotationName'] = 'myvalue'
entry['annotationNameURI'] = 'myvalue'
Where vocabulary is one of the prefixes defined in the default senpy context, and annotationNameURI is a full URI.
The value may be any valid JSON-LD dictionary.
For simplicity, senpy includes a series of models by default in the ``senpy.models`` module.
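For instance, the annotation above could also be added through these model classes (a small sketch):

.. code:: python

    from senpy.models import Entry, Sentiment

    entry = Entry(nif__isString='senpy is awesome')
    entry.sentiments.append(Sentiment(marl__hasPolarity='marl:Positive'))
    print(entry.serialize())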
Plugins Code
############
The basic methods in a plugin are:
* analyse_entry: called in every user requests. It takes two parameters: ``Entry``, the entry object, and ``params``, the parameters supplied by the user. It should yield one or more ``Entry`` objects.
* activate: used to load memory-hungry resources. For instance, to train a classifier.
* deactivate: used to free up resources when the plugin is no longer needed.
Plugins are loaded asynchronously, so don't worry if the activate method takes too long. The plugin will be marked as activated once it is finished executing the method.
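Putting these pieces together, a bare-bones plugin could look like the sketch below (it assumes the ``easy`` helper, which starts a local server with the plugin loaded):

.. code:: python

    from senpy import plugins, easy

    class Reverse(plugins.AnalysisPlugin):
        '''Sketch: annotate each entry with its text reversed'''
        author = '@example'
        version = '0.1'

        def analyse_entry(self, entry, activity):
            entry['reversed'] = entry.text[::-1]
            yield entry

    if __name__ == '__main__':
        easy()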
docs/publications.rst
@@ -0,0 +1,46 @@
Publications
============
If you use Senpy in your research, please cite `Senpy: A Pragmatic Linked Sentiment Analysis Framework <http://gsi.dit.upm.es/index.php/es/investigacion/publicaciones?view=publication&task=show&id=417>`__ (`BibTex <http://gsi.dit.upm.es/index.php/es/investigacion/publicaciones?controller=publications&task=export&format=bibtex&id=417>`__):
.. code-block:: text
Sánchez-Rada, J. F., Iglesias, C. A., Corcuera, I., & Araque, Ó. (2016, October).
Senpy: A Pragmatic Linked Sentiment Analysis Framework.
In Data Science and Advanced Analytics (DSAA),
2016 IEEE International Conference on (pp. 735-742). IEEE.
Senpy uses Onyx for emotion representation, first introduced in:
.. code-block:: text
Sánchez-Rada, J. F., & Iglesias, C. A. (2016).
Onyx: A linked data approach to emotion representation.
Information Processing & Management, 52(1), 99-114.
Senpy uses Marl for sentiment representation, which was presented in:
.. code-block:: text
Westerski, A., Iglesias Fernandez, C. A., & Tapia Rico, F. (2011).
Linked opinions: Describing sentiments on the structured web of data.
Senpy has been used extensively in the toolbox of the MixedEmotions project:
.. code-block:: text
Buitelaar, P., Wood, I. D., Arcan, M., McCrae, J. P., Abele, A., Robin, C., … Tummarello, G. (2018).
MixedEmotions: An Open-Source Toolbox for Multi-Modal Emotion Analysis.
IEEE Transactions on Multimedia.
The representation models, formats and challenges are partially covered in a chapter of the book Sentiment Analysis in Social Networks:
.. code-block:: text
Iglesias, C. A., Sánchez-Rada, J. F., Vulcu, G., & Buitelaar, P. (2017).
Linked Data Models for Sentiment and Emotion Analysis in Social Networks.
In Sentiment Analysis in Social Networks (pp. 49-69).
@@ -1,2 +1,3 @@
sphinxcontrib-httpdomain>=1.4
ipykernel
nbsphinx
@@ -1,54 +1,27 @@
What is Senpy?
--------------
Senpy is a framework for text analysis using Linked Data. There are three main applications of Senpy so far: sentiment and emotion analysis, user profiling and entity recognition. Annotations and Services are compliant with NIF (NLP Interchange Format).
Senpy aims at providing a framework where analysis modules can be integrated easily as plugins, and providing a core functionality for managing tasks such as data validation, user interaction, formatting, logging, translation to linked data, etc.
The figure below summarizes the typical features in a text analysis service.
Senpy implements all the common blocks, so developers can focus on what really matters: great analysis algorithms that solve real problems.
.. image:: senpy-framework.png
:width: 60%
:align: center
Senpy for end users
===================
All services built using senpy share a common interface.
This allows users to use them (almost) interchangeably.
Senpy comes with a :ref:`built-in client`.
Senpy for service developers
============================
Senpy is a framework that turns your sentiment or emotion analysis algorithm into a full blown semantic service.
Senpy takes care of:
* Interfacing with the user: parameter validation, error handling.
* Formatting: JSON-LD, Turtle/n-triples input and output, or simple text input
* Linked Data: senpy results are semantically annotated, using a series of well established vocabularies, and sane default URIs.
* User interface: a web UI where users can explore your service and test different settings
* A client to interact with the service. Currently only available in Python.
Sharing your sentiment analysis with the world has never been easier!
Check out the :doc:`plugins` if you have developed an analysis algorithm (e.g. sentiment analysis) and you want to publish it as a service.
Architecture
============
The main component of a sentiment analysis service is the algorithm itself. However, for the algorithm to work, it needs to get the appropriate parameters from the user, format the results according to the defined API, interact with the user when errors occur or more information is needed, etc.
Senpy proposes a modular and dynamic architecture that allows:
* Implementing different algorithms in an extensible way, yet offering a common interface.
* Offering common services that facilitate development, so developers can focus on implementing new and better algorithms.
The framework consists of two main modules: Senpy core, which is the building block of the service, and Senpy plugins, which consist of the analysis algorithm. The next figure depicts a simplified version of the processes involved in an analysis with the Senpy framework.
Senpy is a framework for sentiment and emotion analysis services.
Its goal is to produce analysis services that are interchangeable and fully interoperable.
.. image:: senpy-architecture.png
:width: 100%
:align: center
All services built using senpy share a common interface.
This allows users to use them (almost) interchangeably, with the same API and tools, simply by pointing to a different URL or changing a parameter.
The common schema also makes it easier to evaluate the performance of different algorithms and services.
In fact, Senpy has a built-in evaluation API you can use to compare results with different algorithms.
Services can also use the common interface to communicate with each other.
And higher level features can be built on top of these services, such as automatic fusion of results, emotion model conversion, and service discovery.
These benefits are not limited to new services.
The community has developed wrappers for some proprietary and commercial services (such as sentiment140 and Meaning Cloud), so you can use them as well.
Senpy comes with a built-in client in the client package.
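As a quick sketch of the client (assuming a senpy server running locally on port 5000):

.. code:: python

    from senpy.client import Client

    client = Client('http://localhost:5000/api')
    result = client.analyse('I love this framework!')
    for entry in result.entries:
        print(entry.text, [s['marl:hasPolarity'] for s in entry.sentiments])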
To achieve this goal, Senpy uses a Linked Data principled approach, based on the NIF (NLP Interchange Format) specification, and open vocabularies such as Marl and Onyx.
You can learn more about this in :doc:`vocabularies`.
Check out :doc:`development` if you have developed an analysis algorithm (e.g. sentiment analysis) and you want to publish it as a service.
@@ -5,10 +5,11 @@ The senpy server is launched via the `senpy` command:
.. code:: text
usage: senpy [-h] [--level logging_level] [--debug] [--default-plugins]
[--host HOST] [--port PORT] [--plugins-folder PLUGINS_FOLDER]
[--only-install] [--only-list] [--data-folder DATA_FOLDER]
[--threaded] [--version]
usage: senpy [-h] [--level logging_level] [--log-format log_format] [--debug]
[--no-default-plugins] [--host HOST] [--port PORT]
[--plugins-folder PLUGINS_FOLDER] [--only-install] [--only-test]
[--test] [--only-list] [--data-folder DATA_FOLDER]
[--no-threaded] [--no-deps] [--version] [--allow-fail]
Run a Senpy server
@@ -16,20 +17,25 @@ The senpy server is launched via the `senpy` command:
-h, --help show this help message and exit
--level logging_level, -l logging_level
Logging level
--log-format log_format
Logging format
--debug, -d Run the application in debug mode
--default-plugins Load the default plugins
--no-default-plugins Do not load the default plugins
--host HOST Use 0.0.0.0 to accept requests from any host.
--port PORT, -p PORT Port to listen on.
--plugins-folder PLUGINS_FOLDER, -f PLUGINS_FOLDER
Where to look for plugins.
--only-install, -i Do not run a server, only install plugin dependencies
--only-test Do not run a server, just test all plugins
--test, -t Test all plugins before launching the server
--only-list, --list Do not run a server, only list plugins found
--data-folder DATA_FOLDER, --data DATA_FOLDER
Where to look for data. It can be set with the SENPY_DATA
environment variable as well.
--threaded Run a threaded server
--no-threaded Run the server without threading
--no-deps, -n Skip installing dependencies
--version, -v Output the senpy version and exit
--allow-fail, --fail Do not exit if some plugins fail to activate
When launched, the server will recursively look for plugins in the specified plugins folder (the current working directory by default).
@@ -40,9 +46,9 @@ Let's run senpy with the default plugins:
.. code:: bash
senpy -f . --default-plugins
senpy -f .
Now go to `http://localhost:5000 <http://localhost:5000>`_, you should be greeted by the senpy playground:
Now open your browser and go to `http://localhost:5000 <http://localhost:5000>`_, where you should be greeted by the senpy playground:
.. image:: senpy-playground.png
:width: 100%
@@ -51,9 +57,9 @@ Now go to `http://localhost:5000 <http://localhost:5000>`_, you should be greete
The playground is a user-friendly way to test your plugins, but you can always use the service directly: `http://localhost:5000/api?input=hello <http://localhost:5000/api?input=hello>`_.
By default, senpy will listen only on the `127.0.0.1` address.
That means you can only access the API from your PC (or localhost).
You can listen on a different address using the `--host` flag (e.g., 0.0.0.0).
By default, senpy will listen only on `127.0.0.1`.
That means you can only access the API from your PC (i.e. localhost).
You can listen on a different address using the `--host` flag (e.g., 0.0.0.0, to allow any computer to access it).
The default port is 5000.
You can change it with the `--port` flag.
@@ -1,15 +0,0 @@
Usage
-----
First of all, you need to install the package.
See :doc:`installation` for instructions.
Once installed, the `senpy` command should be available.
.. toctree::
:maxdepth: 1
server
SenpyClientUse
commandline
@@ -1,6 +1,6 @@
This is a collection of plugins that exemplify certain aspects of plugin development with senpy.
The first series of plugins the `basic` ones.
The first series of plugins are the `basic` ones.
Their starting point is a classification function defined in `basic.py`.
They all include testing, and running them as a script will run all tests.
In ascending order of customization, the plugins are:
@@ -19,5 +19,5 @@ In rest of the plugins show advanced topics:
All of the plugins in this folder include a set of test cases and they are periodically tested with the latest version of senpy.
Additionally, for an example of a stand-alone plugin that can be tested and deployed with docker, take a look at: lab.cluster.gsi.dit.upm.es/senpy/plugin-example
Additionally, for an example of a stand-alone plugin that can be tested and deployed with docker, take a look at: lab.gsi.upm.es/senpy/plugin-example
@@ -1,5 +1,5 @@
#!/usr/local/bin/python
# coding: utf-8
# -*- coding: utf-8 -*-
emoticons = {
'pos': [':)', ':]', '=)', ':D'],
@@ -7,17 +7,19 @@ emoticons = {
}
emojis = {
'pos': ['😁', '😂', '😃', '😄', '😆', '😅', '😄' '😍'],
'neg': ['😢', '😡', '😠', '😞', '😖', '😔', '😓', '😒']
'pos': [u'😁', u'😂', u'😃', u'😄', u'😆', u'😅', u'😄', u'😍'],
'neg': [u'😢', u'😡', u'😠', u'😞', u'😖', u'😔', u'😓', u'😒']
}
def get_polarity(text, dictionaries=[emoticons, emojis]):
polarity = 'marl:Neutral'
print('Input for get_polarity', text)
for dictionary in dictionaries:
for label, values in dictionary.items():
for emoticon in values:
if emoticon and emoticon in text:
polarity = label
break
print('Polarity', polarity)
return polarity
@@ -1,5 +1,5 @@
#!/usr/local/bin/python
# coding: utf-8
# -*- coding: utf-8 -*-
from senpy import easy_test, models, plugins
@@ -18,13 +18,13 @@ class BasicAnalyseEntry(plugins.SentimentPlugin):
'default': 'marl:Neutral'
}
def analyse_entry(self, entry, params):
def analyse_entry(self, entry, activity):
polarity = basic.get_polarity(entry.text)
polarity = self.mappings.get(polarity, self.mappings['default'])
s = models.Sentiment(marl__hasPolarity=polarity)
s.prov(self)
s.prov(activity)
entry.sentiments.append(s)
yield entry
@@ -1,5 +1,5 @@
#!/usr/local/bin/python
# coding: utf-8
# -*- coding: utf-8 -*-
from senpy import easy_test, SentimentBox
@@ -12,15 +12,13 @@ class BasicBox(SentimentBox):
author = '@balkian'
version = '0.1'
mappings = {
'pos': 'marl:Positive',
'neg': 'marl:Negative',
'default': 'marl:Neutral'
}
def predict_one(self, input):
output = basic.get_polarity(input)
return self.mappings.get(output, self.mappings['default'])
def predict_one(self, features, **kwargs):
output = basic.get_polarity(features[0])
if output == 'pos':
return [1, 0, 0]
if output == 'neg':
return [0, 0, 1]
return [0, 1, 0]
test_cases = [{
'input': 'Hello :)',
@@ -1,37 +1,36 @@
#!/usr/local/bin/python
# coding: utf-8
# -*- coding: utf-8 -*-
from senpy import easy_test, SentimentBox, MappingMixin
from senpy import easy_test, SentimentBox
import basic
class Basic(MappingMixin, SentimentBox):
class Basic(SentimentBox):
'''Provides sentiment annotation using a lexicon'''
author = '@balkian'
version = '0.1'
mappings = {
'pos': 'marl:Positive',
'neg': 'marl:Negative',
'default': 'marl:Neutral'
}
def predict_one(self, input):
return basic.get_polarity(input)
def predict_one(self, features, **kwargs):
output = basic.get_polarity(features[0])
if output == 'pos':
return [1, 0, 0]
if output == 'neu':
return [0, 1, 0]
return [0, 0, 1]
test_cases = [{
'input': 'Hello :)',
'input': u'Hello :)',
'polarity': 'marl:Positive'
}, {
'input': 'So sad :(',
'input': u'So sad :(',
'polarity': 'marl:Negative'
}, {
'input': 'Yay! Emojis 😁',
'input': u'Yay! Emojis 😁',
'polarity': 'marl:Positive'
}, {
'input': 'But no emoticons 😢',
'input': u'But no emoticons 😢',
'polarity': 'marl:Negative'
}]
@@ -1,5 +1,5 @@
#!/usr/local/bin/python
# coding: utf-8
# -*- coding: utf-8 -*-
from senpy import easy_test, models, plugins
@@ -16,7 +16,7 @@ class Dictionary(plugins.SentimentPlugin):
mappings = {'pos': 'marl:Positive', 'neg': 'marl:Negative'}
def analyse_entry(self, entry, params):
def analyse_entry(self, entry, *args, **kwargs):
polarity = basic.get_polarity(entry.text, self.dictionaries)
if polarity in self.mappings:
polarity = self.mappings[polarity]
@@ -6,12 +6,13 @@ from senpy.models import EmotionSet, Emotion, Entry
class EmoRand(EmotionPlugin):
'''A sample plugin that returns a random emotion annotation'''
name = 'emotion-random'
author = '@balkian'
version = '0.1'
url = "https://github.com/gsi-upm/senpy-plugins-community"
onyx__usesEmotionModel = "emoml:big6"
def analyse_entry(self, entry, params):
def analyse_entry(self, entry, activity):
category = "emoml:big6happiness"
number = max(-1, min(1, random.gauss(0, 0.5)))
if number > 0:
@@ -19,7 +20,7 @@ class EmoRand(EmotionPlugin):
emotionSet = EmotionSet()
emotion = Emotion({"onyx:hasEmotionCategory": category})
emotionSet.onyx__hasEmotion.append(emotion)
emotionSet.prov__wasGeneratedBy = self.id
emotionSet.prov(activity)
entry.emotions.append(emotionSet)
yield entry
@@ -27,6 +28,6 @@ class EmoRand(EmotionPlugin):
params = dict()
results = list()
for i in range(100):
res = next(self.analyse_entry(Entry(nif__isString="Hello"), params))
res = next(self.analyse_entry(Entry(nif__isString="Hello"), self.activity(params)))
res.validate()
results.append(res.emotions[0]['onyx:hasEmotion'][0]['onyx:hasEmotionCategory'])
@@ -1,5 +1,5 @@
#!/usr/local/bin/python
# coding: utf-8
# -*- coding: utf-8 -*-
from senpy import easy_test, models, plugins
@@ -25,7 +25,8 @@ class ParameterizedDictionary(plugins.SentimentPlugin):
}
}
def analyse_entry(self, entry, params):
def analyse_entry(self, entry, activity):
params = activity.params
positive_words = params['positive-words'].split(',')
negative_words = params['negative-words'].split(',')
dictionary = {
@@ -35,7 +36,7 @@ class ParameterizedDictionary(plugins.SentimentPlugin):
polarity = basic.get_polarity(entry.text, [dictionary])
s = models.Sentiment(marl__hasPolarity=polarity)
s.prov(self)
s.prov(activity)
entry.sentiments.append(s)
yield entry
@@ -2,15 +2,16 @@ import random
from senpy import SentimentPlugin, Sentiment, Entry
class Rand(SentimentPlugin):
class RandSent(SentimentPlugin):
'''A sample plugin that returns a random sentiment annotation'''
name = 'sentiment-random'
author = "@balkian"
version = '0.1'
url = "https://github.com/gsi-upm/senpy-plugins-community"
marl__maxPolarityValue = '1'
marl__minPolarityValue = "-1"
def analyse_entry(self, entry, params):
def analyse_entry(self, entry, activity):
polarity_value = max(-1, min(1, random.gauss(0.2, 0.2)))
polarity = "marl:Neutral"
if polarity_value > 0:
@@ -19,7 +20,7 @@ class Rand(SentimentPlugin):
polarity = "marl:Negative"
sentiment = Sentiment(marl__hasPolarity=polarity,
marl__polarityValue=polarity_value)
sentiment.prov(self)
sentiment.prov(activity)
entry.sentiments.append(sentiment)
yield entry
@@ -28,8 +29,9 @@ class Rand(SentimentPlugin):
params = dict()
results = list()
for i in range(50):
activity = self.activity(params)
res = next(self.analyse_entry(Entry(nif__isString="Hello"),
params))
activity))
res.validate()
results.append(res.sentiments[0]['marl:hasPolarity'])
assert 'marl:Positive' in results
@@ -1,25 +1,20 @@
from senpy import SentimentBox, MappingMixin, easy_test
from senpy import SentimentBox, easy_test
from mypipeline import pipeline
class PipelineSentiment(MappingMixin, SentimentBox):
'''
This is a pipeline plugin that wraps a classifier defined in another module
(mypipeline).
'''
class PipelineSentiment(SentimentBox):
'''This is a pipeline plugin that wraps a classifier defined in another module
(mypipeline).'''
author = '@balkian'
version = 0.1
maxPolarityValue = 1
minPolarityValue = -1
mappings = {
1: 'marl:Positive',
-1: 'marl:Negative'
}
def predict_one(self, input):
return pipeline.predict([input, ])[0]
def predict_one(self, features, **kwargs):
if pipeline.predict(features) > 0:
return [1, 0, 0]
return [0, 0, 1]
test_cases = [
{
@@ -1 +1 @@
gsitk
gsitk>0.1.9.1
@@ -15,8 +15,6 @@ spec:
- name: senpy-latest
image: $IMAGEWTAG
imagePullPolicy: Always
args:
- "--default-plugins"
resources:
limits:
memory: "512Mi"
@@ -12,3 +12,10 @@ spec:
backend:
serviceName: senpy-latest
servicePort: 5000
- host: latest.senpy.gsi.upm.es
http:
paths:
- path: /
backend:
serviceName: senpy-latest
servicePort: 5000
@@ -11,5 +11,6 @@ rdflib
rdflib-jsonld
numpy
scipy
scikit-learn
scikit-learn>=0.20
responses
jmespath
@@ -40,8 +40,14 @@ def main():
'-l',
metavar='logging_level',
type=str,
default="WARN",
default="INFO",
help='Logging level')
parser.add_argument(
'--log-format',
metavar='log_format',
type=str,
default='%(asctime)s %(levelname)-10s %(name)-30s \t %(message)s',
help='Logging format')
parser.add_argument(
'--debug',
'-d',
@@ -49,10 +55,10 @@ def main():
default=False,
help='Run the application in debug mode')
parser.add_argument(
'--default-plugins',
'--no-default-plugins',
action='store_true',
default=False,
help='Load the default plugins')
help='Do not load the default plugins')
parser.add_argument(
'--host',
type=str,
@@ -68,7 +74,7 @@ def main():
'--plugins-folder',
'-f',
type=str,
default='.',
action='append',
help='Where to look for plugins.')
parser.add_argument(
'--only-install',
@@ -100,10 +106,10 @@ def main():
default=None,
help='Where to look for data. It can be set with the SENPY_DATA environment variable as well.')
parser.add_argument(
'--threaded',
action='store_false',
default=True,
help='Run a threaded server')
'--no-threaded',
action='store_true',
default=False,
help='Run a single-threaded server')
parser.add_argument(
'--no-deps',
'-n',
@@ -123,30 +129,42 @@ def main():
default=False,
help='Do not exit if some plugins fail to activate')
args = parser.parse_args()
print('Senpy version {}'.format(senpy.__version__))
print(sys.version)
if args.version:
print('Senpy version {}'.format(senpy.__version__))
print(sys.version)
exit(1)
rl = logging.getLogger()
rl.setLevel(getattr(logging, args.level))
logger_handler = rl.handlers[0]
# First, generic formatter:
logger_handler.setFormatter(logging.Formatter(args.log_format))
app = Flask(__name__)
app.debug = args.debug
sp = Senpy(app, args.plugins_folder,
default_plugins=args.default_plugins,
sp = Senpy(app,
plugin_folder=None,
default_plugins=not args.no_default_plugins,
data_folder=args.data_folder)
folders = list(args.plugins_folder) if args.plugins_folder else []
if not folders:
folders.append(".")
for p in folders:
sp.add_folder(p)
plugins = sp.plugins(plugin_type=None, is_activated=False)
maxname = max(len(x.name) for x in plugins)
maxversion = max(len(str(x.version)) for x in plugins)
print('Found {} plugins:'.format(len(plugins)))
for plugin in plugins:
import inspect
fpath = inspect.getfile(plugin.__class__)
print('\t{: <{maxname}} @ {: <{maxversion}} -> {}'.format(plugin.name,
plugin.version,
fpath,
maxname=maxname,
maxversion=maxversion))
if args.only_list:
plugins = sp.plugins()
maxname = max(len(x.name) for x in plugins)
maxversion = max(len(x.version) for x in plugins)
print('Found {} plugins:'.format(len(plugins)))
for plugin in plugins:
import inspect
fpath = inspect.getfile(plugin.__class__)
print('\t{: <{maxname}} @ {: <{maxversion}} -> {}'.format(plugin.name,
plugin.version,
fpath,
maxname=maxname,
maxversion=maxversion))
return
if not args.no_deps:
sp.install_deps()
@@ -160,10 +178,13 @@ def main():
print('Senpy version {}'.format(senpy.__version__))
print('Server running on port %s:%d. Ctrl+C to quit' % (args.host,
args.port))
app.run(args.host,
args.port,
threaded=args.threaded,
debug=app.debug)
try:
app.run(args.host,
args.port,
threaded=not args.no_threaded,
debug=app.debug)
except KeyboardInterrupt:
print('Bye!')
sp.deactivate_all()
@@ -3,27 +3,33 @@ from .models import Error, Results, Entry, from_string
import logging
logger = logging.getLogger(__name__)
boolean = [True, False]
processors = {
'string_to_tuple': lambda p: p if isinstance(p, (tuple, list)) else tuple(p.split(','))
}
API_PARAMS = {
"algorithm": {
"aliases": ["algorithms", "a", "algo"],
"required": False,
"required": True,
"default": 'default',
"processor": 'string_to_tuple',
"description": ("Algorithms that will be used to process the request."
"It may be a list of comma-separated names."),
},
"expanded-jsonld": {
"@id": "expanded-jsonld",
"aliases": ["expanded"],
"description": "use JSON-LD expansion to get full URIs",
"aliases": ["expanded", "expanded_jsonld"],
"options": boolean,
"required": True,
"default": False
},
"with_parameters": {
"with-parameters": {
"aliases": ['withparameters',
'with-parameters'],
'with_parameters'],
"description": "include initial parameters in the response",
"options": boolean,
"default": False,
"required": True
@@ -32,9 +38,67 @@ API_PARAMS = {
"@id": "outformat",
"aliases": ["o"],
"default": "json-ld",
"description": """The data can be semantically formatted (JSON-LD, turtle or n-triples),
given as a list of comma-separated fields (see the fields option) or constructed from a Jinja2
template (see the template option).""",
"required": True,
"options": ["json-ld", "turtle", "ntriples"],
},
"template": {
"@id": "template",
"required": False,
"description": """Jinja2 template for the result. The input data for the template will
be the results as a dictionary.
For example:
Consider the results before templating:
```
[{
"@type": "entry",
"onyx:hasEmotionSet": [],
"nif:isString": "testing the template",
"marl:hasOpinion": [
{
"@type": "sentiment",
"marl:hasPolarity": "marl:Positive"
}
]
}]
```
And the template:
```
{% for entry in entries %}
{{ entry["nif:isString"] | upper }},{{entry.sentiments[0]["marl:hasPolarity"].split(":")[1]}}
{% endfor %}
```
The final result would be:
```
TESTING THE TEMPLATE,Positive
```
"""
},
"fields": {
"@id": "fields",
"required": False,
"description": """A jmespath selector, that can be used to extract a new dictionary, array or value
from the results.
jmespath is a powerful query language for json and/or dictionaries.
It allows you to change the structure (and data) of your objects through queries.
e.g., the following expression gets a list of `[emotion label, intensity]` for each entry:
`entries[]."onyx:hasEmotionSet"[]."onyx:hasEmotion"[]["onyx:hasEmotionCategory","onyx:hasEmotionIntensity"]`
For more information, see: https://jmespath.org
"""
},
"help": {
"@id": "help",
"description": "Show additional help to know more about the possible parameters",
@@ -43,14 +107,41 @@ API_PARAMS = {
"options": boolean,
"default": False
},
"emotionModel": {
"verbose": {
"@id": "verbose",
"description": "Show all properties in the result",
"aliases": ["v"],
"required": True,
"options": boolean,
"default": False
},
"aliases": {
"@id": "aliases",
"description": "Replace JSON properties with their aliases",
"aliases": [],
"required": True,
"options": boolean,
"default": False
},
"emotion-model": {
"@id": "emotionModel",
"aliases": ["emoModel"],
"description": """Emotion model to use in the response.
Senpy will try to convert the output to this model automatically.
Examples: `wna:liking` and `emoml:big6`.
""",
"aliases": ["emoModel", "emotionModel"],
"required": False
},
"conversion": {
"@id": "conversion",
"description": "How to show the elements that have (not) been converted",
"description": """How to show the elements that have (not) been converted.
* full: converted and original elements will appear side-by-side
* filtered: only converted elements will be shown
* nested: converted elements will be shown, and they will include a link to the original element
(using `prov:wasGeneratedBy`).
""",
"required": True,
"options": ["filtered", "nested", "full"],
"default": "full"
@@ -60,9 +151,10 @@ API_PARAMS = {
EVAL_PARAMS = {
"algorithm": {
"aliases": ["plug", "p", "plugins", "algorithms", 'algo', 'a', 'plugin'],
"description": "Plugins to be evaluated",
"description": "Plugins to evaluate",
"required": True,
"help": "See activated plugins in /plugins"
"help": "See activated plugins in /plugins",
"processor": API_PARAMS['algorithm']['processor']
},
"dataset": {
"aliases": ["datasets", "data", "d"],
@@ -73,18 +165,19 @@ EVAL_PARAMS = {
}
PLUGINS_PARAMS = {
"plugin_type": {
"plugin-type": {
"@id": "pluginType",
"description": 'What kind of plugins to list',
"aliases": ["pluginType"],
"aliases": ["pluginType", "plugin_type"],
"required": True,
"default": 'analysisPlugin'
}
}
WEB_PARAMS = {
"inHeaders": {
"aliases": ["headers"],
"in-headers": {
"aliases": ["headers", "inheaders", "inHeaders", "in-headers", "in_headers"],
"description": "Only include the JSON-LD context in the headers",
"required": True,
"default": False,
"options": boolean
@@ -92,8 +185,8 @@ WEB_PARAMS = {
}
CLI_PARAMS = {
"plugin_folder": {
"aliases": ["folder"],
"plugin-folder": {
"aliases": ["folder", "plugin_folder"],
"required": True,
"default": "."
},
@@ -108,6 +201,7 @@ NIF_PARAMS = {
},
"intype": {
"@id": "intype",
"description": "input type",
"aliases": ["t"],
"required": False,
"default": "direct",
@@ -115,6 +209,7 @@ NIF_PARAMS = {
},
"informat": {
"@id": "informat",
"description": "input format",
"aliases": ["f"],
"required": False,
"default": "text",
@@ -122,17 +217,20 @@ NIF_PARAMS = {
},
"language": {
"@id": "language",
"description": "language of the input",
"aliases": ["l"],
"required": False,
},
"prefix": {
"@id": "prefix",
"description": "prefix to use for new entities",
"aliases": ["p"],
"required": True,
"default": "",
},
"urischeme": {
"@id": "urischeme",
"description": "scheme for NIF URIs",
"aliases": ["u"],
"required": False,
"default": "RFC5147String",
@@ -140,6 +238,15 @@ NIF_PARAMS = {
}
}
BUILTIN_PARAMS = {}
for d in [
NIF_PARAMS, CLI_PARAMS, WEB_PARAMS, PLUGINS_PARAMS, EVAL_PARAMS,
API_PARAMS
]:
for k, v in d.items():
BUILTIN_PARAMS[k] = v
def parse_params(indict, *specs):
if not specs:
@@ -154,7 +261,7 @@ def parse_params(indict, *specs):
if alias in indict and alias != param:
outdict[param] = indict[alias]
del outdict[alias]
continue
break
if param not in outdict:
if "default" in options:
# We assume the default is correct
@@ -162,9 +269,11 @@ def parse_params(indict, *specs):
elif options.get("required", False):
wrong_params[param] = spec[param]
continue
if 'processor' in options:
outdict[param] = processors[options['processor']](outdict[param])
if "options" in options:
if options["options"] == boolean:
outdict[param] = outdict[param] in [None, True, 'true', '1']
outdict[param] = str(outdict[param]).lower() in ['true', '1', '']
elif outdict[param] not in options["options"]:
wrong_params[param] = spec[param]
if wrong_params:
@@ -175,31 +284,126 @@ def parse_params(indict, *specs):
parameters=outdict,
errors=wrong_params)
raise message
if 'algorithm' in outdict and not isinstance(outdict['algorithm'], list):
outdict['algorithm'] = list(outdict['algorithm'].split(','))
return outdict
def parse_extra_params(request, plugin=None):
params = request.parameters.copy()
if plugin:
extra_params = parse_params(params, plugin.get('extra_params', {}))
params.update(extra_params)
def get_all_params(plugins, *specs):
'''Return a list of parameters for a given set of specifications and plugins.'''
dic = {}
for s in specs:
dic.update(s)
dic.update(get_extra_params(plugins))
return dic
def get_extra_params(plugins):
'''Get a list of possible parameters given a list of plugins'''
params = {}
extra_params = {}
for plugin in plugins:
this_params = plugin.get('extra_params', {})
for k, v in this_params.items():
if k not in extra_params:
extra_params[k] = {}
extra_params[k][plugin.name] = v
for k, v in extra_params.items(): # Resolve conflicts
if len(v) == 1: # Add the extra options that do not collide
params[k] = list(v.values())[0]
else:
required = False
aliases = None
options = None
default = None
nodefault = False # Set when defaults are not compatible
for plugin, opt in v.items():
params['{}.{}'.format(plugin, k)] = opt
required = required or opt.get('required', False)
newaliases = set(opt.get('aliases', []))
if aliases is None:
aliases = newaliases
else:
aliases = aliases & newaliases
if 'options' in opt:
newoptions = set(opt['options'])
options = newoptions if options is None else options & newoptions
if 'default' in opt:
newdefault = opt['default']
if newdefault:
if default is None and not nodefault:
default = newdefault
elif newdefault != default:
nodefault = True
default = None
# Check for incompatibilities
if options != set():
params[k] = {
'default': default,
'aliases': list(aliases),
'required': required,
'options': list(options)
}
return params
def parse_analyses(params, plugins):
'''
Parse the given parameters individually for each plugin, and get a list of the parameters that
belong to each of the plugins. Each item can then be used in the plugin.analyse_entries method.
'''
analysis_list = []
for i, plugin in enumerate(plugins):
if not plugin:
continue
this_params = filter_params(params, plugin, i)
parsed = parse_params(this_params, plugin.get('extra_params', {}))
analysis = plugin.activity(parsed)
analysis_list.append(analysis)
return analysis_list
def filter_params(params, plugin, ith=-1):
'''
Get the values within params that apply to a plugin.
More specific names override more general names, in this order:
<index_order>.parameter > <plugin.name>.parameter > parameter
Example:
>>> filter_params({'0.hello': True, 'hello': False}, Plugin(), 0)
{ '0.hello': True, 'hello': True}
'''
thisparams = {}
if ith >= 0:
ith = '{}.'.format(ith)
else:
ith = ""
for k, v in params.items():
if ith and k.startswith(str(ith)):
thisparams[k[len(ith):]] = v
elif k.startswith(plugin.name):
thisparams[k[len(plugin.name) + 1:]] = v
elif k not in thisparams:
thisparams[k] = v
return thisparams
def parse_call(params):
'''Return a results object based on the parameters used in a call/request.
'''
Return a results object based on the parameters used in a call/request.
'''
params = parse_params(params, NIF_PARAMS)
if params['informat'] == 'text':
results = Results()
entry = Entry(nif__isString=params['input'],
id='#') # Use @base
entry = Entry(nif__isString=params['input'], id='prefix:') # Use @base
results.entries.append(entry)
elif params['informat'] == 'json-ld':
results = from_string(params['input'], cls=Results)
else: # pragma: no cover
raise NotImplementedError('Informat {} is not implemented'.format(params['informat']))
raise NotImplementedError('Informat {} is not implemented'.format(
params['informat']))
results.parameters = params
return results
@@ -24,6 +24,8 @@ from . import api
from .version import __version__
from functools import wraps
from .gsitk_compat import GSITK_AVAILABLE
import logging
import json
import base64
@@ -63,44 +65,44 @@ def get_params(req):
return indict
def encoded_url(url=None, base=None):
def encode_url(url=None):
code = ''
if not url:
if request.method == 'GET':
url = request.full_path[1:] # Remove the first slash
else:
code = 'hash:{}'.format(hash(frozenset(request.parameters.items())))
url = request.parameters.get('prefix', request.full_path[1:] + '#')
return code or base64.urlsafe_b64encode(url.encode()).decode()
code = code or base64.urlsafe_b64encode(url.encode()).decode()
if base:
return base + code
return url_for('api.decode', code=code, _external=True)
def url_for_code(code, base=None):
# if base:
# return base + code
# return url_for('api.decode', code=code, _external=True)
# This was producing unique yet very long URIs, which wasn't ideal for visualization.
return 'http://senpy.invalid/'
def decoded_url(code, base=None):
if code.startswith('hash:'):
raise Exception('Can not decode a URL for a POST request')
base = base or request.url_root
path = base64.urlsafe_b64decode(code.encode()).decode()
if path[:4] == 'http':
return path
base = base or request.url_root
return base + path
@demo_blueprint.route('/')
def index():
ev = str(get_params(request).get('evaluation', False))
evaluation_enabled = ev.lower() not in ['false', 'no', 'none']
# ev = str(get_params(request).get('evaluation', True))
# evaluation_enabled = ev.lower() not in ['false', 'no', 'none']
evaluation_enabled = GSITK_AVAILABLE
return render_template("index.html",
evaluation=evaluation_enabled,
version=__version__)
@api_blueprint.route('/contexts/<entity>.jsonld')
def context(entity="context"):
@api_blueprint.route('/contexts/<code>')
def context(code=''):
context = Response._context
context['@vocab'] = url_for('ns.index', _external=True)
context['@base'] = url_for('api.decode', code=code, _external=True)
context['endpoint'] = url_for('api.api_root', _external=True)
return jsonify({"@context": context})
@@ -130,26 +132,59 @@ def schema(schema="definitions"):
def basic_api(f):
default_params = {
'inHeaders': False,
'in-headers': False,
'expanded-jsonld': False,
'outformat': None,
'with_parameters': True,
'with-parameters': True,
}
@wraps(f)
def decorated_function(*args, **kwargs):
raw_params = get_params(request)
logger.info('Getting request: {}'.format(raw_params))
# logger.info('Getting request: {}'.format(raw_params))
logger.debug('Getting request. Params: {}'.format(raw_params))
headers = {'X-ORIGINAL-PARAMS': json.dumps(raw_params)}
params = default_params
mime = request.accept_mimetypes\
.best_match(MIMETYPES.keys(),
DEFAULT_MIMETYPE)
mimeformat = MIMETYPES.get(mime, DEFAULT_FORMAT)
outformat = mimeformat
try:
params = api.parse_params(raw_params, api.WEB_PARAMS, api.API_PARAMS)
outformat = params.get('outformat', mimeformat)
if hasattr(request, 'parameters'):
request.parameters.update(params)
else:
request.parameters = params
response = f(*args, **kwargs)
if 'parameters' in response and not params['with-parameters']:
del response.parameters
logger.debug('Response: {}'.format(response))
prefix = params.get('prefix')
code = encode_url(prefix)
return response.flask(
in_headers=params['in-headers'],
headers=headers,
prefix=prefix or url_for_code(code),
base=prefix,
context_uri=url_for('api.context',
code=code,
_external=True),
outformat=outformat,
expanded=params['expanded-jsonld'],
template=params.get('template'),
verbose=params['verbose'],
aliases=params['aliases'],
fields=params.get('fields'))
except (Exception) as ex:
if current_app.debug or current_app.config['TESTING']:
raise
@@ -159,45 +194,49 @@ def basic_api(f):
response = ex
response.parameters = raw_params
logger.exception(ex)
if 'parameters' in response and not params['with_parameters']:
del response.parameters
logger.info('Response: {}'.format(response))
mime = request.accept_mimetypes\
.best_match(MIMETYPES.keys(),
DEFAULT_MIMETYPE)
mimeformat = MIMETYPES.get(mime, DEFAULT_FORMAT)
outformat = params['outformat'] or mimeformat
return response.flask(
in_headers=params['inHeaders'],
headers=headers,
prefix=params.get('prefix', encoded_url()),
context_uri=url_for('api.context',
entity=type(response).__name__,
_external=True),
outformat=outformat,
expanded=params['expanded-jsonld'])
return response.flask(
outformat=outformat,
expanded=params['expanded-jsonld'],
verbose=params.get('verbose', True),
)
return decorated_function
@api_blueprint.route('/', defaults={'plugin': None}, methods=['POST', 'GET'])
@api_blueprint.route('/<path:plugin>', methods=['POST', 'GET'])
@api_blueprint.route('/', defaults={'plugins': None}, methods=['POST', 'GET'])
@api_blueprint.route('/<path:plugins>', methods=['POST', 'GET'])
@basic_api
def api_root(plugin):
def api_root(plugins):
if plugins:
if request.parameters['algorithm'] != api.API_PARAMS['algorithm']['default']:
raise Error('You cannot specify the algorithm with a parameter and a URL variable.'
' Please, remove one of them')
plugins = plugins.replace('+', ',').replace('/', ',')
plugins = api.processors['string_to_tuple'](plugins)
else:
plugins = request.parameters['algorithm']
print(plugins)
sp = current_app.senpy
plugins = sp.get_plugins(plugins)
if request.parameters['help']:
dic = dict(api.API_PARAMS, **api.NIF_PARAMS)
response = Help(valid_parameters=dic)
apis = [api.WEB_PARAMS, api.API_PARAMS, api.NIF_PARAMS]
# Verbose is set to False as default, but we want it to default to
# True for help. This checks the original value, to make sure it wasn't
# set by default.
if not request.parameters['verbose'] and get_params(request).get('verbose'):
apis = []
if request.parameters['algorithm'] == ['default', ]:
plugins = []
allparameters = api.get_all_params(plugins, *apis)
response = Help(valid_parameters=allparameters)
return response
req = api.parse_call(request.parameters)
if plugin:
plugin = plugin.replace('+', '/')
plugin = plugin.split('/')
req.parameters['algorithm'] = plugin
return current_app.senpy.analyse(req)
analyses = api.parse_analyses(req.parameters, plugins)
results = current_app.senpy.analyse(req, analyses)
return results
@api_blueprint.route('/evaluate/', methods=['POST', 'GET'])
@@ -218,8 +257,8 @@ def evaluate():
def plugins():
sp = current_app.senpy
params = api.parse_params(request.parameters, api.PLUGINS_PARAMS)
ptype = params.get('plugin_type')
plugins = list(sp.plugins(plugin_type=ptype))
ptype = params.get('plugin-type')
plugins = list(sp.analysis_plugins(plugin_type=ptype))
dic = Plugins(plugins=plugins)
return dic
@@ -1,3 +1,5 @@
from __future__ import print_function
import sys
from .models import Error
from .extensions import Senpy
@@ -27,14 +29,14 @@ def main_function(argv):
api.CLI_PARAMS,
api.API_PARAMS,
api.NIF_PARAMS)
plugin_folder = params['plugin_folder']
default_plugins = params.get('default-plugins', False)
plugin_folder = params['plugin-folder']
default_plugins = not params.get('no-default-plugins', False)
sp = Senpy(default_plugins=default_plugins, plugin_folder=plugin_folder)
request = api.parse_call(params)
algos = request.parameters.get('algorithm', None)
algos = sp.get_plugins(request.parameters.get('algorithm', None))
if algos:
for algo in algos:
sp.activate_plugin(algo)
sp.activate_plugin(algo.name)
else:
sp.activate_all()
res = sp.analyse(request)
@@ -48,7 +50,7 @@ def main():
res = main_function(sys.argv[1:])
print(res.serialize())
except Error as err:
print(err.serialize())
print(err.serialize(), file=sys.stderr)
sys.exit(2)
@@ -25,7 +25,11 @@ class Client(object):
def request(self, path=None, method='GET', **params):
url = '{}{}'.format(self.endpoint.rstrip('/'), path)
response = requests.request(method=method, url=url, params=params)
if method == 'POST':
response = requests.post(url=url, data=params)
else:
response = requests.request(method=method, url=url, params=params)
try:
resp = models.from_dict(response.json())
except Exception as ex:
@@ -6,8 +6,8 @@ from future import standard_library
standard_library.install_aliases()
from . import plugins, api
from .plugins import Plugin, evaluate
from .models import Error, AggregatedEvaluation
from .plugins import AnalysisPlugin
from .blueprints import api_blueprint, demo_blueprint, ns_blueprint
from threading import Thread
@@ -17,7 +17,6 @@ import copy
import errno
import logging
from . import gsitk_compat
logger = logging.getLogger(__name__)
@@ -25,6 +24,7 @@ logger = logging.getLogger(__name__)
class Senpy(object):
""" Default Senpy extension for Flask """
def __init__(self,
app=None,
plugin_folder=".",
@@ -50,11 +50,12 @@ class Senpy(object):
self.add_folder('plugins', from_root=True)
else:
# Add only conversion plugins
self.add_folder(os.path.join('plugins', 'conversion'),
self.add_folder(os.path.join('plugins', 'postprocessing'),
from_root=True)
self.app = app
if app is not None:
self.init_app(app)
self._conversion_candidates = {}
def init_app(self, app):
""" Initialise a flask app to add plugins to its context """
@@ -75,31 +76,55 @@ class Senpy(object):
def add_plugin(self, plugin):
self._plugins[plugin.name.lower()] = plugin
self._conversion_candidates = {}
def delete_plugin(self, plugin):
del self._plugins[plugin.name.lower()]
def plugins(self, **kwargs):
def plugins(self, plugin_type=None, is_activated=True, **kwargs):
""" Return the plugins registered for a given application. Filtered by criteria """
return list(plugins.pfilter(self._plugins, **kwargs))
return sorted(plugins.pfilter(self._plugins,
plugin_type=plugin_type,
is_activated=is_activated,
**kwargs),
key=lambda x: x.id)
def get_plugin(self, name, default=None):
if name == 'default':
return self.default_plugin
plugin = name.lower()
if plugin in self._plugins:
return self._plugins[plugin]
elif name == 'conversion':
return None
results = self.plugins(id='endpoint:plugins/{}'.format(name))
if name.lower() in self._plugins:
return self._plugins[name.lower()]
if not results:
return Error(message="Plugin not found", status=404)
return results[0]
results = self.plugins(id='endpoint:plugins/{}'.format(name.lower()),
plugin_type=None)
if results:
return results[0]
@property
def analysis_plugins(self):
""" Return only the analysis plugins """
return self.plugins(plugin_type='analysisPlugin')
results = self.plugins(id=name,
plugin_type=None)
if results:
return results[0]
msg = ("Plugin not found: '{}'\n"
"Make sure it is ACTIVATED\n"
"Valid algorithms: {}").format(name,
self._plugins.keys())
raise Error(message=msg, status=404)
def get_plugins(self, name):
try:
name = name.split(',')
except AttributeError:
pass # Assume it is a tuple or a list
return tuple(self.get_plugin(n) for n in name)
def analysis_plugins(self, **kwargs):
""" Return only the analysis plugins that are active"""
candidates = self.plugins(**kwargs)
return list(plugins.pfilter(candidates, plugin_type=AnalysisPlugin))
def add_folder(self, folder, from_root=False):
""" Find plugins in this folder and add them to this instance """
@@ -114,73 +139,140 @@ class Senpy(object):
else:
raise AttributeError("Not a folder or does not exist: %s", folder)
def _get_plugins(self, request):
if not self.analysis_plugins:
raise Error(
status=404,
message=("No plugins found."
" Please install one."))
algos = request.parameters.get('algorithm', None)
if not algos:
if self.default_plugin:
algos = [self.default_plugin.name, ]
else:
raise Error(
status=404,
message="No default plugin found, and None provided")
plugins = list()
for algo in algos:
algo = algo.lower()
if algo not in self._plugins:
msg = ("The algorithm '{}' is not valid\n"
"Valid algorithms: {}").format(algo,
self._plugins.keys())
logger.debug(msg)
raise Error(
status=404,
message=msg)
plugins.append(self._plugins[algo])
return plugins
def _process_entries(self, entries, req, plugins):
def _process(self, req, pending, done=None):
"""
Recursively process the entries with the first plugin in the list, and pass the results
to the rest of the plugins.
"""
if not plugins:
for i in entries:
yield i
return
plugin = plugins[0]
specific_params = api.parse_extra_params(req, plugin)
req.analysis.append({'plugin': plugin,
'parameters': specific_params})
results = plugin.analyse_entries(entries, specific_params)
for i in self._process_entries(results, req, plugins[1:]):
yield i
done = done or []
if not pending:
return req
analysis = pending[0]
results = analysis.run(req)
results.activities.append(analysis)
done += analysis
return self._process(results, pending[1:], done)
def install_deps(self):
plugins.install_deps(*self.plugins())
logger.info('Installing dependencies')
# If a plugin is activated, its dependencies should already be installed
# Otherwise, it would've failed to activate.
plugins.install_deps(*self.plugins(is_activated=False))
def analyse(self, request):
def analyse(self, request, analyses=None):
"""
Main method that analyses a request, either from CLI or HTTP.
It takes a processed request, provided by the user, as returned
by api.parse_call().
"""
if not self.plugins():
raise Error(
status=404,
message=("No plugins found."
" Please install one."))
if analyses is None:
plugins = self.get_plugins(request.parameters['algorithm'])
analyses = api.parse_analyses(request.parameters, plugins)
logger.debug("analysing request: {}".format(request))
entries = request.entries
request.entries = []
plugins = self._get_plugins(request)
results = request
for i in self._process_entries(entries, results, plugins):
results.entries.append(i)
self.convert_emotions(results)
logger.debug("Returning analysis result: {}".format(results))
results.analysis = [i['plugin'].id for i in results.analysis]
results = self._process(request, analyses)
logger.debug("Got analysis result: {}".format(results))
results = self.postprocess(results, analyses)
logger.debug("Returning post-processed result: {}".format(results))
return results
def convert_emotions(self, resp, analyses):
"""
Conversion of all emotions in a response **in place**.
In addition to converting from one model to another, it has
to include the conversion plugin to the analysis list.
Needless to say, this is far from an elegant solution, but it works.
@todo refactor and clean up
"""
logger.debug("Converting emotions")
if 'parameters' not in resp:
logger.debug("NO PARAMETERS")
return resp
params = resp['parameters']
toModel = params.get('emotion-model', None)
if not toModel:
logger.debug("NO tomodel PARAMETER")
return resp
logger.debug('Asked for model: {}'.format(toModel))
output = params.get('conversion', None)
newentries = []
done = []
for i in resp.entries:
if output == "full":
newemotions = copy.deepcopy(i.emotions)
else:
newemotions = []
for j in i.emotions:
activity = j['prov:wasGeneratedBy']
act = resp.activity(activity)
if not act:
raise Error('Could not find the emotion model for {}'.format(activity))
fromModel = act.plugin['onyx:usesEmotionModel']
if toModel == fromModel:
continue
candidate = self._conversion_candidate(fromModel, toModel)
if not candidate:
e = Error(('No conversion plugin found for: '
'{} -> {}'.format(fromModel, toModel)),
status=404)
e.original_response = resp
e.parameters = params
raise e
analysis = candidate.activity(params)
done.append(analysis)
for k in candidate.convert(j, fromModel, toModel, params):
k.prov__wasGeneratedBy = analysis.id
if output == 'nested':
k.prov__wasDerivedFrom = j
newemotions.append(k)
i.emotions = newemotions
newentries.append(i)
resp.entries = newentries
return resp
def _conversion_candidate(self, fromModel, toModel):
if not self._conversion_candidates:
candidates = {}
for conv in self.plugins(plugin_type=plugins.EmotionConversion):
for pair in conv.onyx__doesConversion:
logging.debug(pair)
key = (pair['onyx:conversionFrom'], pair['onyx:conversionTo'])
if key not in candidates:
candidates[key] = []
candidates[key].append(conv)
self._conversion_candidates = candidates
key = (fromModel, toModel)
if key not in self._conversion_candidates:
return None
return self._conversion_candidates[key][0]
def postprocess(self, response, analyses):
'''
Transform the results from the analysis plugins.
It has some pre-defined post-processing like emotion conversion,
and it also allows plugins to auto-select themselves.
'''
response = self.convert_emotions(response, analyses)
for plug in self.plugins(plugin_type=plugins.PostProcessing):
if plug.check(response, response.activities):
activity = plug.activity(response.parameters)
response = plug.process(response, activity)
return response
def _get_datasets(self, request):
if not self.datasets:
raise Error(
@@ -191,8 +283,8 @@ class Senpy(object):
for dataset in datasets_name:
if dataset not in self.datasets:
logger.debug(("The dataset '{}' is not valid\n"
"Valid datasets: {}").format(dataset,
self.datasets.keys()))
"Valid datasets: {}").format(
dataset, self.datasets.keys()))
raise Error(
status=404,
message="The dataset '{}' is not valid".format(dataset))
@@ -218,78 +310,24 @@ class Senpy(object):
results = AggregatedEvaluation()
results.parameters = params
datasets = self._get_datasets(results)
plugins = self._get_plugins(results)
for eval in evaluate(plugins, datasets):
plugs = []
for plugname in params['algorithm']:
plugs = self.get_plugins(plugname)
for plug in plugs:
if not isinstance(plug, plugins.Evaluable):
raise Exception('Plugin {} can not be evaluated', plug.id)
for eval in plugins.evaluate(plugs, datasets):
results.evaluations.append(eval)
if 'with_parameters' not in results.parameters:
if 'with-parameters' not in results.parameters:
del results.parameters
logger.debug("Returning evaluation result: {}".format(results))
return results
def _conversion_candidates(self, fromModel, toModel):
candidates = self.plugins(plugin_type='emotionConversionPlugin')
for candidate in candidates:
for pair in candidate.onyx__doesConversion:
logging.debug(pair)
if pair['onyx:conversionFrom'] == fromModel \
and pair['onyx:conversionTo'] == toModel:
yield candidate
def convert_emotions(self, resp):
"""
Conversion of all emotions in a response **in place**.
In addition to converting from one model to another, it has
to include the conversion plugin to the analysis list.
Needless to say, this is far from an elegant solution, but it works.
@todo refactor and clean up
"""
plugins = [i['plugin'] for i in resp.analysis]
params = resp.parameters
toModel = params.get('emotionModel', None)
if not toModel:
return
logger.debug('Asked for model: {}'.format(toModel))
output = params.get('conversion', None)
candidates = {}
for plugin in plugins:
try:
fromModel = plugin.get('onyx:usesEmotionModel', None)
candidates[plugin.id] = next(self._conversion_candidates(fromModel, toModel))
logger.debug('Analysis plugin {} uses model: {}'.format(plugin.id, fromModel))
except StopIteration:
e = Error(('No conversion plugin found for: '
'{} -> {}'.format(fromModel, toModel)),
status=404)
e.original_response = resp
e.parameters = params
raise e
newentries = []
for i in resp.entries:
if output == "full":
newemotions = copy.deepcopy(i.emotions)
else:
newemotions = []
for j in i.emotions:
plugname = j['prov:wasGeneratedBy']
candidate = candidates[plugname]
resp.analysis.append({'plugin': candidate,
'parameters': params})
for k in candidate.convert(j, fromModel, toModel, params):
k.prov__wasGeneratedBy = candidate.id
if output == 'nested':
k.prov__wasDerivedFrom = j
newemotions.append(k)
i.emotions = newemotions
newentries.append(i)
resp.entries = newentries
@property
def default_plugin(self):
if not self._default or not self._default.is_activated:
candidates = self.plugins(plugin_type='analysisPlugin',
is_activated=True)
candidates = self.analysis_plugins()
if len(candidates) > 0:
self._default = candidates[0]
else:
@@ -299,7 +337,7 @@ class Senpy(object):
@default_plugin.setter
def default_plugin(self, value):
if isinstance(value, Plugin):
if isinstance(value, plugins.Plugin):
if not value.is_activated:
raise AttributeError('The default plugin has to be activated.')
self._default = value
@@ -324,22 +362,15 @@ class Senpy(object):
ps.append(self.deactivate_plugin(plug, sync=sync))
return ps
def _set_active(self, plugin, active=True, *args, **kwargs):
''' We're using a variable in the plugin itself to activate/deactivate plugins.\
Note that plugins may activate themselves by setting this variable.
'''
plugin.is_activated = active
def _activate(self, plugin):
success = False
with plugin._lock:
if plugin.is_activated:
return
plugin.activate()
plugin._activate()
msg = "Plugin activated: {}".format(plugin.name)
logger.info(msg)
success = True
self._set_active(plugin, success)
success = plugin.is_activated
return success
def activate_plugin(self, plugin_name, sync=True):
@@ -351,7 +382,8 @@ class Senpy(object):
logger.info("Activating plugin: {}".format(plugin.name))
if sync or not getattr(plugin, 'async', True) or getattr(plugin, 'sync', False):
if sync or not getattr(plugin, 'async', True) or getattr(
plugin, 'sync', False):
return self._activate(plugin)
else:
th = Thread(target=partial(self._activate, plugin))
@@ -362,7 +394,7 @@ class Senpy(object):
with plugin._lock:
if not plugin.is_activated:
return
plugin.deactivate()
plugin._deactivate()
logger.info("Plugin deactivated: {}".format(plugin.name))
def deactivate_plugin(self, plugin_name, sync=True):
@@ -372,12 +404,11 @@ class Senpy(object):
message="Plugin not found: {}".format(plugin_name), status=404)
plugin = self._plugins[plugin_name]
self._set_active(plugin, False)
if sync or not getattr(plugin, 'async', True) or not getattr(plugin, 'sync', False):
self._deactivate(plugin)
if sync or not getattr(plugin, 'async', True) or not getattr(
plugin, 'sync', False):
plugin._deactivate()
else:
th = Thread(target=partial(self._deactivate, plugin))
th = Thread(target=plugin.deactivate)
th.start()
return th

View File

@@ -16,16 +16,16 @@ def raise_exception(*args, **kwargs):
try:
gsitk_distro = get_distribution("gsitk")
GSITK_VERSION = parse_version(gsitk_distro.version)
GSITK_AVAILABLE = GSITK_VERSION > parse_version("0.1.9.1") # Earlier versions have a bug
except DistributionNotFound:
GSITK_AVAILABLE = False
GSITK_VERSION = ()
if GSITK_AVAILABLE:
from gsitk.datasets.datasets import DatasetManager
from gsitk.evaluation.evaluation import Evaluation as Eval
from gsitk.evaluation.evaluation import Evaluation as Eval # noqa: F401
from gsitk.evaluation.evaluation import EvalPipeline # noqa: F401
from sklearn.pipeline import Pipeline
modules = locals()
else:
GSITK_AVAILABLE = True
except (DistributionNotFound, ImportError) as err:
logger.debug('Error importing GSITK: {}'.format(err))
logger.warning(IMPORTMSG)
GSITK_AVAILABLE = False
GSITK_VERSION = ()
DatasetManager = Eval = Pipeline = raise_exception

View File

@@ -34,6 +34,7 @@ class BaseMeta(ABCMeta):
def __new__(mcs, name, bases, attrs, **kwargs):
register_afterwards = False
defaults = {}
aliases = {}
attrs = mcs.expand_with_schema(name, attrs)
if 'schema' in attrs:
@@ -41,17 +42,21 @@ class BaseMeta(ABCMeta):
for base in bases:
if hasattr(base, '_defaults'):
defaults.update(getattr(base, '_defaults'))
if hasattr(base, '_aliases'):
aliases.update(getattr(base, '_aliases'))
info, rest = mcs.split_attrs(attrs)
for i in list(info.keys()):
if isinstance(info[i], _Alias):
fget, fset, fdel = make_property(info[i].indict)
rest[i] = property(fget=fget, fset=fset, fdel=fdel)
aliases[i] = info[i].indict
if info[i].default is not None:
defaults[i] = info[i].default
else:
defaults[i] = info[i]
rest['_defaults'] = defaults
rest['_aliases'] = aliases
cls = super(BaseMeta, mcs).__new__(mcs, name, tuple(bases), rest)
@@ -85,7 +90,8 @@ class BaseMeta(ABCMeta):
schema = json.load(f)
resolver = jsonschema.RefResolver(schema_path, schema)
attrs['@type'] = "".join((name[0].lower(), name[1:]))
if '@type' not in attrs:
attrs['@type'] = name
attrs['_schema_file'] = schema_file
attrs['schema'] = schema
attrs['_validator'] = jsonschema.Draft4Validator(schema, resolver=resolver)
@@ -139,9 +145,11 @@ class BaseMeta(ABCMeta):
return temp
def make_property(key):
def make_property(key, default=None):
def fget(self):
if default:
return self.get(key, copy.copy(default))
return self[key]
def fdel(self):
@@ -167,7 +175,7 @@ class CustomDict(MutableMapping, object):
'''
_defaults = {}
_map_attr_key = {'id': '@id'}
_aliases = {'id': '@id'}
def __init__(self, *args, **kwargs):
super(CustomDict, self).__init__()
@@ -176,13 +184,13 @@ class CustomDict(MutableMapping, object):
for arg in args:
self.update(arg)
for k, v in kwargs.items():
self[self._attr_to_key(k)] = v
self[k] = v
return self
def serializable(self):
def serializable(self, **kwargs):
def ser_or_down(item):
if hasattr(item, 'serializable'):
return item.serializable()
return item.serializable(**kwargs)
elif isinstance(item, dict):
temp = dict()
for kp in item:
@@ -194,10 +202,9 @@ class CustomDict(MutableMapping, object):
else:
return item
return ser_or_down(self.as_dict())
return ser_or_down(self.as_dict(**kwargs))
def __getitem__(self, key):
key = self._key_to_attr(key)
return self.__dict__[key]
def __setitem__(self, key, value):
@@ -205,9 +212,23 @@ class CustomDict(MutableMapping, object):
key = self._key_to_attr(key)
return setattr(self, key, value)
def as_dict(self):
return {self._attr_to_key(k): v for k, v in self.__dict__.items()
if not self._internal_key(k)}
def __delitem__(self, key):
key = self._key_to_attr(key)
del self.__dict__[key]
def as_dict(self, verbose=True, aliases=False):
attrs = self.__dict__.keys()
if not verbose and hasattr(self, '_terse_keys'):
attrs = self._terse_keys + ['@type', '@id']
res = {k: getattr(self, k) for k in attrs
if not self._internal_key(k) and hasattr(self, k)}
if not aliases:
return res
for k, ok in self._aliases.items():
if ok in res:
res[k] = getattr(res, ok)
del res[ok]
return res
def __iter__(self):
return (k for k in self.__dict__ if not self._internal_key(k))
@@ -215,43 +236,52 @@ class CustomDict(MutableMapping, object):
def __len__(self):
return len(self.__dict__)
def __delitem__(self, key):
del self.__dict__[key]
def update(self, other):
for k, v in other.items():
self[k] = v
def _attr_to_key(self, key):
key = key.replace("__", ":", 1)
key = self._map_attr_key.get(key, key)
key = self._aliases.get(key, key)
return key
def _key_to_attr(self, key):
if self._internal_key(key):
return key
key = key.replace(":", "__", 1)
if key in self._aliases:
key = self._aliases[key]
else:
key = key.replace(":", "__", 1)
return key
def __getattr__(self, key):
try:
return self.__dict__[self._attr_to_key(key)]
except KeyError:
raise AttributeError
nkey = self._attr_to_key(key)
if nkey in self.__dict__:
return self.__dict__[nkey]
elif nkey == key:
raise AttributeError("Key not found: {}".format(key))
return getattr(self, nkey)
def __setattr__(self, key, value):
super(CustomDict, self).__setattr__(self._attr_to_key(key), value)
def __delattr__(self, key):
super(CustomDict, self).__delattr__(self._attr_to_key(key))
@staticmethod
def _internal_key(key):
return key[0] == '_'
def __str__(self):
return str(self.serializable())
return json.dumps(self.serializable(), sort_keys=True, indent=4)
def __repr__(self):
return str(self.serializable())
return json.dumps(self.serializable(), sort_keys=True, indent=4)
_Alias = namedtuple('Alias', 'indict')
_Alias = namedtuple('Alias', ['indict', 'default'])
def alias(key):
return _Alias(key)
def alias(key, default=None):
return _Alias(key, default)
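To make the alias mechanism concrete, here is a minimal sketch (it relies on the Entry model defined further down in this diff, where text is an alias for nif:isString):
from senpy.models import Entry

entry = Entry(nif__isString='Senpy is awesome')
# 'text' is an alias of 'nif:isString': both names refer to the same value
assert entry.text == 'Senpy is awesome'
entry.text = 'Even better'
assert entry['nif:isString'] == 'Even better'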

View File

@@ -12,6 +12,8 @@ standard_library.install_aliases()
from future.utils import with_metaclass
from past.builtins import basestring
from jinja2 import Environment, BaseLoader
import time
import copy
import json
@@ -21,6 +23,7 @@ from flask import Response as FlaskResponse
from pyld import jsonld
import logging
import jmespath
logging.getLogger('rdflib').setLevel(logging.WARN)
logger = logging.getLogger(__name__)
@@ -31,8 +34,9 @@ from rdflib import Graph
from .meta import BaseMeta, CustomDict, alias
DEFINITIONS_FILE = 'definitions.json'
CONTEXT_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'schemas', 'context.jsonld')
CONTEXT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'schemas',
'context.jsonld')
def get_schema_path(schema_file, absolute=False):
@@ -121,24 +125,21 @@ class BaseModel(with_metaclass(BaseMeta, CustomDict)):
'''
schema_file = DEFINITIONS_FILE
# schema_file = DEFINITIONS_FILE
_context = base_context["@context"]
def __init__(self, *args, **kwargs):
auto_id = kwargs.pop('_auto_id', True)
auto_id = kwargs.pop('_auto_id', False)
super(BaseModel, self).__init__(*args, **kwargs)
if auto_id:
self.id
if '@type' not in self:
logger.warn('Created an instance of an unknown model')
@property
def id(self):
if '@id' not in self:
self['@id'] = '_:{}_{}'.format(type(self).__name__, time.time())
self['@id'] = 'prefix:{}_{}'.format(type(self).__name__, time.time())
return self['@id']
@id.setter
@@ -174,24 +175,33 @@ class BaseModel(with_metaclass(BaseMeta, CustomDict)):
headers=headers,
mimetype=mimetype)
def serialize(self, format='json-ld', with_mime=False, **kwargs):
js = self.jsonld(**kwargs)
content = json.dumps(js, indent=2, sort_keys=True)
if format == 'json-ld':
def serialize(self, format='json-ld', with_mime=False,
template=None, prefix=None, fields=None, **kwargs):
js = self.jsonld(prefix=prefix, **kwargs)
if template is not None:
rtemplate = Environment(loader=BaseLoader).from_string(template)
content = rtemplate.render(**self)
mimetype = 'text'
elif fields is not None:
# Emulate field selection by constructing a template
content = json.dumps(jmespath.search(fields, js))
mimetype = 'text'
elif format == 'json-ld':
content = json.dumps(js, indent=2, sort_keys=True)
mimetype = "application/json"
elif format in ['turtle', 'ntriples']:
content = json.dumps(js, indent=2, sort_keys=True)
logger.debug(js)
base = kwargs.get('prefix')
context = [self._context, {'prefix': prefix, '@base': prefix}]
g = Graph().parse(
data=content,
format='json-ld',
base=base,
context=[self._context,
{'@base': base}])
prefix=prefix,
context=context)
logger.debug(
'Parsing with prefix: {}'.format(kwargs.get('prefix')))
content = g.serialize(format=format,
base=base).decode('utf-8')
prefix=prefix).decode('utf-8')
mimetype = 'text/{}'.format(format)
else:
raise Error('Unknown outformat: {}'.format(format))
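As a rough illustration of the new template and fields options (the Results object is built by hand and both expressions are only examples, not part of the test suite):
from senpy.models import Results, Entry

res = Results(entries=[Entry(nif__isString='hello')])
# Select part of the JSON-LD output with a jmespath expression
print(res.serialize(fields='entries'))
# Or render the response through a Jinja2 template
print(res.serialize(template='{{ entries[0]["nif:isString"] }}'))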
@@ -204,14 +214,25 @@ class BaseModel(with_metaclass(BaseMeta, CustomDict)):
with_context=False,
context_uri=None,
prefix=None,
expanded=False):
base=None,
expanded=False,
**kwargs):
result = self.serializable()
result = self.serializable(**kwargs)
if expanded:
result = jsonld.expand(
result, options={'base': prefix,
'expandContext': self._context})[0]
result,
options={
'expandContext': [
self._context,
{
'prefix': prefix,
'endpoint': prefix
}
]
}
)[0]
if not with_context:
try:
del result['@context']
@@ -239,7 +260,7 @@ def subtypes():
return BaseMeta._subtypes
def from_dict(indict, cls=None):
def from_dict(indict, cls=None, warn=True):
if not cls:
target = indict.get('@type', None)
cls = BaseModel
@@ -247,6 +268,10 @@ def from_dict(indict, cls=None):
cls = subtypes()[target]
except KeyError:
pass
if cls == BaseModel and warn:
logger.warning('Created an instance of an unknown model')
outdict = dict()
for k, v in indict.items():
if k == '@context':
@@ -266,22 +291,24 @@ def from_string(string, **kwargs):
return from_dict(json.loads(string), **kwargs)
def from_json(injson):
def from_json(injson, **kwargs):
indict = json.loads(injson)
return from_dict(indict)
return from_dict(indict, **kwargs)
class Entry(BaseModel):
schema = 'entry'
text = alias('nif:isString')
sentiments = alias('marl:hasOpinion', [])
emotions = alias('onyx:hasEmotionSet', [])
class Sentiment(BaseModel):
schema = 'sentiment'
polarity = alias('marl:hasPolarity')
polarityValue = alias('marl:hasPolarityValue')
polarityValue = alias('marl:polarityValue')
class Error(BaseModel, Exception):
@@ -301,7 +328,173 @@ class Error(BaseModel, Exception):
return Exception.__hash__(self)
# Add the remaining schemas programmatically
class AggregatedEvaluation(BaseModel):
schema = 'aggregatedEvaluation'
evaluations = alias('senpy:evaluations', [])
class Dataset(BaseModel):
schema = 'dataset'
class Datasets(BaseModel):
schema = 'datasets'
datasets = []
class Emotion(BaseModel):
schema = 'emotion'
class EmotionConversion(BaseModel):
schema = 'emotionConversion'
class EmotionConversionPlugin(BaseModel):
schema = 'emotionConversionPlugin'
class EmotionAnalysis(BaseModel):
schema = 'emotionAnalysis'
class EmotionModel(BaseModel):
schema = 'emotionModel'
onyx__hasEmotionCategory = []
class EmotionPlugin(BaseModel):
schema = 'emotionPlugin'
class EmotionSet(BaseModel):
schema = 'emotionSet'
onyx__hasEmotion = []
class Evaluation(BaseModel):
schema = 'evaluation'
metrics = alias('senpy:metrics', [])
class Entity(BaseModel):
schema = 'entity'
class Help(BaseModel):
schema = 'help'
class Metric(BaseModel):
schema = 'metric'
class Parameter(BaseModel):
schema = 'parameter'
class Plugins(BaseModel):
schema = 'plugins'
plugins = []
class Response(BaseModel):
schema = 'response'
class Results(BaseModel):
schema = 'results'
_terse_keys = ['entries', ]
activities = []
entries = []
def activity(self, id):
for i in self.activities:
if i.id == id:
return i
return None
class SentimentPlugin(BaseModel):
schema = 'sentimentPlugin'
class Suggestion(BaseModel):
schema = 'suggestion'
class Topic(BaseModel):
schema = 'topic'
class Analysis(BaseModel):
'''
A prov:Activity that results from executing a Plugin on an entry with a set of
parameters.
'''
schema = 'analysis'
parameters = alias('prov:used', [])
algorithm = alias('prov:wasAssociatedWith', [])
@property
def params(self):
outdict = {}
outdict['algorithm'] = self.algorithm
for param in self.parameters:
outdict[param['name']] = param['value']
return outdict
@params.setter
def params(self, value):
for k, v in value.items():
for param in self.parameters:
if param.name == k:
param.value = v
break
else:
self.parameters.append(Parameter(name=k, value=v)) # noqa: F821
def param(self, key, default=None):
for param in self.parameters:
if param['name'] == key:
return param['value']
return default
@property
def plugin(self):
return self._plugin
@plugin.setter
def plugin(self, value):
self._plugin = value
self['prov:wasAssociatedWith'] = value.id
def run(self, request):
return self.plugin.process(request, self)
class Plugin(BaseModel):
schema = 'plugin'
extra_params = {}
def activity(self, parameters=None):
'''Generate an Analysis (prov:Activity) from this plugin and the given parameters'''
a = Analysis()
a.plugin = self
if parameters:
a.params = parameters
return a
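A minimal sketch of how an activity carries its parameters; DummyPlugin is made up purely to have something to call activity() on:
from senpy.plugins import Plugin

class DummyPlugin(Plugin):
    '''A no-op plugin used only for illustration'''
    author = '@example'
    version = '0.1'

p = DummyPlugin()
activity = p.activity({'language': 'en'})
assert activity.param('language') == 'en'              # look up a single parameter
assert activity.param('missing', 'fallback') == 'fallback'
assert activity.params['algorithm'] == p.id            # i.e. prov:wasAssociatedWith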
# More classes could be added programmatically
def _class_from_schema(name, schema=None, schema_file=None, base_classes=None):
base_classes = base_classes or []
@@ -321,31 +514,3 @@ def _add_class_from_schema(*args, **kwargs):
generatedClass = _class_from_schema(*args, **kwargs)
globals()[generatedClass.__name__] = generatedClass
del generatedClass
for i in [
'aggregatedEvaluation',
'analysis',
'dataset',
'datasets',
'emotion',
'emotionConversion',
'emotionConversionPlugin',
'emotionAnalysis',
'emotionModel',
'emotionPlugin',
'emotionSet',
'evaluation',
'entity',
'help',
'metric',
'plugin',
'plugins',
'response',
'results',
'sentimentPlugin',
'suggestion',
'topic',
]:
_add_class_from_schema(i)

View File

@@ -1,7 +1,8 @@
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
from future import standard_library
standard_library.install_aliases()
from future.utils import with_metaclass
from functools import partial
@@ -10,7 +11,6 @@ import os
import re
import pickle
import logging
import copy
import pprint
import inspect
@@ -19,14 +19,18 @@ import subprocess
import importlib
import yaml
import threading
import nltk
import multiprocessing
import pkg_resources
from nltk import download
from textwrap import dedent
from sklearn.base import TransformerMixin, BaseEstimator
from itertools import product
from .. import models, utils
from .. import api
from .. import gsitk_compat
from .. import testing
logger = logging.getLogger(__name__)
@@ -34,28 +38,33 @@ class PluginMeta(models.BaseMeta):
_classes = {}
def __new__(mcs, name, bases, attrs, **kwargs):
plugin_type = []
if hasattr(bases[0], 'plugin_type'):
plugin_type += bases[0].plugin_type
plugin_type.append(name)
alias = attrs.get('name', name)
attrs['plugin_type'] = plugin_type
plugin_type = set()
for base in bases:
if hasattr(base, '_plugin_type'):
plugin_type |= base._plugin_type
plugin_type.add(name)
alias = attrs.get('name', name).lower()
attrs['_plugin_type'] = plugin_type
logger.debug('Adding new plugin class', name, bases, attrs, plugin_type)
attrs['name'] = alias
if 'description' not in attrs:
doc = attrs.get('__doc__', None)
if doc:
attrs['description'] = doc
attrs['description'] = dedent(doc)
else:
logger.warn(('Plugin {} does not have a description. '
'Please, add a short summary to help other developers').format(name))
logger.warning(
('Plugin {} does not have a description. '
'Please, add a short summary to help other developers'
).format(name))
cls = super(PluginMeta, mcs).__new__(mcs, name, bases, attrs)
if alias in mcs._classes:
if os.environ.get('SENPY_TESTING', ""):
raise Exception(('The type of plugin {} already exists. '
'Please, choose a different name').format(name))
raise Exception(
('The type of plugin {} already exists. '
'Please, choose a different name').format(name))
else:
logger.warn('Overloading plugin class: {}'.format(alias))
logger.warning('Overloading plugin class: {}'.format(alias))
mcs._classes[alias] = cls
return cls
@@ -77,6 +86,9 @@ class Plugin(with_metaclass(PluginMeta, models.Plugin)):
'''
_terse_keys = ['name', '@id', '@type', 'author', 'description',
'extra_params', 'is_activated', 'url', 'version']
def __init__(self, info=None, data_folder=None, **kwargs):
"""
Provides a canonical name for plugins and serves as base for other
@@ -87,10 +99,12 @@ class Plugin(with_metaclass(PluginMeta, models.Plugin)):
if info:
self.update(info)
self.validate()
self.id = 'endpoint:plugins/{}_{}'.format(self['name'], self['version'])
self.id = 'endpoint:plugins/{}_{}'.format(self['name'],
self['version'])
self.is_activated = False
self._lock = threading.Lock()
self._directory = os.path.abspath(os.path.dirname(inspect.getfile(self.__class__)))
self._directory = os.path.abspath(
os.path.dirname(inspect.getfile(self.__class__)))
data_folder = data_folder or os.getcwd()
subdir = os.path.join(data_folder, self.name)
@@ -118,33 +132,84 @@ class Plugin(with_metaclass(PluginMeta, models.Plugin)):
if x not in self:
missing.append(x)
if missing:
raise models.Error('Missing configuration parameters: {}'.format(missing))
raise models.Error(
'Missing configuration parameters: {}'.format(missing))
def get_folder(self):
return os.path.dirname(inspect.getfile(self.__class__))
def _activate(self):
self.activate()
self.is_activated = True
def _deactivate(self):
self.is_activated = False
self.deactivate()
def activate(self):
pass
def deactivate(self):
pass
def process(self, request, activity, **kwargs):
"""
An implemented plugin should override this method.
Here, we assume that a process_entries method exists.
"""
newentries = list(
self.process_entries(request.entries, activity))
request.entries = newentries
return request
def process_entries(self, entries, activity):
for entry in entries:
self.log.debug('Processing entry with plugin {}: {}'.format(
self, entry))
results = self.process_entry(entry, activity)
if inspect.isgenerator(results):
for result in results:
yield result
else:
yield results
def process_entry(self, entry, activity):
"""
This base method is here to adapt plugins which only
implement the *process* function.
Note that this method may yield an annotated entry or a list of
entries (e.g. in a tokenizer)
"""
raise NotImplementedError(
'You need to implement process, process_entries or process_entry in your plugin'
)
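To make the new processing interface concrete, a plugin only needs to override one of process, process_entries or process_entry. A hedged sketch (the class and its heuristic are invented for illustration):
from senpy.plugins import Analyser
from senpy.models import Sentiment

class VeryNaiveSentiment(Analyser):
    '''Toy analyser that marks entries containing "good" as positive'''
    author = '@example'
    version = '0.1'

    def process_entry(self, entry, activity):
        s = Sentiment()
        text = entry['nif:isString']
        s['marl:hasPolarity'] = 'marl:Positive' if 'good' in text else 'marl:Negative'
        s.prov(activity)                    # record which activity produced the annotation
        entry.sentiments.append(s)
        yield entry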
def test(self, test_cases=None):
if not test_cases:
if not hasattr(self, 'test_cases'):
raise AttributeError(('Plugin {} [{}] does not have any defined '
'test cases').format(self.id,
inspect.getfile(self.__class__)))
raise AttributeError(
('Plugin {} [{}] does not have any defined '
'test cases').format(self.id,
inspect.getfile(self.__class__)))
test_cases = self.test_cases
for case in test_cases:
try:
fmt = 'case: {}'.format(case.get('name', case))
if 'name' in case:
self.log.info('Test case: {}'.format(case['name']))
self.log.debug('Test case:\n\t{}'.format(
pprint.pformat(fmt)))
self.test_case(case)
self.log.debug('Test case passed:\n{}'.format(pprint.pformat(case)))
except Exception as ex:
self.log.warn('Test case failed:\n{}'.format(pprint.pformat(case)))
self.log.warning('Test case failed:\n{}'.format(
pprint.pformat(case)))
raise
def test_case(self, case, mock=testing.MOCK_REQUESTS):
if 'entry' not in case and 'input' in case:
entry = models.Entry(_auto_id=False)
entry.nif__isString = case['input']
case['entry'] = entry
entry = models.Entry(case['entry'])
given_parameters = case.get('params', case.get('parameters', {}))
expected = case.get('expected', None)
@@ -152,21 +217,27 @@ class Plugin(with_metaclass(PluginMeta, models.Plugin)):
responses = case.get('responses', [])
try:
params = api.parse_params(given_parameters, self.extra_params)
request = models.Response()
parameters = api.parse_params(given_parameters,
self.extra_params)
request.entries = [
entry,
]
method = partial(self.analyse_entries, [entry, ], params)
activity = self.activity(parameters)
method = partial(self.process, request, activity)
if mock:
res = list(method())
res = method()
else:
with testing.patch_all_requests(responses):
res = list(method())
res = method()
if not isinstance(expected, list):
expected = [expected]
utils.check_template(res, expected)
for r in res:
r.validate()
utils.check_template(res.entries, expected)
res.validate()
except models.Error:
if should_fail:
return
@@ -180,11 +251,15 @@ class Plugin(with_metaclass(PluginMeta, models.Plugin)):
return alternative
raise IOError('File does not exist: {}'.format(fname))
def path(self, fpath):
if not os.path.isabs(fpath):
fpath = os.path.join(self.data_folder, fpath)
return fpath
def open(self, fpath, mode='r'):
if 'w' in mode:
# When writing, only use absolute paths or data_folder
if not os.path.isabs(fpath):
fpath = os.path.join(self.data_folder, fpath)
fpath = self.path(fpath)
else:
fpath = self.find_file(fpath)
@@ -198,48 +273,41 @@ class Plugin(with_metaclass(PluginMeta, models.Plugin)):
SenpyPlugin = Plugin
class Analysis(Plugin):
class Analyser(Plugin):
'''
A subclass of Plugin that analyses text and provides an annotation.
'''
def analyse(self, *args, **kwargs):
raise NotImplementedError(
'Your plugin should implement either analyse or analyse_entry')
# Deprecated
def analyse(self, request, activity):
return super(Analyser, self).process(request, activity)
def analyse_entry(self, entry, parameters):
""" An implemented plugin should override this method.
This base method is here to adapt old style plugins which only
implement the *analyse* function.
Note that this method may yield an annotated entry or a list of
entries (e.g. in a tokenizer)
"""
text = entry['nif:isString']
params = copy.copy(parameters)
params['input'] = text
results = self.analyse(**params)
for i in results.entries:
# Deprecated
def analyse_entries(self, entries, activity):
for i in super(Analyser, self).process_entries(entries, activity):
yield i
def analyse_entries(self, entries, parameters):
for entry in entries:
self.log.debug('Analysing entry with plugin {}: {}'.format(self, entry))
results = self.analyse_entry(entry, parameters)
if inspect.isgenerator(results):
for result in results:
yield result
else:
yield results
def process(self, request, activity, **kwargs):
return self.analyse(request, activity)
def test_case(self, case):
if 'entry' not in case and 'input' in case:
entry = models.Entry(_auto_id=False)
entry.nif__isString = case['input']
case['entry'] = entry
super(Analysis, self).test_case(case)
def process_entries(self, entries, activity):
for i in self.analyse_entries(entries, activity):
yield i
def process_entry(self, entry, activity, **kwargs):
if hasattr(self, 'analyse_entry'):
for i in self.analyse_entry(entry, activity):
yield i
else:
super(Analyser, self).process_entry(entry, activity, **kwargs)
AnalysisPlugin = Analysis
AnalysisPlugin = Analyser
class Transformation(AnalysisPlugin):
'''Empty'''
pass
class Conversion(Plugin):
@@ -247,106 +315,183 @@ class Conversion(Plugin):
A subclass of Plugin that converts between different annotation models.
e.g. a conversion of emotion models, or normalization of sentiment values.
'''
pass
def process(self, response, parameters, plugins=None, **kwargs):
plugins = plugins or []
newentries = []
for entry in response.entries:
newentries.append(
self.convert_entry(entry, parameters, plugins))
response.entries = newentries
return response
def convert_entry(self, entry, parameters, conversions_applied):
raise NotImplementedError(
'You should implement a way to convert each entry, or a custom process method'
)
ConversionPlugin = Conversion
class SentimentPlugin(Analysis, models.SentimentPlugin):
class Evaluable(Plugin):
'''
Common class for plugins that can be evaluated with GSITK.
They should implement the methods below.
'''
def as_pipe(self):
raise Exception('Implement the as_pipe function')
def evaluate_func(self, X, activity=None):
raise Exception('Implement the evaluate_func function')
class SentimentPlugin(Analyser, Evaluable, models.SentimentPlugin):
'''
Sentiment plugins provide sentiment annotation (using Marl)
'''
minPolarityValue = 0
maxPolarityValue = 1
_terse_keys = Analyser._terse_keys + ['minPolarityValue', 'maxPolarityValue']
def test_case(self, case):
if 'polarity' in case:
expected = case.get('expected', {})
s = models.Sentiment(_auto_id=False)
s.marl__hasPolarity = case['polarity']
if 'sentiments' not in expected:
expected['sentiments'] = []
expected['sentiments'].append(s)
if 'marl:hasOpinion' not in expected:
expected['marl:hasOpinion'] = []
expected['marl:hasOpinion'].append(s)
case['expected'] = expected
super(SentimentPlugin, self).test_case(case)
def normalize(self, value, minValue, maxValue):
nv = minValue + (value - self.minPolarityValue) * (
self.maxPolarityValue - self.minPolarityValue) / (maxValue - minValue)
return nv
class EmotionPlugin(Analysis, models.EmotionPlugin):
def as_pipe(self):
pipe = gsitk_compat.Pipeline([('senpy-plugin', ScikitWrapper(self))])
pipe.name = self.id
return pipe
def evaluate_func(self, X, activity=None):
if activity is None:
parameters = api.parse_params({},
self.extra_params)
activity = self.activity(parameters)
entries = []
for feat in X:
if isinstance(feat, list):
feat = ' '.join(feat)
entries.append(models.Entry(nif__isString=feat))
labels = []
for e in self.process_entries(entries, activity):
sent = e.sentiments[0].polarity
label = -1
if sent == 'marl:Positive':
label = 1
elif sent == 'marl:Negative':
label = -1
labels.append(label)
return labels
class EmotionPlugin(Analyser, models.EmotionPlugin):
'''
Emotion plugins provide emotion annotation (using Onyx)
'''
minEmotionValue = 0
maxEmotionValue = 1
_terse_keys = Analyser._terse_keys + ['minEmotionValue', 'maxEmotionValue']
class EmotionConversion(Conversion):
'''
A subclass of Conversion that converts emotion annotations using different models
'''
pass
def can_convert(self, fromModel, toModel):
'''
Whether this plugin can convert from fromModel to toModel.
If fromModel is None, it is interpreted as "any Model"
'''
for pair in self.onyx__doesConversion:
if (pair['onyx:conversionTo'] == toModel) and \
((fromModel is None) or (pair['onyx:conversionFrom'] == fromModel)):
return True
return False
EmotionConversionPlugin = EmotionConversion
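For reference, each element of onyx__doesConversion is expected to be a pair like the following (the model URIs are taken from the bundled plugins and used here only as an example):
pair = {
    'onyx:conversionFrom': 'emoml:big6',
    'onyx:conversionTo': 'emoml:pad',
}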
class Box(AnalysisPlugin):
class PostProcessing(Plugin):
'''
A plugin that converts the output of other plugins (post-processing).
'''
def check(self, request, plugins):
'''Should this plugin be run for this request?'''
return False
class Box(Analyser):
'''
Black box plugins delegate analysis to a function.
The flow is like so:
The flow is like this:
.. code-block::
entry --> input() --> predict_one() --> output() --> entry'
entries --> to_features() --> predict_many() --> to_entry() --> entries'
In other words: their ``input`` method converts a query (entry and a set of parameters) into
the input to the box method. The ``output`` method converts the results given by the box into
an entry that senpy can handle.
In other words: their ``to_features`` method converts a query (entry and a set of parameters)
into the input to the `predict_one` method, which only uses an array of features.
The ``to_entry`` method converts the results given by the box into an entry that senpy can
handle.
'''
def input(self, entry, params=None):
def to_features(self, entry, activity=None):
'''Transforms a query (entry+param) into an input for the black box'''
return entry
def output(self, output, entry=None, params=None):
def to_entry(self, features, entry=None, activity=None):
'''Transforms the results of the black box into an entry'''
return output
return entry
def predict_one(self, input):
raise NotImplementedError('You should define the behavior of this plugin')
def predict_one(self, features, activity=None):
raise NotImplementedError(
'You should define the behavior of this plugin')
def analyse_entries(self, entries, params):
def predict_many(self, features, activity=None):
results = []
for feat in features:
results.append(self.predict_one(features=feat, activity=activity))
return results
def process_entry(self, entry, activity):
for i in self.process_entries([entry], activity):
yield i
def process_entries(self, entries, activity):
features = []
for entry in entries:
input = self.input(entry=entry, params=params)
results = self.predict_one(input=input)
yield self.output(output=results, entry=entry, params=params)
features.append(self.to_features(entry=entry, activity=activity))
results = self.predict_many(features=features, activity=activity)
def fit(self, X=None, y=None):
return self
def transform(self, X):
return [self.predict_one(x) for x in X]
def predict(self, X):
return self.transform(X)
def fit_transform(self, X, y):
self.fit(X, y)
return self.transform(X)
def as_pipe(self):
pipe = gsitk_compat.Pipeline([('plugin', self)])
pipe.name = self.name
return pipe
for (result, entry) in zip(results, entries):
yield self.to_entry(features=result, entry=entry, activity=activity)
class TextBox(Box):
'''A black box plugin that takes only text as input'''
def input(self, entry, params):
entry = super(TextBox, self).input(entry, params)
return entry['nif:isString']
def to_features(self, entry, activity):
return [entry['nif:isString']]
class SentimentBox(TextBox, SentimentPlugin):
@@ -354,17 +499,35 @@ class SentimentBox(TextBox, SentimentPlugin):
A box plugin where the output is only a polarity label or a tuple (polarity, polarityValue)
'''
def output(self, output, entry, **kwargs):
s = models.Sentiment()
try:
label, value = output
except ValueError:
label, value = output, None
s.prov(self)
s.polarity = label
if value is not None:
s.polarityValue = value
entry.sentiments.append(s)
classes = ['marl:Positive', 'marl:Neutral', 'marl:Negative']
binary = True
def to_entry(self, features, entry, activity, **kwargs):
if len(features) != len(self.classes):
raise models.Error('The number of features ({}) does not match the number of '
'classes in plugin.classes ({})'.format(len(features), len(self.classes)))
minValue = activity.param('marl:minPolarityValue', 0)
maxValue = activity.param('marl:maxPolarityValue', 1)
activity['marl:minPolarityValue'] = minValue
activity['marl:maxPolarityValue'] = maxValue
for k, v in zip(self.classes, features):
s = models.Sentiment()
if self.binary:
if not v: # Carry on if the value is 0
continue
s['marl:hasPolarity'] = k
else:
if v is not None:
s['marl:hasPolarity'] = k
nv = self.normalize(v, minValue, maxValue)
s['marl:polarityValue'] = nv
s.prov(activity)
entry.sentiments.append(s)
return entry
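As a hedged sketch of the SentimentBox contract (predict_one must return one score per element of classes; the keyword-counting classifier below is made up for illustration):
from senpy.plugins import SentimentBox

class ThresholdSentiment(SentimentBox):
    '''Toy plugin that maps a naive keyword count to one of the three default classes'''
    author = '@example'
    version = '0.1'

    classes = ['marl:Positive', 'marl:Neutral', 'marl:Negative']
    binary = True

    def predict_one(self, features, activity=None):
        text = features[0].lower()          # to_features passes [nif:isString]
        score = text.count('good') - text.count('bad')
        if score > 0:
            return [1, 0, 0]
        if score < 0:
            return [0, 0, 1]
        return [0, 1, 0]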
@@ -373,19 +536,27 @@ class EmotionBox(TextBox, EmotionPlugin):
A box plugin where the output is only a tuple of emotion labels
'''
def output(self, output, entry, **kwargs):
if not isinstance(output, list):
output = [output]
EMOTIONS = []
with_intensity = True
def to_entry(self, features, entry, activity, **kwargs):
s = models.EmotionSet()
entry.emotions.append(s)
for label in output:
if len(features) != len(self.EMOTIONS):
raise Exception(('The number of classes in the plugin and the number of features '
'do not match'))
for label, intensity in zip(self.EMOTIONS, features):
e = models.Emotion(onyx__hasEmotionCategory=label)
s.append(e)
if self.with_intensity:
e.onyx__hasEmotionIntensity = intensity
s.onyx__hasEmotion.append(e)
s.prov(activity)
entry.emotions.append(s)
return entry
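A similar sketch for EmotionBox (the emotion categories and fixed intensities are invented; the plugin must return one intensity per element of EMOTIONS):
from senpy.plugins import EmotionBox

class SillyEmotion(EmotionBox):
    '''Toy plugin that always reports mild joy'''
    author = '@example'
    version = '0.1'

    EMOTIONS = ['joy', 'sadness']
    with_intensity = True

    def predict_one(self, features, activity=None):
        return [0.7, 0.3]                   # one intensity per emotion in EMOTIONS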
class MappingMixin(object):
@property
def mappings(self):
return self._mappings
@@ -394,12 +565,15 @@ class MappingMixin(object):
def mappings(self, value):
self._mappings = value
def output(self, output, entry, params):
output = self.mappings.get(output,
self.mappings.get('default', output))
return super(MappingMixin, self).output(output=output,
entry=entry,
params=params)
def to_entry(self, features, entry, activity):
features = list(features)
for i, feat in enumerate(features):
features[i] = self.mappings.get(feat,
self.mappings.get('default',
feat))
return super(MappingMixin, self).to_entry(features=features,
entry=entry,
activity=activity)
class ShelfMixin(object):
@@ -412,7 +586,8 @@ class ShelfMixin(object):
with self.open(self.shelf_file, 'rb') as p:
self._sh = pickle.load(p)
except (IndexError, EOFError, pickle.UnpicklingError):
self.log.warning('Corrupted shelf file: {}'.format(self.shelf_file))
self.log.warning('Corrupted shelf file: {}'.format(
self.shelf_file))
if not self.get('force_shelf', False):
raise
return self._sh
@@ -445,7 +620,7 @@ class ShelfMixin(object):
pickle.dump(self._sh, f)
def pfilter(plugins, plugin_type=Analysis, **kwargs):
def pfilter(plugins, plugin_type=Analyser, **kwargs):
""" Filter plugins by different criteria """
if isinstance(plugins, models.Plugins):
plugins = plugins.plugins
@@ -460,19 +635,20 @@ def pfilter(plugins, plugin_type=Analysis, **kwargs):
plugin_type = plugin_type[0].upper() + plugin_type[1:]
pclass = globals()[plugin_type]
logger.debug('Class: {}'.format(pclass))
candidates = filter(lambda x: isinstance(x, pclass),
plugins)
candidates = filter(lambda x: isinstance(x, pclass), plugins)
except KeyError:
raise models.Error('{} is not a valid type'.format(plugin_type))
else:
candidates = plugins
if 'name' in kwargs:
kwargs['name'] = kwargs['name'].lower()
logger.debug(candidates)
def matches(plug):
res = all(getattr(plug, k, None) == v for (k, v) in kwargs.items())
logger.debug(
"matching {} with {}: {}".format(plug.name, kwargs, res))
logger.debug("matching {} with {}: {}".format(plug.name, kwargs, res))
return res
if kwargs:
@@ -491,32 +667,49 @@ def load_module(name, root=None):
def _log_subprocess_output(process):
for line in iter(process.stdout.readline, b''):
logger.info('%r', line)
logger.info('%s', line.decode())
for line in iter(process.stderr.readline, b''):
logger.error('%r', line)
logger.error('%s', line.decode())
def missing_requirements(reqs):
queue = []
pool = multiprocessing.Pool(4)
for req in reqs:
res = pool.apply_async(pkg_resources.get_distribution, (req,))
queue.append((req, res))
missing = []
for req, job in queue:
try:
job.get(1)
except Exception:
missing.append(req)
return missing
def install_deps(*plugins):
installed = False
nltk_resources = set()
requirements = []
for info in plugins:
requirements = info.get('requirements', [])
if requirements:
pip_args = [sys.executable, '-m', 'pip', 'install']
for req in requirements:
pip_args.append(req)
logger.info('Installing requirements: ' + str(requirements))
process = subprocess.Popen(pip_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_log_subprocess_output(process)
exitcode = process.wait()
installed = True
if exitcode != 0:
raise models.Error("Dependencies not properly installed: {}".format(pip_args))
requirements += missing_requirements(requirements)
nltk_resources |= set(info.get('nltk_resources', []))
installed |= nltk.download(list(nltk_resources))
if requirements:
logger.info('Installing requirements: ' + str(requirements))
pip_args = [sys.executable, '-m', 'pip', 'install']
for req in requirements:
pip_args.append(req)
process = subprocess.Popen(
pip_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_log_subprocess_output(process)
exitcode = process.wait()
installed = True
if exitcode != 0:
raise models.Error(
"Dependencies not properly installed: {}".format(pip_args))
installed |= download(list(nltk_resources))
return installed
@@ -556,7 +749,7 @@ def from_folder(folders, loader=from_path, **kwargs):
def from_info(info, root=None, install_on_fail=True, **kwargs):
if any(x not in info for x in ('module',)):
if any(x not in info for x in ('module', )):
raise ValueError('Plugin info is not valid: {}'.format(info))
module = info["module"]
@@ -574,7 +767,7 @@ def from_info(info, root=None, install_on_fail=True, **kwargs):
def parse_plugin_info(fpath):
logger.debug("Parsing plugin info: {}".format(fpath))
with open(fpath, 'r') as f:
info = yaml.load(f)
info = yaml.load(f, Loader=yaml.FullLoader)
info['_path'] = fpath
return info
@@ -593,7 +786,8 @@ def one_from_module(module, root, info, **kwargs):
if '@type' in info:
cls = PluginMeta.from_type(info['@type'])
return cls(info=info, **kwargs)
instance = next(from_module(module=module, root=root, info=info, **kwargs), None)
instance = next(
from_module(module=module, root=root, info=info, **kwargs), None)
if not instance:
raise Exception("No valid plugin for: {}".format(module))
return instance
@@ -617,7 +811,8 @@ def _instances_in_module(module):
def _from_module_name(module, root, info=None, **kwargs):
module = load_module(module, root)
for plugin in _from_loaded_module(module=module, root=root, info=info, **kwargs):
for plugin in _from_loaded_module(
module=module, root=root, info=info, **kwargs):
yield plugin
@@ -628,13 +823,34 @@ def _from_loaded_module(module, info=None, **kwargs):
yield instance
cached_evs = {}
def evaluate(plugins, datasets, **kwargs):
ev = gsitk_compat.Eval(tuples=None,
datasets=datasets,
pipelines=[plugin.as_pipe() for plugin in plugins])
ev.evaluate()
results = ev.results
evaluations = evaluations_to_JSONLD(results, **kwargs)
for plug in plugins:
if not hasattr(plug, 'as_pipe'):
raise models.Error('Plugin {} cannot be evaluated'.format(plug.name))
tuples = list(product(plugins, datasets))
missing = []
for (p, d) in tuples:
if (p.id, d) not in cached_evs:
pipe = p.as_pipe()
missing.append(gsitk_compat.EvalPipeline(pipe, d))
if missing:
ev = gsitk_compat.Eval(tuples=missing, datasets=datasets)
ev.evaluate()
results = ev.results
new_ev = evaluations_to_JSONLD(results, **kwargs)
for ev in new_ev:
dataset = ev.evaluatesOn
model = ev.evaluates.rstrip('__' + dataset)
cached_evs[(model, dataset)] = ev
evaluations = []
logger.debug('Tuples: %s. Cached evaluations: %s', tuples, cached_evs)
for (p, d) in tuples:
logger.debug('Adding evaluation of %s on %s', p, d)
evaluations.append(cached_evs[(p.id, d)])
return evaluations
@@ -647,7 +863,7 @@ def evaluations_to_JSONLD(results, flatten=False):
metric_names = ['accuracy', 'precision_macro', 'recall_macro',
'f1_macro', 'f1_weighted', 'f1_micro', 'f1_macro']
for index, row in results.iterrows():
for index, row in results.fillna('Not Available').iterrows():
evaluation = models.Evaluation()
if row.get('CV', True):
evaluation['@type'] = ['StaticCV', 'Evaluation']
@@ -663,10 +879,29 @@ def evaluations_to_JSONLD(results, flatten=False):
# We should probably discontinue this representation
for name in metric_names:
metric = models.Metric()
metric['@id'] = 'Metric' + str(i)
metric['@type'] = name.capitalize()
metric.value = row[name]
evaluation.metrics.append(metric)
i += 1
evaluations.append(evaluation)
return evaluations
class ScikitWrapper(BaseEstimator, TransformerMixin):
def __init__(self, plugin=None):
self.plugin = plugin
def fit(self, X=None, y=None):
if self.plugin is not None and not self.plugin.is_activated:
self.plugin.activate()
return self
def transform(self, X):
return self.plugin.evaluate_func(X, None)
def predict(self, X):
return self.transform(X)
def fit_transform(self, X, y):
self.fit(X, y)
return self.transform(X)

View File

@@ -1,34 +0,0 @@
import random
from senpy.plugins import EmotionPlugin
from senpy.models import EmotionSet, Emotion, Entry
class EmoRand(EmotionPlugin):
name = "emoRand"
description = 'A sample plugin that returns a random emotion annotation'
author = '@balkian'
version = '0.1'
url = "https://github.com/gsi-upm/senpy-plugins-community"
requirements = {}
onyx__usesEmotionModel = "emoml:big6"
def analyse_entry(self, entry, params):
category = "emoml:big6happiness"
number = max(-1, min(1, random.gauss(0, 0.5)))
if number > 0:
category = "emoml:big6anger"
emotionSet = EmotionSet()
emotion = Emotion({"onyx:hasEmotionCategory": category})
emotionSet.onyx__hasEmotion.append(emotion)
emotionSet.prov__wasGeneratedBy = self.id
entry.emotions.append(emotionSet)
yield entry
def test(self):
params = dict()
results = list()
for i in range(100):
res = next(self.analyse_entry(Entry(nif__isString="Hello"), params))
res.validate()
results.append(res.emotions[0]['onyx:hasEmotion'][0]['onyx:hasEmotionCategory'])

View File

@@ -1,19 +1,26 @@
from senpy.plugins import AnalysisPlugin
from senpy.plugins import Transformation
from senpy.models import Entry
from nltk.tokenize.punkt import PunktSentenceTokenizer
from nltk.tokenize.simple import LineTokenizer
import nltk
class Split(AnalysisPlugin):
'''description: A sample plugin that chunks input text'''
class Split(Transformation):
'''
A plugin that chunks input text into paragraphs or sentences.
It does not provide any sort of annotation, and it is meant to precede
other annotation plugins when the annotation of individual sentences
(or paragraphs) is required.
'''
author = ["@militarpancho", '@balkian']
version = '0.2'
version = '0.3'
url = "https://github.com/gsi-upm/senpy"
nltk_resources = ['punkt']
extra_params = {
'delimiter': {
'description': 'Split text into paragraphs or sentences.',
'aliases': ['type', 't'],
'required': False,
'default': 'sentence',
@@ -21,24 +28,24 @@ class Split(AnalysisPlugin):
},
}
def activate(self):
nltk.download('punkt')
def analyse_entry(self, entry, params):
def analyse_entry(self, entry, activity):
yield entry
chunker_type = params["delimiter"]
chunker_type = activity.params["delimiter"]
original_text = entry['nif:isString']
if chunker_type == "sentence":
tokenizer = PunktSentenceTokenizer()
if chunker_type == "paragraph":
tokenizer = LineTokenizer()
chars = list(tokenizer.span_tokenize(original_text))
for i, chunk in enumerate(tokenizer.tokenize(original_text)):
print(chunk)
if len(chars) == 1:
# This sentence was already split
return
for i, chunk in enumerate(chars):
start, end = chunk
e = Entry()
e['nif:isString'] = chunk
e['nif:isString'] = original_text[start:end]
if entry.id:
e.id = entry.id + "#char={},{}".format(chars[i][0], chars[i][1])
e.id = entry.id + "#char={},{}".format(start, end)
yield e
test_cases = [

View File

@@ -103,7 +103,9 @@ class CentroidConversion(EmotionConversionPlugin):
for i in emotionSet.onyx__hasEmotion:
e.onyx__hasEmotion.append(self._backwards_conversion(i))
else:
raise Error('EMOTION MODEL NOT KNOWN')
raise Error('EMOTION MODEL NOT KNOWN. '
'Cannot convert from {} to {}'.format(fromModel,
toModel))
yield e
def test(self, info=None):

View File

@@ -1,6 +1,6 @@
---
name: Ekman2FSRE
module: senpy.plugins.conversion.emotion.centroids
module: senpy.plugins.postprocessing.emotion.centroids
description: Plugin to convert emotion sets from Ekman categories to FSRE dimensions
version: 0.2
# No need to specify onyx:doesConversion because centroids.py adds it automatically from centroids_direction

View File

@@ -1,6 +1,6 @@
---
name: Ekman2PAD
module: senpy.plugins.conversion.emotion.centroids
module: senpy.plugins.postprocessing.emotion.centroids
description: Plugin to convert emotion sets from Ekman to VAD
version: 0.2
# No need to specify onyx:doesConversion because centroids.py adds it automatically from centroids_direction
@@ -31,7 +31,7 @@ centroids_direction:
- emoml:pad
aliases: # These are aliases for any key in the centroid, to avoid repeating a long name several times
A: emoml:pad-dimensions:arousal
V: emoml:pad-dimensions:pleasure
V: emoml:pad-dimensions:valence
D: emoml:pad-dimensions:dominance
anger: emoml:big6anger
disgust: emoml:big6disgust

View File

@@ -0,0 +1,191 @@
from senpy import PostProcessing, easy_test
class MaxEmotion(PostProcessing):
'''Plugin to extract the emotion with the highest intensity from an EmotionSet'''
author = '@dsuarezsouto'
version = '0.1'
def process_entry(self, entry, activity):
if len(entry.emotions) < 1:
yield entry
return
set_emotions = entry.emotions[0]['onyx:hasEmotion']
# If there is only one emotion, do not modify it
if len(set_emotions) < 2:
yield entry
return
max_emotion = set_emotions[0]
# Extract the emotion with the highest intensity from the emotion set
for tmp_emotion in set_emotions:
if tmp_emotion['onyx:hasEmotionIntensity'] > max_emotion[
'onyx:hasEmotionIntensity']:
max_emotion = tmp_emotion
if max_emotion['onyx:hasEmotionIntensity'] == 0:
max_emotion['onyx:hasEmotionCategory'] = "neutral"
max_emotion['onyx:hasEmotionIntensity'] = 1.0
entry.emotions[0]['onyx:hasEmotion'] = [max_emotion]
entry.emotions[0]['prov:wasGeneratedBy'] = activity.id
yield entry
def check(self, request, plugins):
return 'maxemotion' in request.parameters and self not in plugins
# Test Cases:
# 1 Normal Situation.
# 2 Case to return a Neutral Emotion.
test_cases = [
{
"name": "If there are several emotions within an emotion set, reduce it to one.",
"entry": {
"@type":
"entry",
"onyx:hasEmotionSet": [
{
"@id":
"Emotions0",
"@type":
"emotionSet",
"onyx:hasEmotion": [
{
"@id": "_:Emotion_1538121033.74",
"@type": "emotion",
"onyx:hasEmotionCategory": "anger",
"onyx:hasEmotionIntensity": 0
},
{
"@id": "_:Emotion_1538121033.74",
"@type": "emotion",
"onyx:hasEmotionCategory": "joy",
"onyx:hasEmotionIntensity": 0.3333333333333333
},
{
"@id": "_:Emotion_1538121033.74",
"@type": "emotion",
"onyx:hasEmotionCategory": "negative-fear",
"onyx:hasEmotionIntensity": 0
},
{
"@id": "_:Emotion_1538121033.74",
"@type": "emotion",
"onyx:hasEmotionCategory": "sadness",
"onyx:hasEmotionIntensity": 0
},
{
"@id": "_:Emotion_1538121033.74",
"@type": "emotion",
"onyx:hasEmotionCategory": "disgust",
"onyx:hasEmotionIntensity": 0
}
]
}
],
"nif:isString":
"Test"
},
'expected': {
"@type":
"entry",
"onyx:hasEmotionSet": [
{
"@id":
"Emotions0",
"@type":
"emotionSet",
"onyx:hasEmotion": [
{
"@id": "_:Emotion_1538121033.74",
"@type": "emotion",
"onyx:hasEmotionCategory": "joy",
"onyx:hasEmotionIntensity": 0.3333333333333333
}
]
}
],
"nif:isString":
"Test"
}
},
{
"name":
"If the maximum emotion has an intensity of 0, return a neutral emotion.",
"entry": {
"@type":
"entry",
"onyx:hasEmotionSet": [{
"@id":
"Emotions0",
"@type":
"emotionSet",
"onyx:hasEmotion": [
{
"@id": "_:Emotion_1538121033.74",
"@type": "emotion",
"onyx:hasEmotionCategory": "anger",
"onyx:hasEmotionIntensity": 0
},
{
"@id": "_:Emotion_1538121033.74",
"@type": "emotion",
"onyx:hasEmotionCategory": "joy",
"onyx:hasEmotionIntensity": 0
},
{
"@id":
"_:Emotion_1538121033.74",
"@type":
"emotion",
"onyx:hasEmotionCategory":
"negative-fear",
"onyx:hasEmotionIntensity":
0
},
{
"@id": "_:Emotion_1538121033.74",
"@type": "emotion",
"onyx:hasEmotionCategory":
"sadness",
"onyx:hasEmotionIntensity": 0
},
{
"@id": "_:Emotion_1538121033.74",
"@type": "emotion",
"onyx:hasEmotionCategory":
"disgust",
"onyx:hasEmotionIntensity": 0
}]
}],
"nif:isString":
"Test"
},
'expected': {
"@type":
"entry",
"onyx:hasEmotionSet": [{
"@id":
"Emotions0",
"@type":
"emotionSet",
"onyx:hasEmotion": [{
"@id": "_:Emotion_1538121033.74",
"@type": "emotion",
"onyx:hasEmotionCategory": "neutral",
"onyx:hasEmotionIntensity": 1
}]
}],
"nif:isString":
"Test"
}
}
]
if __name__ == '__main__':
easy_test()

View File

@@ -1,13 +1,12 @@
import requests
import json
from senpy.plugins import SentimentPlugin
from senpy.models import Sentiment
from senpy.plugins import SentimentBox
ENDPOINT = 'http://www.sentiment140.com/api/bulkClassifyJson'
class Sentiment140(SentimentPlugin):
class Sentiment140(SentimentBox):
'''Connects to the sentiment140 free API: http://sentiment140.com'''
author = "@balkian"
@@ -16,43 +15,40 @@ class Sentiment140(SentimentPlugin):
extra_params = {
'language': {
"@id": 'lang_sentiment140',
'description': 'language of the text',
'aliases': ['language', 'l'],
'required': False,
'required': True,
'default': 'auto',
'options': ['es', 'en', 'auto']
}
}
maxPolarityValue = 1
minPolarityValue = 0
classes = ['marl:Positive', 'marl:Neutral', 'marl:Negative']
binary = True
def predict_many(self, features, activity):
lang = activity.params["language"]
data = []
for feature in features:
data.append({'text': feature[0]})
def analyse_entry(self, entry, params):
lang = params["language"]
res = requests.post(ENDPOINT,
json.dumps({
"language": lang,
"data": [{
"text": entry['nif:isString']
}]
"data": data
}))
p = params.get("prefix", None)
polarity_value = self.maxPolarityValue * int(
res.json()["data"][0]["polarity"]) * 0.25
polarity = "marl:Neutral"
neutral_value = self.maxPolarityValue / 2.0
if polarity_value > neutral_value:
polarity = "marl:Positive"
elif polarity_value < neutral_value:
polarity = "marl:Negative"
sentiment = Sentiment(
prefix=p,
marl__hasPolarity=polarity,
marl__polarityValue=polarity_value)
sentiment.prov__wasGeneratedBy = self.id
entry.sentiments.append(sentiment)
entry.language = lang
yield entry
for res in res.json()["data"]:
polarity = int(res['polarity'])
neutral_value = 2
if polarity > neutral_value:
yield [1, 0, 0]
continue
elif polarity < neutral_value:
yield [0, 0, 1]
continue
yield [0, 1, 0]
test_cases = [
{
@@ -62,7 +58,7 @@ class Sentiment140(SentimentPlugin):
'params': {},
'expected': {
"nif:isString": "I love Titanic",
'sentiments': [
'marl:hasOpinion': [
{
'marl:hasPolarity': 'marl:Positive',
}

View File

@@ -11,14 +11,12 @@
"$ref": "context.json"
},
"@type": {
"default": "AggregatedEvaluation"
},
"@id": {
"description": "ID of the aggregated evaluation",
"type": "string"
},
"evaluations": {
"default": [],
"type": "array",
"items": {
"anyOf": [

View File

@@ -9,7 +9,19 @@
"@type": {
"type": "string",
"description": "Type of the analysis. e.g. marl:SentimentAnalysis"
},
"prov:wasAssociatedWith": {
"@type": "string",
"description": "Algorithm/plugin that was used"
},
"prov:used": {
"description": "Parameters of the algorithm",
"@type": "array",
"type": "array",
"items": {
"$ref": "parameter.json"
}
}
},
"required": ["@id", "@type"]
"required": ["@type", "prov:wasAssociatedWith"]
}

View File

@@ -1,8 +1,8 @@
{
"@context": {
"@vocab": "http://www.gsi.dit.upm.es/ontologies/senpy#",
"@vocab": "http://www.gsi.upm.es/onto/senpy/ns#",
"dc": "http://dublincore.org/2012/06/14/dcelements#",
"me": "http://www.mixedemotions-project.eu/ns/model#",
"senpy": "http://www.gsi.upm.es/onto/senpy/ns#",
"prov": "http://www.w3.org/ns/prov#",
"nif": "http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core#",
"marl": "http://www.gsi.dit.upm.es/ontologies/marl/ns#",
@@ -16,10 +16,10 @@
"@container": "@set"
},
"entities": {
"@id": "me:hasEntities"
"@id": "senpy:hasEntities"
},
"suggestions": {
"@id": "me:hasSuggestions",
"@id": "senpy:hasSuggestions",
"@container": "@set"
},
"onyx:hasEmotion": {
@@ -40,8 +40,8 @@
"@id": "prov:used",
"@container": "@set"
},
"analysis": {
"@id": "AnalysisInvolved",
"activities": {
"@id": "prov:wasInformedBy",
"@type": "@id",
"@container": "@set"
},
@@ -65,6 +65,13 @@
},
"onyx:conversionTo": {
"@type": "@id"
}
},
"parameters": {
"@type": "Parameter"
},
"errors": {
"@type": "ParameterError"
},
"prefix": "http://senpy.invalid/"
}
}

View File

@@ -7,7 +7,6 @@
"properties": {
"datasets": {
"type": "array",
"default": [],
"items": {
"$ref": "dataset.json"
}

View File

@@ -19,8 +19,7 @@
"type": "array",
"items": {
"$ref": "emotion.json"
},
"default": []
}
}
},
"required": ["@id", "onyx:hasEmotion"]

View File

@@ -12,13 +12,12 @@
"type": "array",
"items": {
"$ref": "emotion.json"
},
"default": []
}
},
"prov:wasGeneratedBy": {
"type": "string",
"description": "The ID of the analysis that generated this Emotion. The full object should be included in the \"analysis\" property of the root object"
}
},
"required": ["@id", "prov:wasGeneratedBy", "onyx:hasEmotion"]
"required": ["prov:wasGeneratedBy", "onyx:hasEmotion"]
}

View File

@@ -9,31 +9,14 @@
"description": "String contained in this Context. Alternative: nif:isString",
"type": "string"
},
"sentiments": {
"marl:hasOpinion": {
"type": "array",
"items": {"$ref": "sentiment.json" },
"default": []
"items": {"$ref": "sentiment.json" }
},
"emotions": {
"onyx:hasEmotionSet": {
"type": "array",
"items": {"$ref": "emotionSet.json" },
"default": []
},
"entities": {
"type": "array",
"items": {"$ref": "entity.json" },
"default": []
},
"topics": {
"type": "array",
"items": {"$ref": "topic.json" },
"default": []
},
"suggestions": {
"type": "array",
"items": {"$ref": "suggestion.json" },
"default": []
"items": {"$ref": "emotionSet.json" }
}
},
"required": ["@id", "nif:isString"]
"required": ["nif:isString"]
}

View File

@@ -6,8 +6,7 @@
"type": "string"
},
"@type": {
"type": "array",
"default": "Evaluation"
"type": "array"
},
"metrics": {

View File

@@ -0,0 +1,16 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Parameters for a senpy analysis",
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "Name of the parameter"
},
"prov:value": {
"@type": "any",
"description": "Value of the parameter"
}
},
"required": ["name", "prov:value"]
}

View File

@@ -24,8 +24,7 @@
"description": "Sub-type of plugin. e.g. sentimentPlugin"
},
"extra_params": {
"type": "object",
"default": {}
"type": "object"
}
}
}

View File

@@ -7,9 +7,15 @@
"properties": {
"plugins": {
"type": "array",
"default": [],
"items": {
"$ref": "plugin.json"
"anyOf": [
{
"type": "string"
},
{
"$ref": "plugin.json"
}
]
}
}
}

View File

@@ -1,5 +1,6 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"name": "Entry",
"allOf": [
{"$ref": "response.json"},
{
@@ -10,36 +11,25 @@
"@context": {
"$ref": "context.json"
},
"@type": {
"default": "results"
},
"@id": {
"description": "ID of the analysis",
"type": "string"
},
"analysis": {
"default": [],
"activities": {
"type": "array",
"items": {
"anyOf": [
{
"$ref": "analysis.json"
},{
"type": "string"
}
]
"$ref": "analysis.json"
}
},
"entries": {
"type": "array",
"default": [],
"items": {
"$ref": "entry.json"
}
}
},
"required": ["@id", "analysis", "entries"]
"required": ["@id", "activities", "entries"]
}
]
}

View File

@@ -19,5 +19,5 @@
"description": "The ID of the analysis that generated this Sentiment. The full object should be included in the \"analysis\" property of the root object"
}
},
"required": ["@id", "prov:wasGeneratedBy"]
"required": ["prov:wasGeneratedBy"]
}

View File

@@ -1,347 +0,0 @@
/*!
* Bootstrap v3.1.1 (http://getbootstrap.com)
* Copyright 2011-2014 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
*/
.btn-default,
.btn-primary,
.btn-success,
.btn-info,
.btn-warning,
.btn-danger {
text-shadow: 0 -1px 0 rgba(0, 0, 0, .2);
-webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 1px rgba(0, 0, 0, .075);
box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 1px rgba(0, 0, 0, .075);
}
.btn-default:active,
.btn-primary:active,
.btn-success:active,
.btn-info:active,
.btn-warning:active,
.btn-danger:active,
.btn-default.active,
.btn-primary.active,
.btn-success.active,
.btn-info.active,
.btn-warning.active,
.btn-danger.active {
-webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);
box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);
}
.btn:active,
.btn.active {
background-image: none;
}
.btn-default {
text-shadow: 0 1px 0 #fff;
background-image: -webkit-linear-gradient(top, #fff 0%, #e0e0e0 100%);
background-image: linear-gradient(to bottom, #fff 0%, #e0e0e0 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
background-repeat: repeat-x;
border-color: #dbdbdb;
border-color: #ccc;
}
.btn-default:hover,
.btn-default:focus {
background-color: #e0e0e0;
background-position: 0 -15px;
}
.btn-default:active,
.btn-default.active {
background-color: #e0e0e0;
border-color: #dbdbdb;
}
.btn-primary {
background-image: -webkit-linear-gradient(top, #428bca 0%, #2d6ca2 100%);
background-image: linear-gradient(to bottom, #428bca 0%, #2d6ca2 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff2d6ca2', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
background-repeat: repeat-x;
border-color: #2b669a;
}
.btn-primary:hover,
.btn-primary:focus {
background-color: #2d6ca2;
background-position: 0 -15px;
}
.btn-primary:active,
.btn-primary.active {
background-color: #2d6ca2;
border-color: #2b669a;
}
.btn-success {
background-image: -webkit-linear-gradient(top, #5cb85c 0%, #419641 100%);
background-image: linear-gradient(to bottom, #5cb85c 0%, #419641 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
background-repeat: repeat-x;
border-color: #3e8f3e;
}
.btn-success:hover,
.btn-success:focus {
background-color: #419641;
background-position: 0 -15px;
}
.btn-success:active,
.btn-success.active {
background-color: #419641;
border-color: #3e8f3e;
}
.btn-info {
background-image: -webkit-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);
background-image: linear-gradient(to bottom, #5bc0de 0%, #2aabd2 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
background-repeat: repeat-x;
border-color: #28a4c9;
}
.btn-info:hover,
.btn-info:focus {
background-color: #2aabd2;
background-position: 0 -15px;
}
.btn-info:active,
.btn-info.active {
background-color: #2aabd2;
border-color: #28a4c9;
}
.btn-warning {
background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);
background-image: linear-gradient(to bottom, #f0ad4e 0%, #eb9316 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
background-repeat: repeat-x;
border-color: #e38d13;
}
.btn-warning:hover,
.btn-warning:focus {
background-color: #eb9316;
background-position: 0 -15px;
}
.btn-warning:active,
.btn-warning.active {
background-color: #eb9316;
border-color: #e38d13;
}
.btn-danger {
background-image: -webkit-linear-gradient(top, #d9534f 0%, #c12e2a 100%);
background-image: linear-gradient(to bottom, #d9534f 0%, #c12e2a 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
background-repeat: repeat-x;
border-color: #b92c28;
}
.btn-danger:hover,
.btn-danger:focus {
background-color: #c12e2a;
background-position: 0 -15px;
}
.btn-danger:active,
.btn-danger.active {
background-color: #c12e2a;
border-color: #b92c28;
}
.thumbnail,
.img-thumbnail {
-webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, .075);
box-shadow: 0 1px 2px rgba(0, 0, 0, .075);
}
.dropdown-menu > li > a:hover,
.dropdown-menu > li > a:focus {
background-color: #e8e8e8;
background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);
background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);
background-repeat: repeat-x;
}
.dropdown-menu > .active > a,
.dropdown-menu > .active > a:hover,
.dropdown-menu > .active > a:focus {
background-color: #357ebd;
background-image: -webkit-linear-gradient(top, #428bca 0%, #357ebd 100%);
background-image: linear-gradient(to bottom, #428bca 0%, #357ebd 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0);
background-repeat: repeat-x;
}
.navbar-default {
background-image: -webkit-linear-gradient(top, #fff 0%, #f8f8f8 100%);
background-image: linear-gradient(to bottom, #fff 0%, #f8f8f8 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
background-repeat: repeat-x;
border-radius: 4px;
-webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 5px rgba(0, 0, 0, .075);
box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 5px rgba(0, 0, 0, .075);
}
.navbar-default .navbar-nav > .active > a {
background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f3f3f3 100%);
background-image: linear-gradient(to bottom, #ebebeb 0%, #f3f3f3 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff3f3f3', GradientType=0);
background-repeat: repeat-x;
-webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, .075);
box-shadow: inset 0 3px 9px rgba(0, 0, 0, .075);
}
.navbar-brand,
.navbar-nav > li > a {
text-shadow: 0 1px 0 rgba(255, 255, 255, .25);
}
.navbar-inverse {
background-image: -webkit-linear-gradient(top, #3c3c3c 0%, #222 100%);
background-image: linear-gradient(to bottom, #3c3c3c 0%, #222 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);
filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
background-repeat: repeat-x;
}
.navbar-inverse .navbar-nav > .active > a {
background-image: -webkit-linear-gradient(top, #222 0%, #282828 100%);
background-image: linear-gradient(to bottom, #222 0%, #282828 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff222222', endColorstr='#ff282828', GradientType=0);
background-repeat: repeat-x;
-webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, .25);
box-shadow: inset 0 3px 9px rgba(0, 0, 0, .25);
}
.navbar-inverse .navbar-brand,
.navbar-inverse .navbar-nav > li > a {
text-shadow: 0 -1px 0 rgba(0, 0, 0, .25);
}
.navbar-static-top,
.navbar-fixed-top,
.navbar-fixed-bottom {
border-radius: 0;
}
.alert {
text-shadow: 0 1px 0 rgba(255, 255, 255, .2);
-webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .25), 0 1px 2px rgba(0, 0, 0, .05);
box-shadow: inset 0 1px 0 rgba(255, 255, 255, .25), 0 1px 2px rgba(0, 0, 0, .05);
}
.alert-success {
background-image: -webkit-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);
background-image: linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);
background-repeat: repeat-x;
border-color: #b2dba1;
}
.alert-info {
background-image: -webkit-linear-gradient(top, #d9edf7 0%, #b9def0 100%);
background-image: linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);
background-repeat: repeat-x;
border-color: #9acfea;
}
.alert-warning {
background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);
background-image: linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);
background-repeat: repeat-x;
border-color: #f5e79e;
}
.alert-danger {
background-image: -webkit-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);
background-image: linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);
background-repeat: repeat-x;
border-color: #dca7a7;
}
.progress {
background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);
background-image: linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);
background-repeat: repeat-x;
}
.progress-bar {
background-image: -webkit-linear-gradient(top, #428bca 0%, #3071a9 100%);
background-image: linear-gradient(to bottom, #428bca 0%, #3071a9 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3071a9', GradientType=0);
background-repeat: repeat-x;
}
.progress-bar-success {
background-image: -webkit-linear-gradient(top, #5cb85c 0%, #449d44 100%);
background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);
background-repeat: repeat-x;
}
.progress-bar-info {
background-image: -webkit-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);
background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0);
background-repeat: repeat-x;
}
.progress-bar-warning {
background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);
background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);
background-repeat: repeat-x;
}
.progress-bar-danger {
background-image: -webkit-linear-gradient(top, #d9534f 0%, #c9302c 100%);
background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);
background-repeat: repeat-x;
}
.list-group {
border-radius: 4px;
-webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, .075);
box-shadow: 0 1px 2px rgba(0, 0, 0, .075);
}
.list-group-item.active,
.list-group-item.active:hover,
.list-group-item.active:focus {
text-shadow: 0 -1px 0 #3071a9;
background-image: -webkit-linear-gradient(top, #428bca 0%, #3278b3 100%);
background-image: linear-gradient(to bottom, #428bca 0%, #3278b3 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3278b3', GradientType=0);
background-repeat: repeat-x;
border-color: #3278b3;
}
.panel {
-webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, .05);
box-shadow: 0 1px 2px rgba(0, 0, 0, .05);
}
.panel-default > .panel-heading {
background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);
background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);
background-repeat: repeat-x;
}
.panel-primary > .panel-heading {
background-image: -webkit-linear-gradient(top, #428bca 0%, #357ebd 100%);
background-image: linear-gradient(to bottom, #428bca 0%, #357ebd 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0);
background-repeat: repeat-x;
}
.panel-success > .panel-heading {
background-image: -webkit-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);
background-image: linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);
background-repeat: repeat-x;
}
.panel-info > .panel-heading {
background-image: -webkit-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);
background-image: linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);
background-repeat: repeat-x;
}
.panel-warning > .panel-heading {
background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);
background-image: linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);
background-repeat: repeat-x;
}
.panel-danger > .panel-heading {
background-image: -webkit-linear-gradient(top, #f2dede 0%, #ebcccc 100%);
background-image: linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);
background-repeat: repeat-x;
}
.well {
background-image: -webkit-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);
background-image: linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%);
filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);
background-repeat: repeat-x;
border-color: #dcdcdc;
-webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, .05), 0 1px 0 rgba(255, 255, 255, .1);
box-shadow: inset 0 1px 3px rgba(0, 0, 0, .05), 0 1px 0 rgba(255, 255, 255, .1);
}
/*# sourceMappingURL=bootstrap-theme.css.map */

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff