1
0
mirror of https://github.com/gsi-upm/senpy synced 2024-12-22 04:58:12 +00:00

Release 0.20

This commit is contained in:
J. Fernando Sánchez 2019-04-04 12:36:35 +02:00
parent 8a516d927e
commit 9758a2977f
3 changed files with 24 additions and 7 deletions

View File

@ -6,6 +6,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
## 0.20
### Added
* Objects can control the keys that will be used in `serialize`/`jsonld`/`as_dict` by specifying a list of keys in `terse_keys`.
e.g.
@ -27,6 +29,7 @@ e.g.
* Plugin and parameter descriptions are now formatted with [showdown](https://github.com/showdownjs/showdown).
* The web UI requests extra_parameters from the server. This is useful for pipelines. See #52
* First batch of semantic tests (using SPARQL)
* `Plugin.path()` method to get a file path from a relative path (using the senpy data folder)
### Changed
* `install_deps` now checks what requirements are already met before installing with pip.

View File

@ -251,11 +251,15 @@ class Plugin(with_metaclass(PluginMeta, models.Plugin)):
return alternative
raise IOError('File does not exist: {}'.format(fname))
def path(self, fpath):
    """Resolve *fpath* against the plugin's data folder.

    An absolute path is returned untouched; a relative one is joined
    onto ``self.data_folder``.
    """
    if os.path.isabs(fpath):
        return fpath
    return os.path.join(self.data_folder, fpath)
# Plugin.open(): resolve a path relative to the plugin, then open it.
# NOTE(review): this span is a unified-diff fragment. The two lines that
# inline the isabs/join logic are the PRE-change version and the
# `fpath = self.path(fpath)` line is the POST-change replacement — only one
# variant belongs in the real file. The method is also truncated here: the
# actual call that opens and returns the file handle lies outside this view.
def open(self, fpath, mode='r'):
if 'w' in mode:
# When writing, only use absolute paths or data_folder
if not os.path.isabs(fpath):
fpath = os.path.join(self.data_folder, fpath)
fpath = self.path(fpath)
else:
# Reads may come from other locations; search for the file instead.
fpath = self.find_file(fpath)
@ -381,7 +385,9 @@ class SentimentPlugin(Analyser, Evaluable, models.SentimentPlugin):
# Fragment of SentimentPlugin's evaluation path (truncated at both ends by
# the diff hunk). It wraps each raw feature row from X in a models.Entry and
# runs the plugin over the entries, collecting one polarity label per entry.
# NOTE(review): diff residue — the `feat[0]` append is the PRE-change line;
# the isinstance/join + plain `feat` append lines are the POST-change
# replacement. Only one variant belongs in the real file.
activity = self.activity(parameters)
entries = []
for feat in X:
entries.append(models.Entry(nif__isString=feat[0]))
if isinstance(feat, list):
feat = ' '.join(feat)
entries.append(models.Entry(nif__isString=feat))
labels = []
for e in self.process_entries(entries, activity):
# presumably the first sentiment annotation carries the predicted
# polarity — TODO confirm against the full method body.
sent = e.sentiments[0].polarity

View File

@ -320,24 +320,32 @@ class PluginsTest(TestCase):
# Build a balanced 100-row sentiment fixture: 50 positive, 50 negative.
# NOTE(review): the diff residue kept BOTH the pre-change `["bad", 0]` and
# the post-change `["bad", -1]` append lines, which would have produced 100
# negative rows; only the post-change -1 polarity line is kept here.
for _ in range(50):
    testdata.append(["good", 1])
for _ in range(50):
    testdata.append(["bad", -1])
# Two-column frame consumed by plugins.evaluate: raw text + gold polarity.
dataset = pd.DataFrame(testdata, columns=['text', 'polarity'])
class DummyPlugin(plugins.SentimentBox):
    """Baseline test plugin that labels every input as negative."""

    description = 'Plugin to test evaluation'
    version = 0
    # Output slots, in order: [positive score, negative score].
    classes = ['marl:Positive', 'marl:Negative']

    def predict_one(self, features, **kwargs):
        # NOTE(review): the stale pre-change `return 0` from the diff made
        # the lines below unreachable; it is removed here. The post-change
        # API returns a per-class score list aligned with `classes`.
        print(features[0])
        return [0, 1]
class SmartPlugin(plugins.SentimentBox):
    """Oracle test plugin that recognises the 'good' fixture text."""

    description = 'Plugin to test evaluation'
    version = 0
    # Output slots, in order: [positive score, negative score].
    classes = ['marl:Positive', 'marl:Negative']

    def predict_one(self, features, **kwargs):
        # NOTE(review): the stale pre-change `return 1` / `return 0` lines
        # from the diff are removed; the post-change API returns a per-class
        # score list aligned with `classes`.
        print(features[0])
        if features[0] == 'good':
            print('positive')
            return [1, 0]
        print('negative')
        return [0, 1]
# Instantiate the dummy plugin and run the evaluation harness over the
# in-memory fixture. NOTE(review): `plugins`, `DummyPlugin` and `dataset`
# are defined earlier in this diff fragment; the assertions on `results`
# lie outside this view.
dpipe = DummyPlugin()
results = plugins.evaluate(datasets={'testdata': dataset}, plugins=[dpipe], flatten=True)