Compare commits
10 commits: 0.20.2.pos ... 0.20.8

Commits:
- bf481f0f88
- a40aa55b6a
- 50cba751a6
- dfb6d13649
- 5559d37e57
- 2116fe6f38
- affeeb9643
- 42ddc02318
- cab9a3440b
- db505da49c
@@ -1,5 +1,7 @@
**/soil_output
.*
**/.*
**/__pycache__
__pycache__
*.pyc
**/backup
.gitignore (3, vendored)

@@ -8,4 +8,5 @@ soil_output
docs/_build*
build/*
dist/*
prof
prof
backup
@@ -37,7 +37,7 @@ push_pypi:
    - echo $CI_COMMIT_TAG > soil/VERSION
    - pip install twine
    - python setup.py sdist bdist_wheel
    - TWINE_PASSWORD=${PYPI_PASSWORD} TWINE_USERNAME={PYPI_USERNAME} python -m twine upload --ignore-existing dist/*
    - TWINE_PASSWORD=$PYPI_PASSWORD TWINE_USERNAME=$PYPI_USERNAME python -m twine upload dist/*

check_pypi:
  only:
@@ -48,3 +48,6 @@ check_pypi:
  stage: check_published
  script:
    - pip install soil==$CI_COMMIT_TAG
  # Allow PYPI to update its index before we try to install
  when: delayed
  start_in: 2 minutes
CHANGELOG.md (41)

@@ -3,6 +3,47 @@ All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [UNRELEASED]

## [0.20.8]
### Changed
* Tsih bumped to version 0.1.8
### Fixed
* Mentions of `id` in the docs. It should be `state_id` now.
* Fixed bug: environment agents were not being added to the simulation

## [0.20.7]
### Changed
* Creating a `time.When` from another `time.When` does not nest them anymore (it returns the argument)
### Fixed
* Bug with `time.NEVER`/`time.INFINITY`

## [0.20.6]
### Fixed
* Agents now return `time.INFINITY` when dead, instead of 'inf'
* `soil.__init__` does not re-export the built-in `time` module (change in `soil.simulation`). It used to create subtle import conflicts when importing `soil.time`.
* Parallel simulations were broken because lambdas cannot be pickled properly, which is needed for multiprocessing.
### Changed
* Some internal simulation methods do not accept `*args` anymore, to avoid ambiguity and bugs.

## [0.20.5]
### Changed
* Defaults are now set in the agent `__init__`, not in the environment. This decouples both classes a bit more, and it is more intuitive.

## [0.20.4]
### Added
* Agents can now be given any kwargs, which will be used to set their state
* Environments have a default logger `self.logger` and a `log` method, just like agents

## [0.20.3]
### Fixed
* Default state values are now deepcopied again.
* Seeds for environments only concatenate the trial id (i.e., a number), to provide repeatable results.
* `Environment.run` now calls `Environment.step`, to allow for easy overloading of the environment step
### Removed
* Datacollectors are not being used for now.
* `time.TimedActivation.step` does not use an `until` parameter anymore.
### Changed
* Simulations now run right up to `until` (open interval)
* Time instants (`time.When`) don't need to be floats anymore. Now we can avoid precision issues with big numbers by using ints.
* The rabbits simulation is more idiomatic (using subclasses)

## [0.20.2]
### Fixed
* CI/CD testing issues
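A minimal sketch of the 0.20.4 behaviour above (extra keyword arguments become agent state), assuming the `BaseAgent.__init__(**kwargs)` change and the no-argument `environment.Environment()` constructor used in `tests/test_agents.py` below; `MyAgent` is a hypothetical agent written only for illustration:

```python
from soil import agents, environment

class MyAgent(agents.FSM):
    @agents.default_state
    @agents.state
    def neutral(self):
        self.debug('I am running')

env = environment.Environment()
# Extra keyword arguments are stored on the agent as state (0.20.4)
a = MyAgent(unique_id=0, model=env, has_tv=True)
assert a.has_tv is True
```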
[Binary image changes under docs/: several existing output_*.png figures were modified or removed (only their sizes were recorded). New files added: docs/output_58_0.png, docs/output_58_1.png, docs/output_58_2.png, docs/output_58_3.png, docs/output_58_4.png, docs/output_60_0.png, docs/output_60_1.png, docs/output_60_2.png, docs/output_60_3.png, docs/output_60_4.png, docs/output_62_0.png, docs/output_62_1.png, docs/output_62_2.png, docs/output_62_3.png, docs/output_62_4.png, docs/output_68_1.png, docs/output_70_1.png, docs/output_77_0.png, docs/output_81_0.png, docs/output_82_1.png, docs/output_83_0.png]
@@ -1 +1 @@
ipython==7.23
ipython==7.31.1
examples/Untitled.ipynb (80808)

@@ -2,13 +2,12 @@
"cells": [
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 1,
"metadata": {
"ExecuteTime": {
"end_time": "2017-11-08T16:22:30.732107Z",
"start_time": "2017-11-08T17:22:30.059855+01:00"
},
"collapsed": true
}
},
"outputs": [],
"source": [
@@ -28,24 +27,16 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 2,
"metadata": {
"ExecuteTime": {
"end_time": "2017-11-08T16:22:35.580593Z",
"start_time": "2017-11-08T17:22:35.542745+01:00"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Populating the interactive namespace from numpy and matplotlib\n"
]
}
],
"outputs": [],
"source": [
"%pylab inline\n",
"%matplotlib inline\n",
"\n",
"from soil import *"
]
@@ -66,7 +57,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 3,
"metadata": {
"ExecuteTime": {
"end_time": "2017-11-08T16:22:37.242327Z",
@@ -86,7 +77,7 @@
" prob_neighbor_spread: 0.0\r\n",
" prob_tv_spread: 0.01\r\n",
"interval: 1\r\n",
"max_time: 30\r\n",
"max_time: 300\r\n",
"name: Sim_all_dumb\r\n",
"network_agents:\r\n",
"- agent_type: DumbViewer\r\n",
@@ -110,7 +101,7 @@
" prob_neighbor_spread: 0.0\r\n",
" prob_tv_spread: 0.01\r\n",
"interval: 1\r\n",
"max_time: 30\r\n",
"max_time: 300\r\n",
"name: Sim_half_herd\r\n",
"network_agents:\r\n",
"- agent_type: DumbViewer\r\n",
@@ -142,18 +133,18 @@
" prob_neighbor_spread: 0.0\r\n",
" prob_tv_spread: 0.01\r\n",
"interval: 1\r\n",
"max_time: 30\r\n",
"max_time: 300\r\n",
"name: Sim_all_herd\r\n",
"network_agents:\r\n",
"- agent_type: HerdViewer\r\n",
" state:\r\n",
" has_tv: true\r\n",
" id: neutral\r\n",
" state_id: neutral\r\n",
" weight: 1\r\n",
"- agent_type: HerdViewer\r\n",
" state:\r\n",
" has_tv: true\r\n",
" id: neutral\r\n",
" state_id: neutral\r\n",
" weight: 1\r\n",
"network_params:\r\n",
" generator: barabasi_albert_graph\r\n",
@@ -169,13 +160,13 @@
" prob_tv_spread: 0.01\r\n",
" prob_neighbor_cure: 0.1\r\n",
"interval: 1\r\n",
"max_time: 30\r\n",
"max_time: 300\r\n",
"name: Sim_wise_herd\r\n",
"network_agents:\r\n",
"- agent_type: HerdViewer\r\n",
" state:\r\n",
" has_tv: true\r\n",
" id: neutral\r\n",
" state_id: neutral\r\n",
" weight: 1\r\n",
"- agent_type: WiseViewer\r\n",
" state:\r\n",
@@ -195,13 +186,13 @@
" prob_tv_spread: 0.01\r\n",
" prob_neighbor_cure: 0.1\r\n",
"interval: 1\r\n",
"max_time: 30\r\n",
"max_time: 300\r\n",
"name: Sim_all_wise\r\n",
"network_agents:\r\n",
"- agent_type: WiseViewer\r\n",
" state:\r\n",
" has_tv: true\r\n",
" id: neutral\r\n",
" state_id: neutral\r\n",
" weight: 1\r\n",
"- agent_type: WiseViewer\r\n",
" state:\r\n",
@@ -225,7 +216,7 @@
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": 4,
"metadata": {
"ExecuteTime": {
"end_time": "2017-11-08T18:07:46.781745Z",
@@ -233,7 +224,24 @@
},
"scrolled": true
},
"outputs": [],
"outputs": [
{
"ename": "ValueError",
"evalue": "No objects to concatenate",
"output_type": "error",
"traceback": [
    ---------------------------------------------------------------------------
    ValueError                                Traceback (most recent call last)
    Cell In[4], line 1
    ----> 1 evodumb = analysis.read_data('soil_output/Sim_all_dumb/', process=analysis.get_count, group=True, keys=['id']);

    File /mnt/data/home/j/git/lab.gsi/soil/soil/soil/analysis.py:14, in read_data(group, *args, **kwargs)
         12 iterable = _read_data(*args, **kwargs)
         13 if group:
    ---> 14     return group_trials(iterable)
         15 else:
         16     return list(iterable)

    File /mnt/data/home/j/git/lab.gsi/soil/soil/soil/analysis.py:201, in group_trials(trials, aggfunc)
        199 trials = list(trials)
        200 trials = list(map(lambda x: x[1] if isinstance(x, tuple) else x, trials))
    --> 201 return pd.concat(trials).groupby(level=0).agg(aggfunc).reorder_levels([2, 0, 1], axis=1)

    File /mnt/data/home/j/git/lab.gsi/soil/soil/.env-v0.20/lib/python3.8/site-packages/pandas/util/_decorators.py:331, in deprecate_nonkeyword_arguments.<locals>.decorate.<locals>.wrapper(*args, **kwargs)
        325 if len(args) > num_allow_args:
        326     warnings.warn(
        327         msg.format(arguments=_format_argument_list(allow_args)),
        328         FutureWarning,
        329         stacklevel=find_stack_level(),
        330     )
    --> 331 return func(*args, **kwargs)

    File /mnt/data/home/j/git/lab.gsi/soil/soil/.env-v0.20/lib/python3.8/site-packages/pandas/core/reshape/concat.py:368, in concat(objs, axis, join, ignore_index, keys, levels, names, verify_integrity, sort, copy)
        146 @deprecate_nonkeyword_arguments(version=None, allowed_args=["objs"])
        147 def concat(
        148     objs: Iterable[NDFrame] | Mapping[HashableT, NDFrame],
        (...)
        157     copy: bool = True,
        158 ) -> DataFrame | Series:
        159     """
        160     Concatenate pandas objects along a particular axis.
        (...)
        367     """
    --> 368     op = _Concatenator(
        369         objs,
        370         axis=axis,
        371         ignore_index=ignore_index,
        372         join=join,
        373         keys=keys,
        374         levels=levels,
        375         names=names,
        376         verify_integrity=verify_integrity,
        377         copy=copy,
        378         sort=sort,
        379     )
        381     return op.get_result()

    File /mnt/data/home/j/git/lab.gsi/soil/soil/.env-v0.20/lib/python3.8/site-packages/pandas/core/reshape/concat.py:425, in _Concatenator.__init__(self, objs, axis, join, keys, levels, names, ignore_index, verify_integrity, copy, sort)
        422     objs = list(objs)
        424 if len(objs) == 0:
    --> 425     raise ValueError("No objects to concatenate")
        427 if keys is None:
        428     objs = list(com.not_none(*objs))

    ValueError: No objects to concatenate
]
}
],
"source": [
"evodumb = analysis.read_data('soil_output/Sim_all_dumb/', process=analysis.get_count, group=True, keys=['id']);"
]
@@ -721,9 +729,9 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"display_name": "venv-soil",
"language": "python",
"name": "python3"
"name": "venv-soil"
},
"language_info": {
"codemirror_mode": {
@@ -735,7 +743,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.2"
"version": "3.8.10"
},
"toc": {
"colors": {
@@ -17,7 +17,7 @@ class DumbViewer(FSM):
    def neutral(self):
        if self['has_tv']:
            if prob(self.env['prob_tv_spread']):
                self.set_state(self.infected)
                return self.infected

    @state
    def infected(self):
@@ -26,6 +26,12 @@ class DumbViewer(FSM):
                neighbor.infect()

    def infect(self):
        '''
        This is not a state. It is a function that other agents can use to try to
        infect this agent. DumbViewer always gets infected, but other agents like
        HerdViewer might not become infected right away
        '''

        self.set_state(self.infected)


@@ -35,12 +41,13 @@ class HerdViewer(DumbViewer):
    '''

    def infect(self):
        '''Notice again that this is NOT a state. See DumbViewer.infect for reference'''
        infected = self.count_neighboring_agents(state_id=self.infected.id)
        total = self.count_neighboring_agents()
        prob_infect = self.env['prob_neighbor_spread'] * infected/total
        self.debug('prob_infect', prob_infect)
        if prob(prob_infect):
            self.set_state(self.infected.id)
            self.set_state(self.infected)


class WiseViewer(HerdViewer):
@@ -75,5 +82,5 @@ class WiseViewer(HerdViewer):
            1.0)
        prob_cure = self.env['prob_neighbor_cure'] * (cured/infected)
        if prob(prob_cure):
            return self.cure()
            return self.cured
        return self.set_state(super().infected)
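The `infect` docstrings above describe plain helper methods (not `@state`s) that other agents call on their neighbours. A minimal sketch of that pattern, assuming the `FSM`, `state`, `default_state` and `prob` helpers used in this diff and a `get_neighboring_agents` lookup analogous to the `count_neighboring_agents` call above; `Sender` and `Receiver` are hypothetical classes for illustration only:

```python
from soil.agents import FSM, state, default_state, prob

class Receiver(FSM):
    @default_state
    @state
    def neutral(self):
        pass

    @state
    def infected(self):
        pass

    def infect(self):
        # Plain method, not a state: other agents call it to try to change
        # this agent's state, mirroring DumbViewer.infect above
        self.set_state(self.infected)

class Sender(FSM):
    @default_state
    @state
    def infected(self):
        # Try to infect each susceptible neighbour with some probability
        for neighbor in self.get_neighboring_agents(state_id=Receiver.neutral.id):
            if prob(0.5):
                neighbor.infect()
```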
@@ -18,7 +18,9 @@ class MyAgent(agents.FSM):
    @agents.default_state
    @agents.state
    def neutral(self):
        self.info('I am running')
        self.debug('I am running')
        if agents.prob(0.2):
            self.info('This runs 2/10 times on average')


s = Simulation(name='Programmatic',
@@ -29,10 +31,10 @@ s = Simulation(name='Programmatic',
               dry_run=True)


# By default, logging will only print WARNING logs (and above).
# You need to choose a lower logging level to get INFO/DEBUG traces
logging.basicConfig(level=logging.INFO)
envs = s.run()

s.dump_yaml()

for env in envs:
    env.dump_csv()
# Uncomment this to output the simulation to a YAML file
# s.dump_yaml('simulation.yaml')
@@ -12,8 +12,6 @@ class Genders(Enum):

class RabbitModel(FSM):

    level = logging.INFO

    defaults = {
        'age': 0,
        'gender': Genders.male.value,
@@ -36,6 +34,17 @@ class RabbitModel(FSM):
        if self['age'] >= self.sexual_maturity:
            self.debug('I am fertile!')
            return self.fertile
    @state
    def fertile(self):
        raise Exception("Each subclass should define its fertile state")

    @state
    def dead(self):
        self.info('Agent {} is dying'.format(self.id))
        self.die()


class Male(RabbitModel):

    @state
    def fertile(self):
@@ -47,20 +56,26 @@ class RabbitModel(FSM):
            return

        # Males try to mate
        for f in self.get_agents(state_id=self.fertile.id, gender=Genders.female.value, limit_neighbors=False, limit=self.max_females):
        for f in self.get_agents(state_id=Female.fertile.id,
                                 agent_type=Female,
                                 limit_neighbors=False,
                                 limit=self.max_females):
            r = random()
            if r < self['mating_prob']:
                self.impregnate(f)
                break # Take a break

    def impregnate(self, whom):
        if self['gender'] == Genders.female.value:
            raise NotImplementedError('Females cannot impregnate')
        whom['pregnancy'] = 0
        whom['mate'] = self.id
        whom.set_state(whom.pregnant)
        self.debug('{} impregnating: {}. {}'.format(self.id, whom.id, whom.state))

class Female(RabbitModel):
    @state
    def fertile(self):
        # Just wait for a Male
        pass

    @state
    def pregnant(self):
        self['age'] += 1
@@ -90,11 +105,9 @@ class RabbitModel(FSM):

    @state
    def dead(self):
        self.info('Agent {} is dying'.format(self.id))
        super().dead()
        if 'pregnancy' in self and self['pregnancy'] > -1:
            self.info('A mother has died carrying a baby!!')
        self.die()
        return


class RandomAccident(NetworkAgent):
@@ -1,23 +1,21 @@
---
load_module: rabbit_agents
name: rabbits_example
max_time: 150
max_time: 1000
interval: 1
seed: MySeed
agent_type: RabbitModel
agent_type: rabbit_agents.RabbitModel
environment_agents:
  - agent_type: RandomAccident
  - agent_type: rabbit_agents.RandomAccident
environment_params:
  prob_death: 0.001
default_state:
  mating_prob: 0.01
  mating_prob: 0.1
topology:
  nodes:
    - id: 1
      state:
        gender: female
      agent_type: rabbit_agents.Male
    - id: 0
      state:
        gender: male
      agent_type: rabbit_agents.Female
  directed: true
  links: []
@@ -6,4 +6,4 @@ pandas>=0.23
SALib>=1.3
Jinja2
Mesa>=0.8
tsih>=0.1.5
tsih>=0.1.9
|
@@ -1 +1 @@
|
||||
0.20.1
|
||||
0.20.8
|
@@ -65,6 +65,10 @@ def main():

    logger.info('Loading config file: {}'.format(args.file))

    if args.pdb:
        args.synchronous = True


    try:
        exporters = list(args.exporter or ['default', ])
        if args.csv:
@@ -35,7 +35,9 @@ class BaseAgent(Agent):
                 unique_id,
                 model,
                 name=None,
                 interval=None):
                 interval=None,
                 **kwargs
                 ):
        # Check for REQUIRED arguments
        # Initialize agent parameters
        if isinstance(unique_id, Agent):
@@ -52,6 +54,12 @@ class BaseAgent(Agent):

        if hasattr(self, 'level'):
            self.logger.setLevel(self.level)
        for (k, v) in self.defaults.items():
            if not hasattr(self, k) or getattr(self, k) is None:
                setattr(self, k, deepcopy(v))

        for (k, v) in kwargs.items():
            setattr(self, k, v)


        # TODO: refactor to clean up mesa compatibility
@@ -137,6 +145,7 @@ class BaseAgent(Agent):
        self.alive = False
        if remove:
            self.remove_node(self.id)
        return time.NEVER

    def step(self):
        if not self.alive:
@@ -272,7 +281,7 @@ def default_state(func):

class MetaFSM(type):
    def __init__(cls, name, bases, nmspc):
        super(MetaFSM, cls).__init__(name, bases, nmspc)
        super().__init__(name, bases, nmspc)
        states = {}
        # Re-use states from inherited classes
        default_state = None
@@ -305,18 +314,16 @@ class FSM(NetworkAgent, metaclass=MetaFSM):

    def step(self):
        self.debug(f'Agent {self.unique_id} @ state {self.state_id}')
        try:
            interval = super().step()
        except DeadAgent:
            return time.When('inf')
        interval = super().step()
        if 'id' not in self.state:
            # if 'id' in self.state:
            #     self.set_state(self.state['id'])
            if self.default_state:
                self.set_state(self.default_state.id)
            else:
                raise Exception('{} has no valid state id or default state'.format(self))
        return self.states[self.state_id](self) or interval
        interval = self.states[self.state_id](self) or interval
        if not self.alive:
            return time.NEVER
        return interval

    def set_state(self, state):
        if hasattr(state, 'id'):
@@ -475,6 +482,7 @@ def _definition_to_dict(definition, size=None, default_state=None):
    distro = sorted([item for item in definition if 'weight' in item])

    ix = 0

    def init_agent(item, id=ix):
        while id in agents:
            id += 1
@@ -112,7 +112,7 @@ def get_types(df):
    Get the value type for every key stored in a raw history dataframe.
    '''
    dtypes = df.groupby(by=['key'])['value_type'].unique()
    return {k: v[0] for k, v in dtypes.iteritems()}
    return {k: v[0] for k, v in dtypes.items()}


def process_one(df, *keys, columns=['key', 'agent_id'], values='value',
@@ -146,7 +146,7 @@ def get_count(df, *keys):
    counts = pd.DataFrame()
    for key in df.columns.levels[0]:
        g = df[[key]].apply(pd.Series.value_counts, axis=1).fillna(0)
        for value, series in g.iteritems():
        for value, series in g.items():
            counts[key, value] = series
    counts.columns = pd.MultiIndex.from_tuples(counts.columns)
    return counts
@@ -5,6 +5,7 @@ import math
import random
import yaml
import tempfile
import logging
import pandas as pd
from time import time as current_time
from copy import deepcopy
@@ -101,6 +102,8 @@ class Environment(Model):
        environment_agents = agents._convert_agent_types(distro)
        self.environment_agents = environment_agents

        self.logger = utils.logger.getChild(self.name)

    @property
    def now(self):
        if self.schedule:
@@ -121,7 +124,8 @@ class Environment(Model):
    def environment_agents(self, environment_agents):
        self._environment_agents = environment_agents

        self._env_agents = agents._definition_to_dict(definition=environment_agents)
        for (ix, agent) in enumerate(self._environment_agents):
            self.init_agent(len(self.G) + ix, agent_definitions=environment_agents, with_node=False)

    @property
    def network_agents(self):
@@ -136,15 +140,19 @@ class Environment(Model):
        for ix in self.G.nodes():
            self.init_agent(ix, agent_definitions=network_agents)

    def init_agent(self, agent_id, agent_definitions):
        node = self.G.nodes[agent_id]
    def init_agent(self, agent_id, agent_definitions, with_node=True):
        init = False
        state = dict(node)

        state = {}
        if with_node:
            node = self.G.nodes[agent_id]
            state = dict(node)
        state.update(self.states.get(agent_id, {}))

        agent_type = None
        if 'agent_type' in self.states.get(agent_id, {}):
            agent_type = self.states[agent_id]['agent_type']
        elif 'agent_type' in node:
        if 'agent_type' in state:
            agent_type = state['agent_type']
        elif with_node and 'agent_type' in node:
            agent_type = node['agent_type']
        elif 'agent_type' in self.default_state:
            agent_type = self.default_state['agent_type']
@@ -154,31 +162,30 @@ class Environment(Model):
        elif agent_definitions:
            agent_type, state = agents._agent_from_definition(agent_definitions, unique_id=agent_id)
        else:
            serialization.logger.debug('Skipping node {}'.format(agent_id))
            serialization.logger.debug('Skipping agent {}'.format(agent_id))
            return
        return self.set_agent(agent_id, agent_type, state)
        return self.set_agent(agent_id, agent_type, state, with_node=with_node)

    def set_agent(self, agent_id, agent_type, state=None):
        node = self.G.nodes[agent_id]
    def set_agent(self, agent_id, agent_type, state=None, with_node=True):
        defstate = deepcopy(self.default_state) or {}
        defstate.update(self.states.get(agent_id, {}))
        defstate.update(node.get('state', {}))
        if with_node:
            node = self.G.nodes[agent_id]
            defstate.update(node.get('state', {}))
        if state:
            defstate.update(state)
        a = None
        if agent_type:
            state = defstate
            a = agent_type(model=self,
                           unique_id=agent_id)

            for (k, v) in getattr(a, 'defaults', {}).items():
                if not hasattr(a, k) or getattr(a, k) is None:
                    setattr(a, k, v)
                           unique_id=agent_id
                           )

            for (k, v) in state.items():
                setattr(a, k, v)

        node['agent'] = a
        if with_node:
            node['agent'] = a
        self.schedule.add(a)
        return a
@@ -197,17 +204,29 @@ class Environment(Model):
        start = start or self.now
        return self.G.add_edge(agent1, agent2, **attrs)

    def log(self, message, *args, level=logging.INFO, **kwargs):
        if not self.logger.isEnabledFor(level):
            return
        message = message + " ".join(str(i) for i in args)
        message = " @{:>3}: {}".format(self.now, message)
        for k, v in kwargs:
            message += " {k}={v} ".format(k, v)
        extra = {}
        extra['now'] = self.now
        extra['unique_id'] = self.name
        return self.logger.log(level, message, extra=extra)

    def step(self):
        super().step()
        self.datacollector.collect(self)
        self.schedule.step()

    def run(self, until, *args, **kwargs):
        self._save_state()

        while self.schedule.next_time <= until and not math.isinf(self.schedule.next_time):
            self.schedule.step(until=until)
        while self.schedule.next_time < until:
            self.step()
            utils.logger.debug(f'Simulation step {self.schedule.time}/{until}. Next: {self.schedule.next_time}')
        self.schedule.time = until
        self._history.flush_cache()

    def _save_state(self, now=None):
@@ -1,11 +1,12 @@
import os
import time
import importlib
import sys
import yaml
import traceback
import logging
import networkx as nx

from time import strftime
from networkx.readwrite import json_graph
from multiprocessing import Pool
from functools import partial
@@ -98,7 +99,7 @@ class Simulation:
        self.network_params = network_params
        self.name = name or 'Unnamed'
        self.seed = str(seed or name)
        self._id = '{}_{}'.format(self.name, time.strftime("%Y-%m-%d_%H.%M.%S"))
        self._id = '{}_{}'.format(self.name, strftime("%Y-%m-%d_%H.%M.%S"))
        self.group = group or ''
        self.num_trials = num_trials
        self.max_time = max_time
@@ -142,12 +143,10 @@ class Simulation:
        '''Run the simulation and return the list of resulting environments'''
        return list(self.run_gen(*args, **kwargs))

    def _run_sync_or_async(self, parallel=False, *args, **kwargs):
    def _run_sync_or_async(self, parallel=False, **kwargs):
        if parallel and not os.environ.get('SENPY_DEBUG', None):
            p = Pool()
            func = partial(self.run_trial_exceptions,
                           *args,
                           **kwargs)
            func = partial(self.run_trial_exceptions, **kwargs)
            for i in p.imap_unordered(func, range(self.num_trials)):
                if isinstance(i, Exception):
                    logger.error('Trial failed:\n\t%s', i.message)
@@ -155,10 +154,10 @@ class Simulation:
                yield i
        else:
            for i in range(self.num_trials):
                yield self.run_trial(*args,
                yield self.run_trial(trial_id=i,
                                     **kwargs)

    def run_gen(self, *args, parallel=False, dry_run=False,
    def run_gen(self, parallel=False, dry_run=False,
                exporters=[default, ], stats=[], outdir=None, exporter_params={},
                stats_params={}, log_level=None,
                **kwargs):
@@ -184,8 +183,7 @@ class Simulation:

        for exporter in exporters:
            exporter.start()
        for env in self._run_sync_or_async(*args,
                                           parallel=parallel,
        for env in self._run_sync_or_async(parallel=parallel,
                                           log_level=log_level,
                                           **kwargs):

@@ -224,7 +222,7 @@ class Simulation:
        '''Create an environment for a trial of the simulation'''
        opts = self.environment_params.copy()
        opts.update({
            'name': trial_id,
            'name': '{}_trial_{}'.format(self.name, trial_id),
            'topology': self.topology.copy(),
            'network_params': self.network_params,
            'seed': '{}_trial_{}'.format(self.seed, trial_id),
@@ -241,12 +239,11 @@ class Simulation:
        env = self.environment_class(**opts)
        return env

    def run_trial(self, until=None, log_level=logging.INFO, **opts):
    def run_trial(self, trial_id=0, until=None, log_level=logging.INFO, **opts):
        """
        Run a single trial of the simulation

        """
        trial_id = '{}_trial_{}'.format(self.name, time.time()).replace('.', '-')
        if log_level:
            logger.setLevel(log_level)
        # Set-up trial environment and graph
@@ -52,7 +52,7 @@ class distribution(Stats):
        except TypeError:
            pass

        for name, count in t.value_counts().iteritems():
        for name, count in t.value_counts().items():
            if a not in stats['count']:
                stats['count'][a] = {}
            stats['count'][a][name] = count
@@ -68,10 +68,10 @@ class distribution(Stats):
        mean = {}

        if self.means:
            res = dfm.groupby(by=['key']).agg(['mean', 'std', 'count', 'median', 'max', 'min'])
            res = dfm.drop('metric', axis=1).groupby(by=['key']).agg(['mean', 'std', 'count', 'median', 'max', 'min'])
            mean = res['value'].to_dict()
        if self.counts:
            res = dfc.groupby(by=['key', 'value']).agg(['mean', 'std', 'count', 'median', 'max', 'min'])
            res = dfc.drop('metric', axis=1).groupby(by=['key', 'value']).agg(['mean', 'std', 'count', 'median', 'max', 'min'])
            for k, v in res['count'].to_dict().items():
                if k not in count:
                    count[k] = {}
soil/time.py (67)

@@ -6,15 +6,21 @@ from .utils import logger
from mesa import Agent


INFINITY = float('inf')

class When:
    def __init__(self, time):
        self._time = float(time)
        if isinstance(time, When):
            return time
        self._time = time

    def abs(self, time):
        return self._time

NEVER = When(INFINITY)

class Delta:

class Delta(When):
    def __init__(self, delta):
        self._delta = delta

@@ -40,48 +46,35 @@ class TimedActivation(BaseScheduler):
        heappush(self._queue, (self.time, agent.unique_id))
        super().add(agent)

    def step(self, until: float = float('inf')) -> None:
    def step(self) -> None:
        """
        Executes agents in order, one at a time. After each step,
        an agent will signal when it wants to be scheduled next.
        """

        when = None
        agent_id = None
        unsched = []
        until = until or float('inf')
        if self.next_time == INFINITY:
            return

        self.time = self.next_time
        when = self.time

        while self._queue and self._queue[0][0] == self.time:
            (when, agent_id) = heappop(self._queue)
            logger.debug(f'Stepping agent {agent_id}')

            returned = self._agents[agent_id].step()
            when = (returned or Delta(1)).abs(self.time)
            if when < self.time:
                raise Exception("Cannot schedule an agent for a time in the past ({} < {})".format(when, self.time))

            heappush(self._queue, (when, agent_id))

        self.steps += 1

        if not self._queue:
            self.time = until
            self.next_time = float('inf')
            self.time = INFINITY
            self.next_time = INFINITY
            return

        (when, agent_id) = self._queue[0]
        self.next_time = self._queue[0][0]

        if until and when > until:
            self.time = until
            self.next_time = when
            return

        self.time = when
        next_time = float("inf")

        while when == self.time:
            heappop(self._queue)
            logger.debug(f'Stepping agent {agent_id}')
            when = (self._agents[agent_id].step() or Delta(1)).abs(self.time)
            heappush(self._queue, (when, agent_id))
            if when < next_time:
                next_time = when

            if not self._queue or self._queue[0][0] > self.time:
                agent_id = None
                break
            else:
                (when, agent_id) = self._queue[0]

        if when and when < self.time:
            raise Exception("Invalid scheduling time")

        self.next_time = next_time
        self.steps += 1
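A small sketch of how the `When`/`Delta` pair above is meant to be used, matching the 0.20.3 changelog entry (instants no longer need to be floats); the assumption that `Delta.abs` adds the delay to the current time is inferred from the scheduler call `(returned or Delta(1)).abs(self.time)` and is not shown in this diff:

```python
from soil import time as stime

now = 3

# When wraps an absolute instant; integers work since 0.20.3
at_ten = stime.When(10)
assert at_ten.abs(now) == 10

# Delta wraps a relative delay; assumed to resolve against the current time,
# which is how the scheduler uses it above
in_two = stime.Delta(2)
assert in_two.abs(now) == now + 2

# NEVER is an instant at infinity; dead agents return it from step()
assert stime.NEVER.abs(now) == stime.INFINITY
```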
tests/test_agents.py (22, new file)

@@ -0,0 +1,22 @@
from unittest import TestCase
import pytest

from soil import agents, environment
from soil import time as stime

class Dead(agents.FSM):
    @agents.default_state
    @agents.state
    def only(self):
        self.die()

class TestMain(TestCase):
    def test_die_raises_exception(self):
        d = Dead(unique_id=0, model=environment.Environment())
        d.step()
        with pytest.raises(agents.DeadAgent):
            d.step()

    def test_die_returns_infinity(self):
        d = Dead(unique_id=0, model=environment.Environment())
        assert d.step().abs(0) == stime.INFINITY
@@ -127,7 +127,7 @@ class TestMain(TestCase):
        env = s.run_simulation(dry_run=True)[0]
        for agent in env.network_agents:
            last = 0
            assert len(agent[None, None]) == 11
            assert len(agent[None, None]) == 10
            for step, total in sorted(agent['total', None]):
                assert total == last + 2
                last = total