From f811ee18c5e3b2d7e2c1c434516c2ce9f116c62f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=2E=20Fernando=20S=C3=A1nchez?=
Date: Thu, 6 Oct 2022 15:49:10 +0200
Subject: [PATCH] WIP
---
CHANGELOG.md | 5 +-
README.md | 55 ++--
docs/configuration.rst | 28 +-
docs/example.yml | 6 +-
docs/quickstart.yml | 4 +-
docs/soil_tutorial.rst | 10 +-
examples/NewsSpread.ipynb | 24 +-
examples/Untitled.ipynb | 200 ++++++------
examples/complete.yml | 1 +
.../custom_generator/custom_generator.yml | 2 +-
examples/custom_generator/mymodule.py | 7 +-
examples/custom_timeouts/custom_timeouts.py | 4 +-
examples/mesa/mesa.yml | 4 +-
examples/mesa/server.py | 2 +-
examples/mesa/social_wealth.py | 2 +-
examples/newsspread/NewsSpread.ipynb | 24 +-
examples/newsspread/NewsSpread.yml | 24 +-
examples/programmatic/programmatic.py | 2 +-
examples/pubcrawl/pubcrawl.py | 5 +-
examples/pubcrawl/pubcrawl.yml | 6 +-
examples/rabbits/rabbit_agents.py | 12 +-
examples/rabbits/rabbits.yml | 8 +-
examples/random_delays/random_delays.py | 9 +-
examples/template.yml | 4 +-
examples/terrorism/TerroristNetworkModel.py | 27 +-
examples/terrorism/TerroristNetworkModel.yml | 8 +-
examples/torvalds.yml | 2 +-
examples/tutorial/soil_tutorial.html | 10 +-
examples/tutorial/soil_tutorial.ipynb | 10 +-
soil/agents/BassModel.py | 5 +-
soil/agents/BigMarketModel.py | 11 +-
soil/agents/IndependentCascadeModel.py | 5 +-
soil/agents/ModelM2.py | 66 ++--
soil/agents/SISaModel.py | 35 +--
soil/agents/SentimentCorrelationModel.py | 5 +-
soil/agents/__init__.py | 30 +-
soil/config.py | 47 ++-
soil/datacollection.py | 20 +-
soil/environment.py | 289 +++++++++---------
soil/exporters.py | 4 +-
soil/network.py | 23 ++
soil/serialization.py | 2 -
soil/simulation.py | 165 +++++-----
soil/time.py | 11 +-
soil/utils.py | 15 +
soil/web/config.yml | 4 +-
tests/old_complete.yml | 8 +-
tests/test_analysis.py | 2 +-
tests/test_config.py | 64 +++-
tests/test_exporters.py | 4 +-
tests/test_history.py | 2 +-
tests/test_main.py | 225 +++++---------
tests/test_network.py | 85 ++++++
53 files changed, 857 insertions(+), 775 deletions(-)
create mode 100644 tests/test_network.py
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6b7dc3e..92c457e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,13 +3,14 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-## [UNRELEASED]
+## [0.3 UNRELEASED]
### Changed
-* Configuration schema is very different now. Check `soil.config` for more information. We are using Pydantic for (de)serialization.
+* Configuration schema is very different now. Check `soil.config` for more information. We are also using Pydantic for (de)serialization.
* There may be more than one topology/network in the simulation
* Agents are split into groups now. Each group may be assigned a given set of agents or an agent distribution, and a network topology to be assigned to.
### Removed
* Any `tsih` and `History` integration in the main classes. To record the state of environments/agents, just use a datacollector. In some cases this may be slower or consume more memory than the previous system. However, few cases actually used the full potential of the history, and it came at the cost of unnecessary complexity and worse performance for the majority of cases.
+
## [0.20.7]
### Changed
* Creating a `time.When` from another `time.When` does not nest them anymore (it returns the argument)
diff --git a/README.md b/README.md
index 7ea6f1d..8bcca61 100644
--- a/README.md
+++ b/README.md
@@ -5,6 +5,42 @@ Learn how to run your own simulations with our [documentation](http://soilsim.re
Follow our [tutorial](examples/tutorial/soil_tutorial.ipynb) to develop your own agent models.
+
+# Changes in version 0.3
+
+Version 0.3 came packed with many changes to provide much better integration with MESA.
+For a long time, we tried to keep soil backwards-compatible, but it turned out to be a big endeavour and the resulting code was less readable.
+This translates to harder maintenance and a worse experience for newcomers.
+In the end, we decided to make some breaking changes.
+
+If you have an older Soil simulation, you have two options:
+
+* Update the necessary configuration files and code. You may use the examples in the `examples` folder for reference, as well as the documentation.
+* Keep using a previous `soil` version.
+
+## Mesa compatibility
+
+Soil is in the process of becoming fully compatible with MESA.
+The idea is to provide a set of modular classes and functions that extend the functionality of mesa, whilst staying compatible.
+In the end, it should be possible to add regular mesa agents to a soil simulation, or use a soil agent within a mesa simulation/model.
+
+This is a non-exhaustive list of tasks to achieve compatibility:
+
+- [ ] Integrate `soil.Simulation` with mesa's runners:
+ - [ ] `soil.Simulation` could mimic/become a `mesa.batchrunner`
+- [ ] Integrate `soil.Environment` with `mesa.Model`:
+ - [x] `Soil.Environment` inherits from `mesa.Model`
+ - [x] `Soil.Environment` includes a Mesa-like Scheduler (see the `soil.time` module).
+ - [ ] Allow for `mesa.Model` to be used in a simulation.
+- [ ] Integrate `soil.Agent` with `mesa.Agent`:
+ - [x] Rename agent.id to unique_id?
+ - [x] mesa agents can be used in soil simulations (see `examples/mesa`)
+- [ ] Provide examples
+ - [ ] Using mesa modules in a soil simulation
+ - [ ] Using soil modules in a mesa simulation
+- [ ] Document the new APIs and usage
+
+
## Citation
@@ -31,25 +67,6 @@ If you use Soil in your research, don't forget to cite this paper:
```
-## Mesa compatibility
-
-Soil is in the process of becoming fully compatible with MESA.
-As of this writing,
-
-This is a non-exhaustive list of tasks to achieve compatibility:
-
-* Environments.agents and mesa.Agent.agents are not the same. env is a property, and it only takes into account network and environment agents. Might rename environment_agents to other_agents or sth like that
-
-- [ ] Integrate `soil.Simulation` with mesa's runners:
- - [ ] `soil.Simulation` could mimic/become a `mesa.batchrunner`
-- [ ] Integrate `soil.Environment` with `mesa.Model`:
- - [x] `Soil.Environment` inherits from `mesa.Model`
- - [x] `Soil.Environment` includes a Mesa-like Scheduler (see the `soil.time` module.
-- [ ] Integrate `soil.Agent` with `mesa.Agent`:
- - [x] Rename agent.id to unique_id?
- - [x] mesa agents can be used in soil simulations (see `examples/mesa`)
-- [ ] Document the new APIs and usage
-
@Copyright GSI - Universidad Politécnica de Madrid 2017-2021
[![SOIL](logo_gsi.png)](https://www.gsi.upm.es)
diff --git a/docs/configuration.rst b/docs/configuration.rst
index 3d5a88d..223fbbc 100644
--- a/docs/configuration.rst
+++ b/docs/configuration.rst
@@ -13,7 +13,7 @@ Here's an example (``example.yml``).
This example configuration will run three trials (``num_trials``) of a simulation containing a randomly generated network (``network_params``).
-The 100 nodes in the network will be SISaModel agents (``network_agents.agent_type``), which is an agent behavior that is included in Soil.
+The 100 nodes in the network will be SISaModel agents (``network_agents.agent_class``), which is an agent behavior that is included in Soil.
10% of the agents (``weight=1``) will start in the content state, 10% in the discontent state, and the remaining 80% (``weight=8``) in the neutral state.
All agents will have access to the environment (``environment_params``), which only contains one variable, ``prob_infected``.
The state of the agents will be updated every 2 seconds (``interval``).
@@ -116,7 +116,7 @@ Agents
======
Agents are a way of modelling behavior.
-Agents can be characterized with two variables: agent type (``agent_type``) and state.
+Agents can be characterized with two variables: agent type (``agent_class``) and state.
The agent type is a ``soil.Agent`` class, which contains the code that encapsulates the behavior of the agent.
The state is a set of variables, which may change during the simulation, and that the code may use to control the behavior.
All agents provide a ``step`` method either explicitly or implicitly (by inheriting it from a superclass), which controls how the agent will behave in each step of the simulation.
@@ -142,7 +142,7 @@ Hence, every node in the network will be associated to an agent of that type.
.. code:: yaml
- agent_type: SISaModel
+ agent_class: SISaModel
It is also possible to add more than one type of agent to the simulation.
@@ -152,9 +152,9 @@ For instance, with following configuration, it is five times more likely for a n
.. code:: yaml
network_agents:
- - agent_type: SISaModel
+ - agent_class: SISaModel
weight: 1
- - agent_type: CounterModel
+ - agent_class: CounterModel
weight: 5
The third option is to specify the type of agent on the node itself, e.g.:
@@ -165,10 +165,10 @@ The third option is to specify the type of agent on the node itself, e.g.:
topology:
nodes:
- id: first
- agent_type: BaseAgent
+ agent_class: BaseAgent
states:
first:
- agent_type: SISaModel
+ agent_class: SISaModel
This would also work with a randomly generated network:
@@ -179,9 +179,9 @@ This would also work with a randomly generated network:
network:
generator: complete
n: 5
- agent_type: BaseAgent
+ agent_class: BaseAgent
states:
- - agent_type: SISaModel
+ - agent_class: SISaModel
@@ -192,11 +192,11 @@ e.g., to populate the network with SISaModel, roughly 10% of them with a discont
.. code:: yaml
network_agents:
- - agent_type: SISaModel
+ - agent_class: SISaModel
weight: 9
state:
id: neutral
- - agent_type: SISaModel
+ - agent_class: SISaModel
weight: 1
state:
id: discontent
@@ -206,7 +206,7 @@ For instance, to add a state for the two nodes in this configuration:
.. code:: yaml
- agent_type: SISaModel
+ agent_class: SISaModel
network:
generator: complete_graph
n: 2
@@ -231,10 +231,10 @@ These agents are programmed in much the same way as network agents, the only dif
.. code::
environment_agents:
- - agent_type: MyAgent
+ - agent_class: MyAgent
state:
mood: happy
- - agent_type: DummyAgent
+ - agent_class: DummyAgent
You may use environment agents to model events that a normal agent cannot control, such as natural disasters or chance.
diff --git a/docs/example.yml b/docs/example.yml
index 4ef6ef2..1554512 100644
--- a/docs/example.yml
+++ b/docs/example.yml
@@ -8,15 +8,15 @@ network_params:
n: 100
m: 2
network_agents:
- - agent_type: SISaModel
+ - agent_class: SISaModel
weight: 1
state:
id: content
- - agent_type: SISaModel
+ - agent_class: SISaModel
weight: 1
state:
id: discontent
- - agent_type: SISaModel
+ - agent_class: SISaModel
weight: 8
state:
id: neutral
diff --git a/docs/quickstart.yml b/docs/quickstart.yml
index 76ed3e2..3b1f36d 100644
--- a/docs/quickstart.yml
+++ b/docs/quickstart.yml
@@ -3,11 +3,11 @@ name: quickstart
num_trials: 1
max_time: 1000
network_agents:
- - agent_type: SISaModel
+ - agent_class: SISaModel
state:
id: neutral
weight: 1
- - agent_type: SISaModel
+ - agent_class: SISaModel
state:
id: content
weight: 2
diff --git a/docs/soil_tutorial.rst b/docs/soil_tutorial.rst
index 647ae0c..f7eefe9 100644
--- a/docs/soil_tutorial.rst
+++ b/docs/soil_tutorial.rst
@@ -211,11 +211,11 @@ nodes in that network. Notice how node 0 is the only one with a TV.
sim = soil.Simulation(topology=G,
num_trials=1,
max_time=MAX_TIME,
- environment_agents=[{'agent_type': NewsEnvironmentAgent,
+ environment_agents=[{'agent_class': NewsEnvironmentAgent,
'state': {
'event_time': EVENT_TIME
}}],
- network_agents=[{'agent_type': NewsSpread,
+ network_agents=[{'agent_class': NewsSpread,
'weight': 1}],
states={0: {'has_tv': True}},
default_state={'has_tv': False},
@@ -285,14 +285,14 @@ For this demo, we will use a python dictionary:
},
'network_agents': [
{
- 'agent_type': NewsSpread,
+ 'agent_class': NewsSpread,
'weight': 1,
'state': {
'has_tv': False
}
},
{
- 'agent_type': NewsSpread,
+ 'agent_class': NewsSpread,
'weight': 2,
'state': {
'has_tv': True
@@ -300,7 +300,7 @@ For this demo, we will use a python dictionary:
}
],
'environment_agents':[
- {'agent_type': NewsEnvironmentAgent,
+ {'agent_class': NewsEnvironmentAgent,
'state': {
'event_time': 10
}
diff --git a/examples/NewsSpread.ipynb b/examples/NewsSpread.ipynb
index 3f73ff6..87b53f2 100644
--- a/examples/NewsSpread.ipynb
+++ b/examples/NewsSpread.ipynb
@@ -98,11 +98,11 @@
"max_time: 30\r\n",
"name: Sim_all_dumb\r\n",
"network_agents:\r\n",
- "- agent_type: DumbViewer\r\n",
+ "- agent_class: DumbViewer\r\n",
" state:\r\n",
" has_tv: false\r\n",
" weight: 1\r\n",
- "- agent_type: DumbViewer\r\n",
+ "- agent_class: DumbViewer\r\n",
" state:\r\n",
" has_tv: true\r\n",
" weight: 1\r\n",
@@ -122,19 +122,19 @@
"max_time: 30\r\n",
"name: Sim_half_herd\r\n",
"network_agents:\r\n",
- "- agent_type: DumbViewer\r\n",
+ "- agent_class: DumbViewer\r\n",
" state:\r\n",
" has_tv: false\r\n",
" weight: 1\r\n",
- "- agent_type: DumbViewer\r\n",
+ "- agent_class: DumbViewer\r\n",
" state:\r\n",
" has_tv: true\r\n",
" weight: 1\r\n",
- "- agent_type: HerdViewer\r\n",
+ "- agent_class: HerdViewer\r\n",
" state:\r\n",
" has_tv: false\r\n",
" weight: 1\r\n",
- "- agent_type: HerdViewer\r\n",
+ "- agent_class: HerdViewer\r\n",
" state:\r\n",
" has_tv: true\r\n",
" weight: 1\r\n",
@@ -154,12 +154,12 @@
"max_time: 30\r\n",
"name: Sim_all_herd\r\n",
"network_agents:\r\n",
- "- agent_type: HerdViewer\r\n",
+ "- agent_class: HerdViewer\r\n",
" state:\r\n",
" has_tv: true\r\n",
" id: neutral\r\n",
" weight: 1\r\n",
- "- agent_type: HerdViewer\r\n",
+ "- agent_class: HerdViewer\r\n",
" state:\r\n",
" has_tv: true\r\n",
" id: neutral\r\n",
@@ -181,12 +181,12 @@
"max_time: 30\r\n",
"name: Sim_wise_herd\r\n",
"network_agents:\r\n",
- "- agent_type: HerdViewer\r\n",
+ "- agent_class: HerdViewer\r\n",
" state:\r\n",
" has_tv: true\r\n",
" id: neutral\r\n",
" weight: 1\r\n",
- "- agent_type: WiseViewer\r\n",
+ "- agent_class: WiseViewer\r\n",
" state:\r\n",
" has_tv: true\r\n",
" weight: 1\r\n",
@@ -207,12 +207,12 @@
"max_time: 30\r\n",
"name: Sim_all_wise\r\n",
"network_agents:\r\n",
- "- agent_type: WiseViewer\r\n",
+ "- agent_class: WiseViewer\r\n",
" state:\r\n",
" has_tv: true\r\n",
" id: neutral\r\n",
" weight: 1\r\n",
- "- agent_type: WiseViewer\r\n",
+ "- agent_class: WiseViewer\r\n",
" state:\r\n",
" has_tv: true\r\n",
" weight: 1\r\n",
diff --git a/examples/Untitled.ipynb b/examples/Untitled.ipynb
index eb0387f..29e00b1 100644
--- a/examples/Untitled.ipynb
+++ b/examples/Untitled.ipynb
@@ -141,10 +141,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -1758,10 +1758,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -3363,10 +3363,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -4977,10 +4977,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -6591,10 +6591,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -8211,10 +8211,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -9828,10 +9828,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -11448,10 +11448,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -13062,10 +13062,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -14679,10 +14679,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -16296,10 +16296,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -17916,10 +17916,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -19521,10 +19521,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -21144,10 +21144,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -22767,10 +22767,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -24375,10 +24375,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -25992,10 +25992,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -27603,10 +27603,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -29220,10 +29220,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -30819,10 +30819,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -32439,10 +32439,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -34056,10 +34056,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -35676,10 +35676,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -37293,10 +37293,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -38913,10 +38913,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -40518,10 +40518,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -42129,10 +42129,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -43746,10 +43746,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -45357,10 +45357,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -46974,10 +46974,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -48588,10 +48588,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -50202,10 +50202,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -51819,10 +51819,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -53436,10 +53436,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -55041,10 +55041,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -56655,10 +56655,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -58257,10 +58257,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -59877,10 +59877,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -61494,10 +61494,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -63108,10 +63108,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -64713,10 +64713,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -66330,10 +66330,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -67947,10 +67947,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -69561,10 +69561,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -71178,10 +71178,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -72801,10 +72801,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -74418,10 +74418,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -76035,10 +76035,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -77643,10 +77643,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
@@ -79260,10 +79260,10 @@
" 'load_module': 'newsspread',\n",
" 'max_time': 30,\n",
" 'name': 'Sim_all_dumb',\n",
- " 'network_agents': [{'agent_type': 'DumbViewer',\n",
+ " 'network_agents': [{'agent_class': 'DumbViewer',\n",
" 'state': {'has_tv': False},\n",
" 'weight': 1},\n",
- " {'agent_type': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
+ " {'agent_class': 'DumbViewer', 'state': {'has_tv': True}, 'weight': 1}],\n",
" 'network_params': {'generator': 'barabasi_albert_graph', 'm': 5, 'n': 500},\n",
" 'num_trials': 50,\n",
" 'seed': 'None',\n",
diff --git a/examples/complete.yml b/examples/complete.yml
index adf1e7e..d33cbaf 100644
--- a/examples/complete.yml
+++ b/examples/complete.yml
@@ -30,6 +30,7 @@ agents:
times: 1
environment:
# In this group we are not specifying any topology
+ topology: False
fixed:
- name: 'Environment Agent 1'
agent_class: CounterModel
diff --git a/examples/custom_generator/custom_generator.yml b/examples/custom_generator/custom_generator.yml
index 8c128f3..12c130d 100644
--- a/examples/custom_generator/custom_generator.yml
+++ b/examples/custom_generator/custom_generator.yml
@@ -10,7 +10,7 @@ network_params:
n: 10
n_edges: 5
network_agents:
- - agent_type: CounterModel
+ - agent_class: CounterModel
weight: 1
state:
state_id: 0
diff --git a/examples/custom_generator/mymodule.py b/examples/custom_generator/mymodule.py
index 4f80510..ef3bacc 100644
--- a/examples/custom_generator/mymodule.py
+++ b/examples/custom_generator/mymodule.py
@@ -1,6 +1,5 @@
from networkx import Graph
import networkx as nx
-from random import choice
def mygenerator(n=5, n_edges=5):
'''
@@ -14,9 +13,9 @@ def mygenerator(n=5, n_edges=5):
for i in range(n_edges):
nodes = list(G.nodes)
- n_in = choice(nodes)
+ n_in = self.random.choice(nodes)
nodes.remove(n_in) # Avoid loops
- n_out = choice(nodes)
+ n_out = self.random.choice(nodes)
G.add_edge(n_in, n_out)
return G
@@ -24,4 +23,4 @@ def mygenerator(n=5, n_edges=5):
-
\ No newline at end of file
+
diff --git a/examples/custom_timeouts/custom_timeouts.py b/examples/custom_timeouts/custom_timeouts.py
index 75cfc91..16b8d66 100644
--- a/examples/custom_timeouts/custom_timeouts.py
+++ b/examples/custom_timeouts/custom_timeouts.py
@@ -27,8 +27,8 @@ if __name__ == '__main__':
import logging
logging.basicConfig(level=logging.INFO)
from soil import Simulation
- s = Simulation(network_agents=[{'ids': [0], 'agent_type': Fibonacci},
- {'ids': [1], 'agent_type': Odds}],
+ s = Simulation(network_agents=[{'ids': [0], 'agent_class': Fibonacci},
+ {'ids': [1], 'agent_class': Odds}],
network_params={"generator": "complete_graph", "n": 2},
max_time=100,
)
diff --git a/examples/mesa/mesa.yml b/examples/mesa/mesa.yml
index 19148a2..a1572f2 100644
--- a/examples/mesa/mesa.yml
+++ b/examples/mesa/mesa.yml
@@ -10,11 +10,11 @@ network_params:
generator: social_wealth.graph_generator
n: 5
network_agents:
- - agent_type: social_wealth.SocialMoneyAgent
+ - agent_class: social_wealth.SocialMoneyAgent
weight: 1
environment_class: social_wealth.MoneyEnv
environment_params:
- mesa_agent_type: social_wealth.MoneyAgent
+ mesa_agent_class: social_wealth.MoneyAgent
N: 10
width: 50
height: 50
diff --git a/examples/mesa/server.py b/examples/mesa/server.py
index e6afecd..fc9b0b1 100644
--- a/examples/mesa/server.py
+++ b/examples/mesa/server.py
@@ -70,7 +70,7 @@ model_params = {
1,
description="Choose how many agents to include in the model",
),
- "network_agents": [{"agent_type": SocialMoneyAgent}],
+ "network_agents": [{"agent_class": SocialMoneyAgent}],
"height": UserSettableParameter(
"slider",
"height",
diff --git a/examples/mesa/social_wealth.py b/examples/mesa/social_wealth.py
index 5f7590b..b20bc9a 100644
--- a/examples/mesa/social_wealth.py
+++ b/examples/mesa/social_wealth.py
@@ -99,7 +99,7 @@ if __name__ == '__main__':
G = graph_generator()
fixed_params = {"topology": G,
"width": 10,
- "network_agents": [{"agent_type": SocialMoneyAgent,
+ "network_agents": [{"agent_class": SocialMoneyAgent,
'weight': 1}],
"height": 10}
diff --git a/examples/newsspread/NewsSpread.ipynb b/examples/newsspread/NewsSpread.ipynb
index 198588a..0c44b6e 100644
--- a/examples/newsspread/NewsSpread.ipynb
+++ b/examples/newsspread/NewsSpread.ipynb
@@ -89,11 +89,11 @@
"max_time: 30\r\n",
"name: Sim_all_dumb\r\n",
"network_agents:\r\n",
- "- agent_type: DumbViewer\r\n",
+ "- agent_class: DumbViewer\r\n",
" state:\r\n",
" has_tv: false\r\n",
" weight: 1\r\n",
- "- agent_type: DumbViewer\r\n",
+ "- agent_class: DumbViewer\r\n",
" state:\r\n",
" has_tv: true\r\n",
" weight: 1\r\n",
@@ -113,19 +113,19 @@
"max_time: 30\r\n",
"name: Sim_half_herd\r\n",
"network_agents:\r\n",
- "- agent_type: DumbViewer\r\n",
+ "- agent_class: DumbViewer\r\n",
" state:\r\n",
" has_tv: false\r\n",
" weight: 1\r\n",
- "- agent_type: DumbViewer\r\n",
+ "- agent_class: DumbViewer\r\n",
" state:\r\n",
" has_tv: true\r\n",
" weight: 1\r\n",
- "- agent_type: HerdViewer\r\n",
+ "- agent_class: HerdViewer\r\n",
" state:\r\n",
" has_tv: false\r\n",
" weight: 1\r\n",
- "- agent_type: HerdViewer\r\n",
+ "- agent_class: HerdViewer\r\n",
" state:\r\n",
" has_tv: true\r\n",
" weight: 1\r\n",
@@ -145,12 +145,12 @@
"max_time: 30\r\n",
"name: Sim_all_herd\r\n",
"network_agents:\r\n",
- "- agent_type: HerdViewer\r\n",
+ "- agent_class: HerdViewer\r\n",
" state:\r\n",
" has_tv: true\r\n",
" id: neutral\r\n",
" weight: 1\r\n",
- "- agent_type: HerdViewer\r\n",
+ "- agent_class: HerdViewer\r\n",
" state:\r\n",
" has_tv: true\r\n",
" id: neutral\r\n",
@@ -172,12 +172,12 @@
"max_time: 30\r\n",
"name: Sim_wise_herd\r\n",
"network_agents:\r\n",
- "- agent_type: HerdViewer\r\n",
+ "- agent_class: HerdViewer\r\n",
" state:\r\n",
" has_tv: true\r\n",
" id: neutral\r\n",
" weight: 1\r\n",
- "- agent_type: WiseViewer\r\n",
+ "- agent_class: WiseViewer\r\n",
" state:\r\n",
" has_tv: true\r\n",
" weight: 1\r\n",
@@ -198,12 +198,12 @@
"max_time: 30\r\n",
"name: Sim_all_wise\r\n",
"network_agents:\r\n",
- "- agent_type: WiseViewer\r\n",
+ "- agent_class: WiseViewer\r\n",
" state:\r\n",
" has_tv: true\r\n",
" id: neutral\r\n",
" weight: 1\r\n",
- "- agent_type: WiseViewer\r\n",
+ "- agent_class: WiseViewer\r\n",
" state:\r\n",
" has_tv: true\r\n",
" weight: 1\r\n",
diff --git a/examples/newsspread/NewsSpread.yml b/examples/newsspread/NewsSpread.yml
index 27f8411..10ae525 100644
--- a/examples/newsspread/NewsSpread.yml
+++ b/examples/newsspread/NewsSpread.yml
@@ -8,11 +8,11 @@ interval: 1
max_time: 300
name: Sim_all_dumb
network_agents:
-- agent_type: newsspread.DumbViewer
+- agent_class: newsspread.DumbViewer
state:
has_tv: false
weight: 1
-- agent_type: newsspread.DumbViewer
+- agent_class: newsspread.DumbViewer
state:
has_tv: true
weight: 1
@@ -31,19 +31,19 @@ interval: 1
max_time: 300
name: Sim_half_herd
network_agents:
-- agent_type: newsspread.DumbViewer
+- agent_class: newsspread.DumbViewer
state:
has_tv: false
weight: 1
-- agent_type: newsspread.DumbViewer
+- agent_class: newsspread.DumbViewer
state:
has_tv: true
weight: 1
-- agent_type: newsspread.HerdViewer
+- agent_class: newsspread.HerdViewer
state:
has_tv: false
weight: 1
-- agent_type: newsspread.HerdViewer
+- agent_class: newsspread.HerdViewer
state:
has_tv: true
weight: 1
@@ -62,12 +62,12 @@ interval: 1
max_time: 300
name: Sim_all_herd
network_agents:
-- agent_type: newsspread.HerdViewer
+- agent_class: newsspread.HerdViewer
state:
has_tv: true
state_id: neutral
weight: 1
-- agent_type: newsspread.HerdViewer
+- agent_class: newsspread.HerdViewer
state:
has_tv: true
state_id: neutral
@@ -88,12 +88,12 @@ interval: 1
max_time: 300
name: Sim_wise_herd
network_agents:
-- agent_type: newsspread.HerdViewer
+- agent_class: newsspread.HerdViewer
state:
has_tv: true
state_id: neutral
weight: 1
-- agent_type: newsspread.WiseViewer
+- agent_class: newsspread.WiseViewer
state:
has_tv: true
weight: 1
@@ -113,12 +113,12 @@ interval: 1
max_time: 300
name: Sim_all_wise
network_agents:
-- agent_type: newsspread.WiseViewer
+- agent_class: newsspread.WiseViewer
state:
has_tv: true
state_id: neutral
weight: 1
-- agent_type: newsspread.WiseViewer
+- agent_class: newsspread.WiseViewer
state:
has_tv: true
weight: 1
diff --git a/examples/programmatic/programmatic.py b/examples/programmatic/programmatic.py
index 7c8d54a..3b9f86f 100644
--- a/examples/programmatic/programmatic.py
+++ b/examples/programmatic/programmatic.py
@@ -27,7 +27,7 @@ s = Simulation(name='Programmatic',
network_params={'generator': mygenerator},
num_trials=1,
max_time=100,
- agent_type=MyAgent,
+ agent_class=MyAgent,
dry_run=True)
diff --git a/examples/pubcrawl/pubcrawl.py b/examples/pubcrawl/pubcrawl.py
index e6c92bd..7fc5b5f 100644
--- a/examples/pubcrawl/pubcrawl.py
+++ b/examples/pubcrawl/pubcrawl.py
@@ -1,6 +1,5 @@
from soil.agents import FSM, NetworkAgent, state, default_state
from soil import Environment
-from random import random, shuffle
from itertools import islice
import logging
@@ -128,7 +127,7 @@ class Patron(FSM, NetworkAgent):
Try to become friends with another agent. The chances of
success depend on both agents' openness.
'''
- if force or self['openness'] > random():
+ if force or self['openness'] > self.random.random():
self.env.add_edge(self, other_agent)
self.info('Made some friend {}'.format(other_agent))
return True
@@ -138,7 +137,7 @@ class Patron(FSM, NetworkAgent):
''' Look for random agents around me and try to befriend them'''
befriended = False
k = int(10*self['openness'])
- shuffle(others)
+ self.random.shuffle(others)
for friend in islice(others, k): # random.choice >= 3.7
if friend == self:
continue
diff --git a/examples/pubcrawl/pubcrawl.yml b/examples/pubcrawl/pubcrawl.yml
index 7a464a6..1f83f95 100644
--- a/examples/pubcrawl/pubcrawl.yml
+++ b/examples/pubcrawl/pubcrawl.yml
@@ -8,18 +8,18 @@ network_params:
generator: empty_graph
n: 30
network_agents:
- - agent_type: pubcrawl.Patron
+ - agent_class: pubcrawl.Patron
description: Extroverted patron
state:
openness: 1.0
weight: 9
- - agent_type: pubcrawl.Patron
+ - agent_class: pubcrawl.Patron
description: Introverted patron
state:
openness: 0.1
weight: 1
environment_agents:
- - agent_type: pubcrawl.Police
+ - agent_class: pubcrawl.Police
environment_class: pubcrawl.CityPubs
environment_params:
altercations: 0
diff --git a/examples/rabbits/rabbit_agents.py b/examples/rabbits/rabbit_agents.py
index a70b21a..df371b2 100644
--- a/examples/rabbits/rabbit_agents.py
+++ b/examples/rabbits/rabbit_agents.py
@@ -1,6 +1,5 @@
from soil.agents import FSM, state, default_state, BaseAgent, NetworkAgent
from enum import Enum
-from random import random, choice
import logging
import math
@@ -57,10 +56,10 @@ class Male(RabbitModel):
# Males try to mate
for f in self.get_agents(state_id=Female.fertile.id,
- agent_type=Female,
+ agent_class=Female,
limit_neighbors=False,
limit=self.max_females):
- r = random()
+ r = self.random.random()
if r < self['mating_prob']:
self.impregnate(f)
break # Take a break
@@ -85,11 +84,11 @@ class Female(RabbitModel):
self['pregnancy'] += 1
self.debug('Pregnancy: {}'.format(self['pregnancy']))
if self['pregnancy'] >= self.gestation:
- number_of_babies = int(8+4*random())
+ number_of_babies = int(8+4*self.random.random())
self.info('Having {} babies'.format(number_of_babies))
for i in range(number_of_babies):
state = {}
- state['gender'] = choice(list(Genders)).value
+ state['gender'] = self.random.choice(list(Genders)).value
child = self.env.add_node(self.__class__, state)
self.env.add_edge(self.id, child.id)
self.env.add_edge(self['mate'], child.id)
@@ -124,8 +123,7 @@ class RandomAccident(BaseAgent):
for i in self.env.network_agents:
if i.state['id'] == i.dead.id:
continue
- r = random()
- if r < prob_death:
+ if self.prob(prob_death):
self.debug('I killed a rabbit: {}'.format(i.id))
rabbits_alive = self.env['rabbits_alive'] = rabbits_alive -1
self.log('Rabbits alive: {}'.format(self.env['rabbits_alive']))
diff --git a/examples/rabbits/rabbits.yml b/examples/rabbits/rabbits.yml
index a4ec8e8..1b1d148 100644
--- a/examples/rabbits/rabbits.yml
+++ b/examples/rabbits/rabbits.yml
@@ -3,9 +3,9 @@ name: rabbits_example
max_time: 100
interval: 1
seed: MySeed
-agent_type: rabbit_agents.RabbitModel
+agent_class: rabbit_agents.RabbitModel
environment_agents:
- - agent_type: rabbit_agents.RandomAccident
+ - agent_class: rabbit_agents.RandomAccident
environment_params:
prob_death: 0.001
default_state:
@@ -13,8 +13,8 @@ default_state:
topology:
nodes:
- id: 1
- agent_type: rabbit_agents.Male
+ agent_class: rabbit_agents.Male
- id: 0
- agent_type: rabbit_agents.Female
+ agent_class: rabbit_agents.Female
directed: true
links: []
diff --git a/examples/random_delays/random_delays.py b/examples/random_delays/random_delays.py
index c3a6961..52dd55e 100644
--- a/examples/random_delays/random_delays.py
+++ b/examples/random_delays/random_delays.py
@@ -4,7 +4,6 @@ Example of a fully programmatic simulation, without definition files.
'''
from soil import Simulation, agents
from soil.time import Delta
-from random import expovariate
import logging
@@ -20,7 +19,7 @@ class MyAgent(agents.FSM):
@agents.state
def ping(self):
self.info('Ping')
- return self.pong, Delta(expovariate(1/16))
+ return self.pong, Delta(self.random.expovariate(1/16))
@agents.state
def pong(self):
@@ -29,15 +28,15 @@ class MyAgent(agents.FSM):
self.info(str(self.pong_counts))
if self.pong_counts < 1:
return self.die()
- return None, Delta(expovariate(1/16))
+ return None, Delta(self.random.expovariate(1/16))
s = Simulation(name='Programmatic',
- network_agents=[{'agent_type': MyAgent, 'id': 0}],
+ network_agents=[{'agent_class': MyAgent, 'id': 0}],
topology={'nodes': [{'id': 0}], 'links': []},
num_trials=1,
max_time=100,
- agent_type=MyAgent,
+ agent_class=MyAgent,
dry_run=True)
diff --git a/examples/template.yml b/examples/template.yml
index f61757d..a307eff 100644
--- a/examples/template.yml
+++ b/examples/template.yml
@@ -13,11 +13,11 @@ template:
generator: complete_graph
n: 10
network_agents:
- - agent_type: CounterModel
+ - agent_class: CounterModel
weight: "{{ x1 }}"
state:
state_id: 0
- - agent_type: AggregatedCounter
+ - agent_class: AggregatedCounter
weight: "{{ 1 - x1 }}"
environment_params:
name: "{{ x3 }}"
diff --git a/examples/terrorism/TerroristNetworkModel.py b/examples/terrorism/TerroristNetworkModel.py
index 3cdc675..2fa6de4 100644
--- a/examples/terrorism/TerroristNetworkModel.py
+++ b/examples/terrorism/TerroristNetworkModel.py
@@ -1,4 +1,3 @@
-import random
import networkx as nx
from soil.agents import Geo, NetworkAgent, FSM, state, default_state
from soil import Environment
@@ -26,26 +25,26 @@ class TerroristSpreadModel(FSM, Geo):
self.prob_interaction = model.environment_params['prob_interaction']
if self['id'] == self.civilian.id: # Civilian
- self.mean_belief = random.uniform(0.00, 0.5)
+ self.mean_belief = self.random.uniform(0.00, 0.5)
elif self['id'] == self.terrorist.id: # Terrorist
- self.mean_belief = random.uniform(0.8, 1.00)
+ self.mean_belief = self.random.uniform(0.8, 1.00)
elif self['id'] == self.leader.id: # Leader
self.mean_belief = 1.00
else:
raise Exception('Invalid state id: {}'.format(self['id']))
if 'min_vulnerability' in model.environment_params:
- self.vulnerability = random.uniform( model.environment_params['min_vulnerability'], model.environment_params['max_vulnerability'] )
+ self.vulnerability = self.random.uniform( model.environment_params['min_vulnerability'], model.environment_params['max_vulnerability'] )
else :
- self.vulnerability = random.uniform( 0, model.environment_params['max_vulnerability'] )
+ self.vulnerability = self.random.uniform( 0, model.environment_params['max_vulnerability'] )
@state
def civilian(self):
- neighbours = list(self.get_neighboring_agents(agent_type=TerroristSpreadModel))
+ neighbours = list(self.get_neighboring_agents(agent_class=TerroristSpreadModel))
if len(neighbours) > 0:
# Only interact with some of the neighbors
- interactions = list(n for n in neighbours if random.random() <= self.prob_interaction)
+ interactions = list(n for n in neighbours if self.random.random() <= self.prob_interaction)
influence = sum( self.degree(i) for i in interactions )
mean_belief = sum( i.mean_belief * self.degree(i) / influence for i in interactions )
mean_belief = mean_belief * self.information_spread_intensity + self.mean_belief * ( 1 - self.information_spread_intensity )
@@ -64,7 +63,7 @@ class TerroristSpreadModel(FSM, Geo):
@state
def terrorist(self):
neighbours = self.get_agents(state_id=[self.terrorist.id, self.leader.id],
- agent_type=TerroristSpreadModel,
+ agent_class=TerroristSpreadModel,
limit_neighbors=True)
if len(neighbours) > 0:
influence = sum( self.degree(n) for n in neighbours )
@@ -103,7 +102,7 @@ class TrainingAreaModel(FSM, Geo):
@default_state
@state
def terrorist(self):
- for neighbour in self.get_neighboring_agents(agent_type=TerroristSpreadModel):
+ for neighbour in self.get_neighboring_agents(agent_class=TerroristSpreadModel):
if neighbour.vulnerability > self.min_vulnerability:
neighbour.vulnerability = neighbour.vulnerability ** ( 1 - self.training_influence )
@@ -129,7 +128,7 @@ class HavenModel(FSM, Geo):
self.max_vulnerability = model.environment_params['max_vulnerability']
def get_occupants(self, **kwargs):
- return self.get_neighboring_agents(agent_type=TerroristSpreadModel, **kwargs)
+ return self.get_neighboring_agents(agent_class=TerroristSpreadModel, **kwargs)
@state
def civilian(self):
@@ -182,15 +181,15 @@ class TerroristNetworkModel(TerroristSpreadModel):
def update_relationships(self):
if self.count_neighboring_agents(state_id=self.civilian.id) == 0:
- close_ups = set(self.geo_search(radius=self.vision_range, agent_type=TerroristNetworkModel))
- step_neighbours = set(self.ego_search(self.sphere_influence, agent_type=TerroristNetworkModel, center=False))
- neighbours = set(agent.id for agent in self.get_neighboring_agents(agent_type=TerroristNetworkModel))
+ close_ups = set(self.geo_search(radius=self.vision_range, agent_class=TerroristNetworkModel))
+ step_neighbours = set(self.ego_search(self.sphere_influence, agent_class=TerroristNetworkModel, center=False))
+ neighbours = set(agent.id for agent in self.get_neighboring_agents(agent_class=TerroristNetworkModel))
search = (close_ups | step_neighbours) - neighbours
for agent in self.get_agents(search):
social_distance = 1 / self.shortest_path_length(agent.id)
spatial_proximity = ( 1 - self.get_distance(agent.id) )
prob_new_interaction = self.weight_social_distance * social_distance + self.weight_link_distance * spatial_proximity
- if agent['id'] == agent.civilian.id and random.random() < prob_new_interaction:
+ if agent['id'] == agent.civilian.id and self.random.random() < prob_new_interaction:
self.add_edge(agent)
break
diff --git a/examples/terrorism/TerroristNetworkModel.yml b/examples/terrorism/TerroristNetworkModel.yml
index 45dd1aa..b5a3d09 100644
--- a/examples/terrorism/TerroristNetworkModel.yml
+++ b/examples/terrorism/TerroristNetworkModel.yml
@@ -8,19 +8,19 @@ network_params:
# theta: 20
n: 100
network_agents:
- - agent_type: TerroristNetworkModel.TerroristNetworkModel
+ - agent_class: TerroristNetworkModel.TerroristNetworkModel
weight: 0.8
state:
id: civilian # Civilians
- - agent_type: TerroristNetworkModel.TerroristNetworkModel
+ - agent_class: TerroristNetworkModel.TerroristNetworkModel
weight: 0.1
state:
id: leader # Leaders
- - agent_type: TerroristNetworkModel.TrainingAreaModel
+ - agent_class: TerroristNetworkModel.TrainingAreaModel
weight: 0.05
state:
id: terrorist # Terrorism
- - agent_type: TerroristNetworkModel.HavenModel
+ - agent_class: TerroristNetworkModel.HavenModel
weight: 0.05
state:
id: civilian # Civilian
diff --git a/examples/torvalds.yml b/examples/torvalds.yml
index e338163..421e2ac 100644
--- a/examples/torvalds.yml
+++ b/examples/torvalds.yml
@@ -2,7 +2,7 @@
name: torvalds_example
max_time: 10
interval: 2
-agent_type: CounterModel
+agent_class: CounterModel
default_state:
skill_level: 'beginner'
network_params:
diff --git a/examples/tutorial/soil_tutorial.html b/examples/tutorial/soil_tutorial.html
index f93ca03..3f98f61 100644
--- a/examples/tutorial/soil_tutorial.html
+++ b/examples/tutorial/soil_tutorial.html
@@ -12330,11 +12330,11 @@ Notice how node 0 is the only one with a TV.
sim = soil.Simulation(topology=G,
num_trials=1,
max_time=MAX_TIME,
- environment_agents=[{'agent_type': NewsEnvironmentAgent,
+ environment_agents=[{'agent_class': NewsEnvironmentAgent,
'state': {
'event_time': EVENT_TIME
}}],
- network_agents=[{'agent_type': NewsSpread,
+ network_agents=[{'agent_class': NewsSpread,
'weight': 1}],
states={0: {'has_tv': True}},
default_state={'has_tv': False},
@@ -12468,14 +12468,14 @@ For this demo, we will use a python dictionary:
},
'network_agents': [
{
- 'agent_type': NewsSpread,
+ 'agent_class': NewsSpread,
'weight': 1,
'state': {
'has_tv': False
}
},
{
- 'agent_type': NewsSpread,
+ 'agent_class': NewsSpread,
'weight': 2,
'state': {
'has_tv': True
@@ -12483,7 +12483,7 @@ For this demo, we will use a python dictionary:
}
],
'environment_agents':[
- {'agent_type': NewsEnvironmentAgent,
+ {'agent_class': NewsEnvironmentAgent,
'state': {
'event_time': 10
}
diff --git a/examples/tutorial/soil_tutorial.ipynb b/examples/tutorial/soil_tutorial.ipynb
index 3e0ff2d..7599ab2 100644
--- a/examples/tutorial/soil_tutorial.ipynb
+++ b/examples/tutorial/soil_tutorial.ipynb
@@ -459,11 +459,11 @@
"sim = soil.Simulation(topology=G,\n",
" num_trials=1,\n",
" max_time=MAX_TIME,\n",
- " environment_agents=[{'agent_type': NewsEnvironmentAgent,\n",
+ " environment_agents=[{'agent_class': NewsEnvironmentAgent,\n",
" 'state': {\n",
" 'event_time': EVENT_TIME\n",
" }}],\n",
- " network_agents=[{'agent_type': NewsSpread,\n",
+ " network_agents=[{'agent_class': NewsSpread,\n",
" 'weight': 1}],\n",
" states={0: {'has_tv': True}},\n",
" default_state={'has_tv': False},\n",
@@ -588,14 +588,14 @@
" },\n",
" 'network_agents': [\n",
" {\n",
- " 'agent_type': NewsSpread,\n",
+ " 'agent_class': NewsSpread,\n",
" 'weight': 1,\n",
" 'state': {\n",
" 'has_tv': False\n",
" }\n",
" },\n",
" {\n",
- " 'agent_type': NewsSpread,\n",
+ " 'agent_class': NewsSpread,\n",
" 'weight': 2,\n",
" 'state': {\n",
" 'has_tv': True\n",
@@ -603,7 +603,7 @@
" }\n",
" ],\n",
" 'environment_agents':[\n",
- " {'agent_type': NewsEnvironmentAgent,\n",
+ " {'agent_class': NewsEnvironmentAgent,\n",
" 'state': {\n",
" 'event_time': 10\n",
" }\n",
diff --git a/soil/agents/BassModel.py b/soil/agents/BassModel.py
index cba6790..e3f5015 100644
--- a/soil/agents/BassModel.py
+++ b/soil/agents/BassModel.py
@@ -1,4 +1,3 @@
-import random
from . import FSM, state, default_state
@@ -16,13 +15,13 @@ class BassModel(FSM):
@default_state
@state
def innovation(self):
- if random.random() < self.innovation_prob:
+ if self.prob(self.innovation_prob):
self.sentimentCorrelation = 1
return self.aware
else:
aware_neighbors = self.get_neighboring_agents(state_id=self.aware.id)
num_neighbors_aware = len(aware_neighbors)
- if random.random() < (self['imitation_prob']*num_neighbors_aware):
+ if self.prob((self['imitation_prob']*num_neighbors_aware)):
self.sentimentCorrelation = 1
return self.aware
diff --git a/soil/agents/BigMarketModel.py b/soil/agents/BigMarketModel.py
index fbc3ba5..7db663d 100644
--- a/soil/agents/BigMarketModel.py
+++ b/soil/agents/BigMarketModel.py
@@ -1,4 +1,3 @@
-import random
from . import FSM, state, default_state
@@ -39,10 +38,10 @@ class BigMarketModel(FSM):
@state
def enterprise(self):
- if random.random() < self.tweet_probability: # Tweets
+ if self.random.random() < self.tweet_probability: # Tweets
aware_neighbors = self.get_neighboring_agents(state_id=self.number_of_enterprises) # Nodes neighbour users
for x in aware_neighbors:
- if random.uniform(0,10) < 5:
+ if self.random.uniform(0,10) < 5:
x.sentiment_about[self.id] += 0.1 # Increments for enterprise
else:
x.sentiment_about[self.id] -= 0.1 # Decrements for enterprise
@@ -57,11 +56,11 @@ class BigMarketModel(FSM):
@state
def user(self):
- if random.random() < self.tweet_probability: # Tweets
- if random.random() < self.tweet_relevant_probability: # Tweets something relevant
+ if self.random.random() < self.tweet_probability: # Tweets
+ if self.random.random() < self.tweet_relevant_probability: # Tweets something relevant
# Tweet probability per enterprise
for i in range(len(self.enterprises)):
- random_num = random.random()
+ random_num = self.random.random()
if random_num < self.tweet_probability_about[i]:
# The condition is fulfilled, sentiments are evaluated towards that enterprise
if self.sentiment_about[i] < 0:
diff --git a/soil/agents/IndependentCascadeModel.py b/soil/agents/IndependentCascadeModel.py
index ab5a8a8..e927a6f 100644
--- a/soil/agents/IndependentCascadeModel.py
+++ b/soil/agents/IndependentCascadeModel.py
@@ -1,4 +1,3 @@
-import random
from . import BaseAgent
@@ -23,7 +22,7 @@ class IndependentCascadeModel(BaseAgent):
def behaviour(self):
aware_neighbors_1_time_step = []
# Outside effects
- if random.random() < self.innovation_prob:
+ if self.prob(self.innovation_prob):
if self.state['id'] == 0:
self.state['id'] = 1
self.state['sentimentCorrelation'] = 1
@@ -40,7 +39,7 @@ class IndependentCascadeModel(BaseAgent):
if x.state['time_awareness'] == (self.env.now-1):
aware_neighbors_1_time_step.append(x)
num_neighbors_aware = len(aware_neighbors_1_time_step)
- if random.random() < (self.imitation_prob*num_neighbors_aware):
+ if self.prob(self.imitation_prob*num_neighbors_aware):
self.state['id'] = 1
self.state['sentimentCorrelation'] = 1
else:
diff --git a/soil/agents/ModelM2.py b/soil/agents/ModelM2.py
index ec0f98d..dec6b97 100644
--- a/soil/agents/ModelM2.py
+++ b/soil/agents/ModelM2.py
@@ -1,4 +1,3 @@
-import random
import numpy as np
from . import BaseAgent
@@ -24,23 +23,26 @@ class SpreadModelM2(BaseAgent):
def __init__(self, model=None, unique_id=0, state=()):
super().__init__(model=environment, unique_id=unique_id, state=state)
- self.prob_neutral_making_denier = np.random.normal(environment.environment_params['prob_neutral_making_denier'],
- environment.environment_params['standard_variance'])
- self.prob_infect = np.random.normal(environment.environment_params['prob_infect'],
- environment.environment_params['standard_variance'])
+ # Use a single generator with the same seed as `self.random`
+ random = np.random.default_rng(seed=self._seed)
+ self.prob_neutral_making_denier = random.normal(environment.environment_params['prob_neutral_making_denier'],
+ environment.environment_params['standard_variance'])
- self.prob_cured_healing_infected = np.random.normal(environment.environment_params['prob_cured_healing_infected'],
- environment.environment_params['standard_variance'])
- self.prob_cured_vaccinate_neutral = np.random.normal(environment.environment_params['prob_cured_vaccinate_neutral'],
- environment.environment_params['standard_variance'])
+ self.prob_infect = random.normal(environment.environment_params['prob_infect'],
+ environment.environment_params['standard_variance'])
- self.prob_vaccinated_healing_infected = np.random.normal(environment.environment_params['prob_vaccinated_healing_infected'],
- environment.environment_params['standard_variance'])
- self.prob_vaccinated_vaccinate_neutral = np.random.normal(environment.environment_params['prob_vaccinated_vaccinate_neutral'],
- environment.environment_params['standard_variance'])
- self.prob_generate_anti_rumor = np.random.normal(environment.environment_params['prob_generate_anti_rumor'],
+ self.prob_cured_healing_infected = random.normal(environment.environment_params['prob_cured_healing_infected'],
environment.environment_params['standard_variance'])
+ self.prob_cured_vaccinate_neutral = random.normal(environment.environment_params['prob_cured_vaccinate_neutral'],
+ environment.environment_params['standard_variance'])
+
+ self.prob_vaccinated_healing_infected = random.normal(environment.environment_params['prob_vaccinated_healing_infected'],
+ environment.environment_params['standard_variance'])
+ self.prob_vaccinated_vaccinate_neutral = random.normal(environment.environment_params['prob_vaccinated_vaccinate_neutral'],
+ environment.environment_params['standard_variance'])
+ self.prob_generate_anti_rumor = random.normal(environment.environment_params['prob_generate_anti_rumor'],
+ environment.environment_params['standard_variance'])
def step(self):
@@ -58,7 +60,7 @@ class SpreadModelM2(BaseAgent):
# Infected
infected_neighbors = self.get_neighboring_agents(state_id=1)
if len(infected_neighbors) > 0:
- if random.random() < self.prob_neutral_making_denier:
+ if self.prob(self.prob_neutral_making_denier):
self.state['id'] = 3 # Vaccinated making denier
def infected_behaviour(self):
@@ -66,7 +68,7 @@ class SpreadModelM2(BaseAgent):
# Neutral
neutral_neighbors = self.get_neighboring_agents(state_id=0)
for neighbor in neutral_neighbors:
- if random.random() < self.prob_infect:
+ if self.prob(self.prob_infect):
neighbor.state['id'] = 1 # Infected
def cured_behaviour(self):
@@ -74,13 +76,13 @@ class SpreadModelM2(BaseAgent):
# Vaccinate
neutral_neighbors = self.get_neighboring_agents(state_id=0)
for neighbor in neutral_neighbors:
- if random.random() < self.prob_cured_vaccinate_neutral:
+ if self.prob(self.prob_cured_vaccinate_neutral):
neighbor.state['id'] = 3 # Vaccinated
# Cure
infected_neighbors = self.get_neighboring_agents(state_id=1)
for neighbor in infected_neighbors:
- if random.random() < self.prob_cured_healing_infected:
+ if self.prob(self.prob_cured_healing_infected):
neighbor.state['id'] = 2 # Cured
def vaccinated_behaviour(self):
@@ -88,19 +90,19 @@ class SpreadModelM2(BaseAgent):
# Cure
infected_neighbors = self.get_neighboring_agents(state_id=1)
for neighbor in infected_neighbors:
- if random.random() < self.prob_cured_healing_infected:
+ if self.prob(self.prob_cured_healing_infected):
neighbor.state['id'] = 2 # Cured
# Vaccinate
neutral_neighbors = self.get_neighboring_agents(state_id=0)
for neighbor in neutral_neighbors:
- if random.random() < self.prob_cured_vaccinate_neutral:
+ if self.prob(self.prob_cured_vaccinate_neutral):
neighbor.state['id'] = 3 # Vaccinated
# Generate anti-rumor
infected_neighbors_2 = self.get_neighboring_agents(state_id=1)
for neighbor in infected_neighbors_2:
- if random.random() < self.prob_generate_anti_rumor:
+ if self.prob(self.prob_generate_anti_rumor):
neighbor.state['id'] = 2 # Cured
@@ -165,7 +167,7 @@ class ControlModelM2(BaseAgent):
# Infected
infected_neighbors = self.get_neighboring_agents(state_id=1)
if len(infected_neighbors) > 0:
- if random.random() < self.prob_neutral_making_denier:
+ if self.prob(self.prob_neutral_making_denier):
self.state['id'] = 3 # Vaccinated making denier
def infected_behaviour(self):
@@ -173,7 +175,7 @@ class ControlModelM2(BaseAgent):
# Neutral
neutral_neighbors = self.get_neighboring_agents(state_id=0)
for neighbor in neutral_neighbors:
- if random.random() < self.prob_infect:
+ if self.prob(self.prob_infect):
neighbor.state['id'] = 1 # Infected
self.state['visible'] = False
@@ -183,13 +185,13 @@ class ControlModelM2(BaseAgent):
# Vaccinate
neutral_neighbors = self.get_neighboring_agents(state_id=0)
for neighbor in neutral_neighbors:
- if random.random() < self.prob_cured_vaccinate_neutral:
+ if self.prob(self.prob_cured_vaccinate_neutral):
neighbor.state['id'] = 3 # Vaccinated
# Cure
infected_neighbors = self.get_neighboring_agents(state_id=1)
for neighbor in infected_neighbors:
- if random.random() < self.prob_cured_healing_infected:
+ if self.prob(self.prob_cured_healing_infected):
neighbor.state['id'] = 2 # Cured
def vaccinated_behaviour(self):
@@ -198,19 +200,19 @@ class ControlModelM2(BaseAgent):
# Cure
infected_neighbors = self.get_neighboring_agents(state_id=1)
for neighbor in infected_neighbors:
- if random.random() < self.prob_cured_healing_infected:
+ if self.prob(self.prob_cured_healing_infected):
neighbor.state['id'] = 2 # Cured
# Vaccinate
neutral_neighbors = self.get_neighboring_agents(state_id=0)
for neighbor in neutral_neighbors:
- if random.random() < self.prob_cured_vaccinate_neutral:
+ if self.prob(self.prob_cured_vaccinate_neutral):
neighbor.state['id'] = 3 # Vaccinated
# Generate anti-rumor
infected_neighbors_2 = self.get_neighboring_agents(state_id=1)
for neighbor in infected_neighbors_2:
- if random.random() < self.prob_generate_anti_rumor:
+ if self.prob(self.prob_generate_anti_rumor):
neighbor.state['id'] = 2 # Cured
def beacon_off_behaviour(self):
@@ -224,19 +226,19 @@ class ControlModelM2(BaseAgent):
# Cure (M2 feature added)
infected_neighbors = self.get_neighboring_agents(state_id=1)
for neighbor in infected_neighbors:
- if random.random() < self.prob_generate_anti_rumor:
+ if self.prob(self.prob_generate_anti_rumor):
neighbor.state['id'] = 2 # Cured
neutral_neighbors_infected = neighbor.get_neighboring_agents(state_id=0)
for neighbor in neutral_neighbors_infected:
- if random.random() < self.prob_generate_anti_rumor:
+ if self.prob(self.prob_generate_anti_rumor):
neighbor.state['id'] = 3 # Vaccinated
infected_neighbors_infected = neighbor.get_neighboring_agents(state_id=1)
for neighbor in infected_neighbors_infected:
- if random.random() < self.prob_generate_anti_rumor:
+ if self.prob(self.prob_generate_anti_rumor):
neighbor.state['id'] = 2 # Cured
# Vaccinate
neutral_neighbors = self.get_neighboring_agents(state_id=0)
for neighbor in neutral_neighbors:
- if random.random() < self.prob_cured_vaccinate_neutral:
+ if self.prob(self.prob_cured_vaccinate_neutral):
neighbor.state['id'] = 3 # Vaccinated
diff --git a/soil/agents/SISaModel.py b/soil/agents/SISaModel.py
index 4b66087..fa0d224 100644
--- a/soil/agents/SISaModel.py
+++ b/soil/agents/SISaModel.py
@@ -1,4 +1,3 @@
-import random
import numpy as np
from . import FSM, state
@@ -32,62 +31,64 @@ class SISaModel(FSM):
def __init__(self, environment, unique_id=0, state=()):
super().__init__(model=environment, unique_id=unique_id, state=state)
- self.neutral_discontent_spon_prob = np.random.normal(self.env['neutral_discontent_spon_prob'],
+ random = np.random.default_rng(seed=self._seed)
+
+ self.neutral_discontent_spon_prob = random.normal(self.env['neutral_discontent_spon_prob'],
self.env['standard_variance'])
- self.neutral_discontent_infected_prob = np.random.normal(self.env['neutral_discontent_infected_prob'],
+ self.neutral_discontent_infected_prob = random.normal(self.env['neutral_discontent_infected_prob'],
self.env['standard_variance'])
- self.neutral_content_spon_prob = np.random.normal(self.env['neutral_content_spon_prob'],
+ self.neutral_content_spon_prob = random.normal(self.env['neutral_content_spon_prob'],
self.env['standard_variance'])
- self.neutral_content_infected_prob = np.random.normal(self.env['neutral_content_infected_prob'],
+ self.neutral_content_infected_prob = random.normal(self.env['neutral_content_infected_prob'],
self.env['standard_variance'])
- self.discontent_neutral = np.random.normal(self.env['discontent_neutral'],
+ self.discontent_neutral = random.normal(self.env['discontent_neutral'],
self.env['standard_variance'])
- self.discontent_content = np.random.normal(self.env['discontent_content'],
+ self.discontent_content = random.normal(self.env['discontent_content'],
self.env['variance_d_c'])
- self.content_discontent = np.random.normal(self.env['content_discontent'],
+ self.content_discontent = random.normal(self.env['content_discontent'],
self.env['variance_c_d'])
- self.content_neutral = np.random.normal(self.env['content_neutral'],
+ self.content_neutral = random.normal(self.env['content_neutral'],
self.env['standard_variance'])
@state
def neutral(self):
# Spontaneous effects
- if random.random() < self.neutral_discontent_spon_prob:
+ if self.prob(self.neutral_discontent_spon_prob):
return self.discontent
- if random.random() < self.neutral_content_spon_prob:
+ if self.prob(self.neutral_content_spon_prob):
return self.content
# Infected
discontent_neighbors = self.count_neighboring_agents(state_id=self.discontent)
- if random.random() < discontent_neighbors * self.neutral_discontent_infected_prob:
+ if self.prob(discontent_neighbors * self.neutral_discontent_infected_prob):
return self.discontent
content_neighbors = self.count_neighboring_agents(state_id=self.content.id)
- if random.random() < content_neighbors * self.neutral_content_infected_prob:
+ if self.prob(content_neighbors * self.neutral_content_infected_prob):
return self.content
return self.neutral
@state
def discontent(self):
# Healing
- if random.random() < self.discontent_neutral:
+ if self.prob(self.discontent_neutral):
return self.neutral
# Superinfected
content_neighbors = self.count_neighboring_agents(state_id=self.content.id)
- if random.random() < content_neighbors * self.discontent_content:
+ if self.prob(content_neighbors * self.discontent_content):
return self.content
return self.discontent
@state
def content(self):
# Healing
- if random.random() < self.content_neutral:
+ if self.prob(self.content_neutral):
return self.neutral
# Superinfected
discontent_neighbors = self.count_neighboring_agents(state_id=self.discontent.id)
- if random.random() < discontent_neighbors * self.content_discontent:
+ if self.prob(discontent_neighbors * self.content_discontent):
self.discontent
return self.content
diff --git a/soil/agents/SentimentCorrelationModel.py b/soil/agents/SentimentCorrelationModel.py
index 7c12d7b..96907aa 100644
--- a/soil/agents/SentimentCorrelationModel.py
+++ b/soil/agents/SentimentCorrelationModel.py
@@ -1,4 +1,3 @@
-import random
from . import BaseAgent
@@ -68,10 +67,10 @@ class SentimentCorrelationModel(BaseAgent):
disgust_prob = self.disgust_prob+(len(disgusted_neighbors_1_time_step)*self.disgust_prob)
outside_effects_prob = self.outside_effects_prob
- num = random.random()
+ num = self.random.random()
if num= 3.10
nodeId = int
@@ -125,10 +112,18 @@ class AgentConfig(SingleAgentConfig):
class Config(BaseModel, extra=Extra.forbid):
version: Optional[str] = '1'
- general: General = General.default()
- topologies: Optional[Dict[str, NetConfig]] = {}
- environment: EnvConfig = EnvConfig.default()
- agents: Optional[Dict[str, AgentConfig]] = {}
+
+ id: str = 'Unnamed Simulation'
+ group: Optional[str] = None
+ dir_path: Optional[str] = None
+ num_trials: int = 1
+ max_time: float = 100
+ interval: float = 1
+ seed: str = ""
+
+ model_class: Union[Type, str]
+ model_parameters: Optional[Dict[str, Any]] = {}
+
def convert_old(old, strict=True):
'''
@@ -137,9 +132,13 @@ def convert_old(old, strict=True):
This is still a work in progress and might not work in many cases.
'''
+ #TODO: implement actual conversion
+ print('The old configuration format is no longer supported. \
+ Update your config files or run Soil==0.20')
+ raise NotImplementedError()
- new = {}
+ new = {}
general = {}
for k in ['id',
@@ -173,8 +172,8 @@ def convert_old(old, strict=True):
'default': {},
}
- if 'agent_type' in old:
- agents['default']['agent_class'] = old['agent_type']
+ if 'agent_type' in old:
+ agents['default']['agent_class'] = old['agent_type']
if 'default_state' in old:
agents['default']['state'] = old['default_state']
@@ -182,8 +181,8 @@ def convert_old(old, strict=True):
def updated_agent(agent):
newagent = dict(agent)
- newagent['agent_class'] = newagent['agent_type']
- del newagent['agent_type']
+ newagent['agent_class'] = newagent['agent_type']
+ del newagent['agent_type']
return newagent
for agent in old.get('environment_agents', []):
@@ -207,9 +206,9 @@ def convert_old(old, strict=True):
else:
by_weight.append(agent)
- if 'agent_type' in old and (not fixed and not by_weight):
+ if 'agent_type' in old and (not fixed and not by_weight):
agents['network']['topology'] = 'default'
- by_weight = [{'agent_class': old['agent_type']}]
+ by_weight = [{'agent_class': old['agent_type']}]
# TODO: translate states properly
diff --git a/soil/datacollection.py b/soil/datacollection.py
index 979c7bd..a889a76 100644
--- a/soil/datacollection.py
+++ b/soil/datacollection.py
@@ -2,23 +2,5 @@ from mesa import DataCollector as MDC
class SoilDataCollector(MDC):
-
- def __init__(self, environment, *args, **kwargs):
+ def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
- # Populate model and env reporters so they have a key per
- # So they can be shown in the web interface
- self.environment = environment
- raise NotImplementedError()
-
- @property
- def model_vars(self):
- raise NotImplementedError()
-
- @model_vars.setter
- def model_vars(self, value):
- raise NotImplementedError()
-
- @property
- def agent_reporters(self):
- raise NotImplementedError()
-
diff --git a/soil/environment.py b/soil/environment.py
index 9e00f36..2f59553 100644
--- a/soil/environment.py
+++ b/soil/environment.py
@@ -5,7 +5,7 @@ import math
import random
import logging
-from typing import Dict
+from typing import Any, Dict, Optional, Union
from collections import namedtuple
from time import time as current_time
from copy import deepcopy
@@ -17,20 +17,24 @@ import networkx as nx
from mesa import Model
from mesa.datacollection import DataCollector
-from . import serialization, agents, analysis, utils, time, config, network
+from . import serialization, analysis, utils, time, config, network
+
+from .agents import AgentView, BaseAgent, NetworkAgent, from_config as agents_from_config
Record = namedtuple('Record', 'dict_id t_step key value')
-class Environment(Model):
+class BaseEnvironment(Model):
"""
- The environment is key in a simulation. It contains the network topology,
- a reference to network and environment agents, as well as the environment
- params, which are used as shared state between agents.
+ The environment is key in a simulation. It controls how agents interact,
+ and what information is available to them.
+
+ This is an opinionated version of `mesa.Model` class, which adds many
+ convenience methods and abstractions.
The environment parameters and the state of every agent can be accessed
- both by using the environment as a dictionary or with the environment's
+ both by using the environment as a dictionary and with the environment's
:meth:`soil.environment.Environment.get` method.
"""
@@ -40,67 +44,62 @@ class Environment(Model):
schedule=None,
dir_path=None,
interval=1,
- agents: Dict[str, config.AgentConfig] = {},
- topologies: Dict[str, config.NetConfig] = {},
+ agent_class=BaseAgent,
+ agents: [tuple[type, Dict[str, Any]]] = {},
agent_reporters: Optional[Any] = None,
model_reporters: Optional[Any] = None,
tables: Optional[Any] = None,
**env_params):
- super().__init__()
+ super().__init__(seed=seed)
self.current_id = -1
- self.seed = '{}_{}'.format(seed, env_id)
self.id = env_id
self.dir_path = dir_path or os.getcwd()
if schedule is None:
- schedule = time.TimedActivation()
+ schedule = time.TimedActivation(self)
self.schedule = schedule
- seed = seed or current_time()
-
- random.seed(seed)
+ self.agent_class = agent_class
-
- self.topologies = {}
- self._node_ids = {}
- for (name, cfg) in topologies.items():
- self.set_topology(cfg=cfg,
- graph=name)
- self.agents = agents or {}
+ self.init_agents(agents)
self.env_params = env_params or {}
self.interval = interval
- self['SEED'] = seed
self.logger = utils.logger.getChild(self.id)
- self.datacollector = DataCollector(model_reporters, agent_reporters, tables)
- @property
- def topology(self):
- return self.topologies['default']
+ self.datacollector = DataCollector(
+ model_reporters=model_reporters,
+ agent_reporters=agent_reporters,
+ tables=tables,
+ )
+
+ def __read_agent_tuple(self, tup):
+ cls = self.agent_class
+ args = tup
+ if isinstance(tup, tuple):
+ cls = tup[0]
+ args = tup[1]
+ return serialization.deserialize(cls)(unique_id=self.next_id(),
+ model=self, **args)
+
+ def init_agents(self, agents: [tuple[type, Dict[str, Any]]] = {}):
+ agents = [self.__read_agent_tuple(tup) for tup in agents]
+ self._agents = {'default': {agent.id: agent for agent in agents}}
@property
- def network_agents(self):
- yield from self.agents(agent_class=agents.NetworkAgent)
-
- @staticmethod
- def from_config(conf: config.Config, trial_id, **kwargs) -> Environment:
- '''Create an environment for a trial of the simulation'''
- conf = conf
- if kwargs:
- conf = config.Config(**conf.dict(exclude_defaults=True), **kwargs)
- seed = '{}_{}'.format(conf.general.seed, trial_id)
- id = '{}_trial_{}'.format(conf.general.id, trial_id).replace('.', '-')
- opts = conf.environment.params.copy()
- dir_path = conf.general.dir_path
- opts.update(conf)
- opts.update(kwargs)
- env = serialization.deserialize(conf.environment.environment_class)(env_id=id, seed=seed, dir_path=dir_path, **opts)
- return env
+ def agents(self):
+ return AgentView(self._agents)
+
+ def find_one(self, *args, **kwargs):
+ return AgentView(self._agents).one(*args, **kwargs)
+
+ def count_agents(self, *args, **kwargs):
+ return sum(1 for i in self.agents(*args, **kwargs))
@property
def now(self):
@@ -109,115 +108,42 @@ class Environment(Model):
raise Exception('The environment has not been scheduled, so it has no sense of time')
- def topology_for(self, agent_id):
- return self.topologies[self._node_ids[agent_id][0]]
-
- def node_id_for(self, agent_id):
- return self._node_ids[agent_id][1]
-
- def set_topology(self, cfg=None, dir_path=None, graph='default'):
- topology = cfg
- if not isinstance(cfg, nx.Graph):
- topology = network.from_config(cfg, dir_path=dir_path or self.dir_path)
-
- self.topologies[graph] = topology
-
- @property
- def agents(self):
- return agents.AgentView(self._agents)
-
- def count_agents(self, *args, **kwargs):
- return sum(1 for i in self.find_all(*args, **kwargs))
-
- def find_all(self, *args, **kwargs):
- return agents.AgentView(self._agents).filter(*args, **kwargs)
-
- def find_one(self, *args, **kwargs):
- return agents.AgentView(self._agents).one(*args, **kwargs)
-
- @agents.setter
- def agents(self, agents_def: Dict[str, config.AgentConfig]):
- self._agents = agents.from_config(agents_def, env=self)
- for d in self._agents.values():
- for a in d.values():
- self.schedule.add(a)
-
- def init_agent(self, agent_id, agent_definitions, graph='default'):
- node = self.topologies[graph].nodes[agent_id]
- init = False
- state = dict(node)
+ # def init_agent(self, agent_id, agent_definitions, state=None):
+ # state = state or {}
- agent_class = None
- if 'agent_class' in self.states.get(agent_id, {}):
- agent_class = self.states[agent_id]['agent_class']
- elif 'agent_class' in node:
- agent_class = node['agent_class']
- elif 'agent_class' in self.default_state:
- agent_class = self.default_state['agent_class']
+ # agent_class = None
+ # if 'agent_class' in self.states.get(agent_id, {}):
+ # agent_class = self.states[agent_id]['agent_class']
+ # elif 'agent_class' in self.default_state:
+ # agent_class = self.default_state['agent_class']
- if agent_class:
- agent_class = agents.deserialize_type(agent_class)
- elif agent_definitions:
- agent_class, state = agents._agent_from_definition(agent_definitions, unique_id=agent_id)
- else:
- serialization.logger.debug('Skipping node {}'.format(agent_id))
- return
- return self.set_agent(agent_id, agent_class, state)
+ # if agent_class:
+ # agent_class = agents.deserialize_type(agent_class)
+ # elif agent_definitions:
+ # agent_class, state = agents._agent_from_definition(agent_definitions, unique_id=agent_id)
+ # else:
+ # serialization.logger.debug('Skipping agent {}'.format(agent_id))
+ # return
+ # return self.add_agent(agent_id, agent_class, state)
- def agent_to_node(self, agent_id, graph_name='default', node_id=None, shuffle=False):
- #TODO: test
- if node_id is None:
- G = self.topologies[graph_name]
- candidates = list(G.nodes(data=True))
- if shuffle:
- random.shuffle(candidates)
- for next_id, data in candidates:
- if data.get('agent_id', None) is None:
- node_id = next_id
- data['agent_id'] = agent_id
- break
-
- self._node_ids[agent_id] = (graph_name, node_id)
- print(self._node_ids)
-
-
- def set_agent(self, agent_id, agent_class, state=None, graph='default'):
- node = self.topologies[graph].nodes[agent_id]
+ def add_agent(self, agent_id, agent_class, state=None, graph='default'):
defstate = deepcopy(self.default_state) or {}
defstate.update(self.states.get(agent_id, {}))
- defstate.update(node.get('state', {}))
if state:
defstate.update(state)
a = None
if agent_class:
state = defstate
a = agent_class(model=self,
- unique_id=agent_id
- )
+ unique_id=agent_id)
for (k, v) in state.items():
setattr(a, k, v)
- node['agent'] = a
self.schedule.add(a)
return a
- def add_node(self, agent_class, state=None, graph='default'):
- agent_id = int(len(self.topologies[graph].nodes()))
- self.topologies[graph].add_node(agent_id)
- a = self.set_agent(agent_id, agent_class, state, graph=graph)
- a['visible'] = True
- return a
-
- def add_edge(self, agent1, agent2, start=None, graph='default', **attrs):
- if hasattr(agent1, 'id'):
- agent1 = agent1.id
- if hasattr(agent2, 'id'):
- agent2 = agent2.id
- start = start or self.now
- return self.topologies[graph].add_edge(agent1, agent2, **attrs)
-
def log(self, message, *args, level=logging.INFO, **kwargs):
if not self.logger.isEnabledFor(level):
return
@@ -238,14 +164,6 @@ class Environment(Model):
self.schedule.step()
self.datacollector.collect(self)
- def run(self, until, *args, **kwargs):
- until = until or float('inf')
-
- while self.schedule.next_time < until:
- self.step()
- utils.logger.debug(f'Simulation step {self.schedule.time}/{until}. Next: {self.schedule.next_time}')
- self.schedule.time = until
-
def __contains__(self, key):
return key in self.env_params
@@ -289,5 +207,90 @@ class Environment(Model):
yield from self._agent_to_tuples(agent, now)
+class AgentConfigEnvironment(BaseEnvironment):
+
+ def __init__(self, *args,
+ agents: Dict[str, config.AgentConfig] = {},
+ **kwargs):
+ return super().__init__(*args, agents=agents, **kwargs)
+
+ def init_agents(self, agents: Union[Dict[str, config.AgentConfig], list[tuple[type, Dict[str, Any]]]] = {}):
+ if not isinstance(agents, dict):
+ return BaseEnvironment.init_agents(self, agents)
+
+ self._agents = agents_from_config(agents,
+ env=self,
+ random=self.random)
+ for d in self._agents.values():
+ for a in d.values():
+ self.schedule.add(a)
+
+
+class NetworkConfigEnvironment(BaseEnvironment):
+
+ def __init__(self, *args, topologies: Dict[str, config.NetConfig] = {}, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.topologies = {}
+ self._node_ids = {}
+ for (name, cfg) in topologies.items():
+ self.set_topology(cfg=cfg, graph=name)
+
+ @property
+ def topology(self):
+ return self.topologies['default']
+
+ def set_topology(self, cfg=None, dir_path=None, graph='default'):
+ topology = cfg
+ if not isinstance(cfg, nx.Graph):
+ topology = network.from_config(cfg, dir_path=dir_path or self.dir_path)
+
+ self.topologies[graph] = topology
+
+ def topology_for(self, agent_id):
+ return self.topologies[self._node_ids[agent_id][0]]
+
+ @property
+ def network_agents(self):
+ yield from self.agents(agent_class=NetworkAgent)
+
+ def agent_to_node(self, agent_id, graph_name='default', node_id=None, shuffle=False):
+ node_id = network.agent_to_node(G=self.topologies[graph_name], agent_id=agent_id,
+ node_id=node_id, shuffle=shuffle,
+ random=self.random)
+
+ self._node_ids[agent_id] = (graph_name, node_id)
+
+
+ def add_node(self, agent_class, state=None, graph='default'):
+ agent_id = int(len(self.topologies[graph].nodes()))
+ self.topologies[graph].add_node(agent_id)
+ a = self.add_agent(agent_id, agent_class, state, graph=graph)
+ a['visible'] = True
+ return a
+
+ def add_edge(self, agent1, agent2, start=None, graph='default', **attrs):
+ if hasattr(agent1, 'id'):
+ agent1 = agent1.id
+ if hasattr(agent2, 'id'):
+ agent2 = agent2.id
+ start = start or self.now
+ return self.topologies[graph].add_edge(agent1, agent2, **attrs)
+
+ def add_agent(self, agent_id, *args, state=None, graph='default', **kwargs):
+ node = self.topologies[graph].nodes[agent_id]
+ node_state = node.get('state', {})
+ if node_state:
+ node_state.update(state or {})
+ state = node_state
+ a = super().add_agent(agent_id, *args, state=state, **kwargs)
+ node['agent'] = a
+ return a
+
+ def node_id_for(self, agent_id):
+ return self._node_ids[agent_id][1]
-SoilEnvironment = Environment
+class Environment(AgentConfigEnvironment, NetworkConfigEnvironment):
+ def __init__(self, *args, **kwargs):
+ agents = kwargs.pop('agents', {})
+ NetworkConfigEnvironment.__init__(self, *args, **kwargs)
+ AgentConfigEnvironment.__init__(self, *args, agents=agents, **kwargs)
diff --git a/soil/exporters.py b/soil/exporters.py
index 1bd06de..20a0f92 100644
--- a/soil/exporters.py
+++ b/soil/exporters.py
@@ -49,8 +49,8 @@ class Exporter:
self.simulation = simulation
outdir = outdir or os.path.join(os.getcwd(), 'soil_output')
self.outdir = os.path.join(outdir,
- simulation.config.general.group or '',
- simulation.config.general.id)
+ simulation.group or '',
+ simulation.name)
self.dry_run = dry_run
self.copy_to = copy_to
diff --git a/soil/network.py b/soil/network.py
index 0eb3688..25b55ab 100644
--- a/soil/network.py
+++ b/soil/network.py
@@ -1,6 +1,7 @@
from typing import Dict
import os
import sys
+import random
import networkx as nx
@@ -40,3 +41,25 @@ def from_config(cfg: config.NetConfig, dir_path: str = None):
return nx.json_graph.node_link_graph(cfg.topology)
return nx.Graph()
+
+
+def agent_to_node(G, agent_id, node_id=None, shuffle=False, random=random):
+ '''
+ Link an agent to a node in a topology.
+
+ If node_id is None, a node without an agent_id will be found.
+ '''
+ #TODO: test
+ if node_id is None:
+ candidates = list(G.nodes(data=True))
+ if shuffle:
+ random.shuffle(candidates)
+ for next_id, data in candidates:
+ if data.get('agent_id', None) is None:
+ node_id = next_id
+ data['agent_id'] = agent_id
+ break
+
+ if node_id is None:
+ raise ValueError(f"Not enough nodes in topology to assign one to agent {agent_id}")
+ return node_id
diff --git a/soil/serialization.py b/soil/serialization.py
index 8c17f23..328efdd 100644
--- a/soil/serialization.py
+++ b/soil/serialization.py
@@ -122,8 +122,6 @@ def load_files(*patterns, **kwargs):
for i in glob(pattern, **kwargs):
for config in load_file(i):
path = os.path.abspath(i)
- if 'general' in config and 'dir_path' not in config['general']:
- config['general']['dir_path'] = os.path.dirname(path)
yield config, path
diff --git a/soil/simulation.py b/soil/simulation.py
index d28549d..4d50c30 100644
--- a/soil/simulation.py
+++ b/soil/simulation.py
@@ -7,6 +7,10 @@ import traceback
import logging
import networkx as nx
+from dataclasses import dataclass, field, asdict
+from typing import Union
+
+
from networkx.readwrite import json_graph
from multiprocessing import Pool
from functools import partial
@@ -14,13 +18,15 @@ import pickle
from . import serialization, utils, basestring, agents
from .environment import Environment
-from .utils import logger
+from .utils import logger, run_and_return_exceptions, current_time
from .exporters import default
+from .time import INFINITY
from .config import Config, convert_old
#TODO: change documentation for simulation
+@dataclass
class Simulation:
"""
Parameters
@@ -30,23 +36,16 @@ class Simulation:
kwargs: parameters to use to initialize a new configuration, if one has not been provided.
"""
-
- def __init__(self, config=None,
- **kwargs):
- if kwargs:
- cfg = {}
- if config:
- cfg.update(config.dict(include_defaults=False))
- cfg.update(kwargs)
- config = Config(**cfg)
- if not config:
- raise ValueError("You need to specify a simulation configuration")
- self.config = config
-
-
- @property
- def name(self) -> str:
- return self.config.general.id
+ name: str = 'Unnamed simulation'
+ group: str = None
+ model_class: Union[str, type] = 'soil.Environment'
+ model_params: dict = field(default_factory=dict)
+ seed: str = field(default_factory=lambda: current_time())
+ dir_path: str = field(default_factory=lambda: os.getcwd())
+ max_time: float = float('inf')
+ max_steps: int = -1
+ num_trials: int = 3
+ dry_run: bool = False
def run_simulation(self, *args, **kwargs):
return self.run(*args, **kwargs)
@@ -58,14 +57,14 @@ class Simulation:
def _run_sync_or_async(self, parallel=False, **kwargs):
if parallel and not os.environ.get('SENPY_DEBUG', None):
p = Pool()
- func = partial(self.run_trial_exceptions, **kwargs)
- for i in p.imap_unordered(func, range(self.config.general.num_trials)):
+ func = partial(run_and_return_exceptions, self.run_trial, **kwargs)
+            for i in p.imap_unordered(func, range(self.num_trials)):
if isinstance(i, Exception):
logger.error('Trial failed:\n\t%s', i.message)
continue
yield i
else:
- for i in range(self.config.general.num_trials):
+ for i in range(self.num_trials):
yield self.run_trial(trial_id=i,
**kwargs)
@@ -80,12 +79,12 @@ class Simulation:
logger.info('Output directory: %s', outdir)
exporters = serialization.deserialize_all(exporters,
simulation=self,
- known_modules=['soil.exporters',],
+ known_modules=['soil.exporters', ],
dry_run=dry_run,
outdir=outdir,
**exporter_params)
- with utils.timer('simulation {}'.format(self.config.general.id)):
+ with utils.timer('simulation {}'.format(self.name)):
for exporter in exporters:
exporter.sim_start()
@@ -104,95 +103,95 @@ class Simulation:
for exporter in exporters:
exporter.sim_end()
+    # NOTE: the canonical run_model (which takes the model instance to
+    # advance as its first argument) is defined further below; an earlier
+    # duplicate that incorrectly used ``self.schedule`` on Simulation was
+    # dropped here so the later definition is no longer silently shadowed.
+
+
+
+
def get_env(self, trial_id=0, **kwargs):
'''Create an environment for a trial of the simulation'''
- # opts = self.environment_params.copy()
- # opts.update({
- # 'name': '{}_trial_{}'.format(self.name, trial_id),
- # 'topology': self.topology.copy(),
- # 'network_params': self.network_params,
- # 'seed': '{}_trial_{}'.format(self.seed, trial_id),
- # 'initial_time': 0,
- # 'interval': self.interval,
- # 'network_agents': self.network_agents,
- # 'initial_time': 0,
- # 'states': self.states,
- # 'dir_path': self.dir_path,
- # 'default_state': self.default_state,
- # 'history': bool(self._history),
- # 'environment_agents': self.environment_agents,
- # })
- # opts.update(kwargs)
- print(self.config)
- env = Environment.from_config(self.config, trial_id=trial_id, **kwargs)
- return env
+    def deserialize_reporters(reporters):
+        # resolve 'py:<name>' strings into callables; other values pass through
+        return {k: serialization.deserialize(v.split(':', 1)[1]) if isinstance(v, str)
+                and v.startswith('py:') else v for (k, v) in reporters.items()}
+
+ model_params = self.model_params.copy()
+ model_params.update(kwargs)
+
+ agent_reporters = deserialize_reporters(model_params.pop('agent_reporters', {}))
+ model_reporters = deserialize_reporters(model_params.pop('model_reporters', {}))
+
+ env = serialization.deserialize(self.model_class)
+ return env(id=f'{self.name}_trial_{trial_id}',
+ seed=f'{self.seed}_trial_{trial_id}',
+ dir_path=self.dir_path,
+ agent_reporters=agent_reporters,
+ model_reporters=model_reporters,
+ **model_params)
def run_trial(self, trial_id=None, until=None, log_level=logging.INFO, **opts):
"""
Run a single trial of the simulation
"""
+ model = self.get_env(trial_id, **opts)
+ return self.run_model(model, trial_id=trial_id, until=until, log_level=log_level)
+
+ def run_model(self, model, trial_id=None, until=None, log_level=logging.INFO, **opts):
trial_id = trial_id if trial_id is not None else current_time()
if log_level:
logger.setLevel(log_level)
# Set-up trial environment and graph
- until = until or self.config.general.max_time
+ until = until or self.max_time
- env = self.get_env(trial_id, **opts)
# Set up agents on nodes
- with utils.timer('Simulation {} trial {}'.format(self.config.general.id, trial_id)):
- env.run(until)
- return env
-
- def run_trial_exceptions(self, *args, **kwargs):
- '''
- A wrapper for run_trial that catches exceptions and returns them.
- It is meant for async simulations
- '''
- try:
- return self.run_trial(*args, **kwargs)
- except Exception as ex:
- if ex.__cause__ is not None:
- ex = ex.__cause__
- ex.message = ''.join(traceback.format_exception(type(ex), ex, ex.__traceback__)[:])
- return ex
+        def is_done():
+            if not getattr(model, 'running', True):
+                return True  # the model halted itself (e.g. exhausted schedule)
+            return (getattr(model.schedule, 'time', -1) >= until or
+                    (self.max_steps > 0 and getattr(model.schedule, 'steps', -1) >= self.max_steps))
+
+ with utils.timer('Simulation {} trial {}'.format(self.name, trial_id)):
+ while not is_done():
+                utils.logger.debug(f'Simulation time {model.schedule.time}/{until}. Next: {getattr(model.schedule, "next_time", model.schedule.time)}')
+ model.step()
+ return model
def to_dict(self):
- return self.config.dict()
+ d = asdict(self)
+ d['model_class'] = serialization.serialize(d['model_class'])[0]
+ d['model_params'] = serialization.serialize(d['model_params'])[0]
+ d['dir_path'] = str(d['dir_path'])
+
+ return d
def to_yaml(self):
- return yaml.dump(self.config.dict())
+        return yaml.dump(self.to_dict())
-def all_from_config(config):
+def iter_from_config(config):
configs = list(serialization.load_config(config))
for config, path in configs:
- if config.get('version', '1') == '1':
- config = convert_old(config)
- if not isinstance(config, Config):
- config = Config(**config)
- if not config.general.dir_path:
- config.general.dir_path = os.path.dirname(path)
- sim = Simulation(config=config)
- yield sim
+ d = dict(config)
+ if 'dir_path' not in d:
+ d['dir_path'] = os.path.dirname(path)
+ if d.get('version', '2') == '1' or 'agents' in d or 'network_agents' in d or 'environment_agents' in d:
+ d = convert_old(d)
+ d.pop('version', None)
+ yield Simulation(**d)
def from_config(conf_or_path):
- lst = list(all_from_config(conf_or_path))
+ lst = list(iter_from_config(conf_or_path))
if len(lst) > 1:
raise AttributeError('Provide only one configuration')
return lst[0]
-def from_old_config(conf_or_path):
- config = list(serialization.load_config(conf_or_path))
- if len(config) > 1:
- raise AttributeError('Provide only one configuration')
- config = convert_old(config[0][0])
- return Simulation(config)
-
def run_from_config(*configs, **kwargs):
- for sim in all_from_config(configs):
- name = config.general.id
- logger.info("Using config(s): {name}".format(name=name))
+ for sim in iter_from_config(configs):
+        logger.info(f"Using config(s): {sim.name}")
sim.run_simulation(**kwargs)
diff --git a/soil/time.py b/soil/time.py
index b74ef82..b2faf46 100644
--- a/soil/time.py
+++ b/soil/time.py
@@ -37,9 +37,10 @@ class TimedActivation(BaseScheduler):
"""
def __init__(self, *args, **kwargs):
- super().__init__(self)
+ super().__init__(*args, **kwargs)
self._queue = []
self.next_time = 0
+        self.logger = logger.getChild(f'time_{self.model}')
def add(self, agent: MesaAgent):
if agent.unique_id not in self._agents:
@@ -52,7 +53,8 @@ class TimedActivation(BaseScheduler):
an agent will signal when it wants to be scheduled next.
"""
- if self.next_time == INFINITY:
+ self.logger.debug(f'Simulation step {self.next_time}')
+ if not self.model.running:
return
self.time = self.next_time
@@ -60,7 +62,7 @@ class TimedActivation(BaseScheduler):
while self._queue and self._queue[0][0] == self.time:
(when, agent_id) = heappop(self._queue)
- logger.debug(f'Stepping agent {agent_id}')
+ self.logger.debug(f'Stepping agent {agent_id}')
returned = self._agents[agent_id].step()
when = (returned or Delta(1)).abs(self.time)
@@ -74,7 +76,8 @@ class TimedActivation(BaseScheduler):
if not self._queue:
self.time = INFINITY
self.next_time = INFINITY
+ self.model.running = False
return
self.next_time = self._queue[0][0]
-
+ self.logger.debug(f'Next step: {self.next_time}')
diff --git a/soil/utils.py b/soil/utils.py
index 562def1..cd82588 100644
--- a/soil/utils.py
+++ b/soil/utils.py
@@ -1,6 +1,7 @@
import logging
from time import time as current_time, strftime, gmtime, localtime
import os
+import traceback
from shutil import copyfile
@@ -89,3 +90,17 @@ def unflatten_dict(d):
target = target[token]
target[tokens[-1]] = v
return out
+
+
+def run_and_return_exceptions(func, *args, **kwargs):
+ '''
+ A wrapper for run_trial that catches exceptions and returns them.
+ It is meant for async simulations.
+ '''
+ try:
+ return func(*args, **kwargs)
+ except Exception as ex:
+ if ex.__cause__ is not None:
+ ex = ex.__cause__
+ ex.message = ''.join(traceback.format_exception(type(ex), ex, ex.__traceback__)[:])
+ return ex
diff --git a/soil/web/config.yml b/soil/web/config.yml
index 1f741eb..27e2785 100644
--- a/soil/web/config.yml
+++ b/soil/web/config.yml
@@ -6,11 +6,11 @@ network_params:
n: 100
m: 2
network_agents:
- - agent_type: ControlModelM2
+ - agent_class: ControlModelM2
weight: 0.1
state:
id: 1
- - agent_type: ControlModelM2
+ - agent_class: ControlModelM2
weight: 0.9
state:
id: 0
diff --git a/tests/old_complete.yml b/tests/old_complete.yml
index 8609eb9..517abc4 100644
--- a/tests/old_complete.yml
+++ b/tests/old_complete.yml
@@ -10,21 +10,21 @@ network_params:
generator: complete_graph
n: 10
network_agents:
- - agent_type: CounterModel
+ - agent_class: CounterModel
weight: 0.4
state:
state_id: 0
- - agent_type: AggregatedCounter
+ - agent_class: AggregatedCounter
weight: 0.6
environment_agents:
- agent_id: 'Environment Agent 1'
- agent_type: CounterModel
+ agent_class: CounterModel
state:
times: 10
environment_class: Environment
environment_params:
am_i_complete: true
-agent_type: CounterModel
+agent_class: CounterModel
default_state:
times: 1
states:
diff --git a/tests/test_analysis.py b/tests/test_analysis.py
index 83d6e54..204b4dd 100644
--- a/tests/test_analysis.py
+++ b/tests/test_analysis.py
@@ -46,7 +46,7 @@ class TestAnalysis(TestCase):
'generator': 'complete_graph',
'n': 2
},
- 'agent_type': Ping,
+ 'agent_class': Ping,
'states': [{'interval': 1}, {'interval': 2}],
'max_time': 30,
'num_trials': 1,
diff --git a/tests/test_config.py b/tests/test_config.py
index 6f69ee2..fd9fc70 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -1,8 +1,10 @@
from unittest import TestCase
import os
+import yaml
+import copy
from os.path import join
-from soil import simulation, serialization, config, network, agents
+from soil import simulation, serialization, config, network, agents, utils
ROOT = os.path.abspath(os.path.dirname(__file__))
EXAMPLES = join(ROOT, '..', 'examples')
@@ -10,6 +12,17 @@ EXAMPLES = join(ROOT, '..', 'examples')
FORCE_TESTS = os.environ.get('FORCE_TESTS', '')
+def isequal(a, b):
+ if isinstance(a, dict):
+ for (k, v) in a.items():
+ if v:
+ isequal(a[k], b[k])
+ else:
+ assert not b.get(k, None)
+ return
+ assert a == b
+
+
class TestConfig(TestCase):
def test_conversion(self):
@@ -18,18 +31,23 @@ class TestConfig(TestCase):
converted_defaults = config.convert_old(old, strict=False)
converted = converted_defaults.dict(skip_defaults=True)
- def isequal(a, b):
- if isinstance(a, dict):
- for (k, v) in a.items():
- if v:
- isequal(a[k], b[k])
- else:
- assert not b.get(k, None)
- return
- assert a == b
-
isequal(converted, expected)
+ def test_configuration_changes(self):
+ """
+ The configuration should not change after running
+ the simulation.
+ """
+ config = serialization.load_file(join(EXAMPLES, 'complete.yml'))[0]
+ s = simulation.from_config(config)
+        init_config = copy.deepcopy(s.to_dict())
+
+        s.run_simulation(dry_run=True)
+        nconfig = s.to_dict()
+        # the serialized configuration must be unchanged by the run
+        isequal(init_config, nconfig)
+
+
def test_topology_config(self):
netconfig = config.NetConfig(**{
'path': join(ROOT, 'test.gexf')
@@ -48,7 +66,7 @@ class TestConfig(TestCase):
'network_params': {
'path': join(ROOT, 'test.gexf')
},
- 'agent_type': 'CounterModel',
+ 'agent_class': 'CounterModel',
# 'states': [{'times': 10}, {'times': 20}],
'max_time': 2,
'dry_run': True,
@@ -63,7 +81,6 @@ class TestConfig(TestCase):
assert len(env.agents) == 2
assert env.agents[0].topology == env.topologies['default']
-
def test_agents_from_config(self):
'''We test that the known complete configuration produces
the right agents in the right groups'''
@@ -74,8 +91,25 @@ class TestConfig(TestCase):
assert len(env.agents(group='network')) == 10
assert len(env.agents(group='environment')) == 1
- assert sum(1 for a in env.agents(group='network', agent_type=agents.CounterModel)) == 4
- assert sum(1 for a in env.agents(group='network', agent_type=agents.AggregatedCounter)) == 6
+ assert sum(1 for a in env.agents(group='network', agent_class=agents.CounterModel)) == 4
+ assert sum(1 for a in env.agents(group='network', agent_class=agents.AggregatedCounter)) == 6
+
+ def test_yaml(self):
+ """
+ The YAML version of a newly created configuration should be equivalent
+ to the configuration file used.
+ Values not present in the original config file should have reasonable
+ defaults.
+ """
+ with utils.timer('loading'):
+ config = serialization.load_file(join(EXAMPLES, 'complete.yml'))[0]
+ s = simulation.from_config(config)
+ with utils.timer('serializing'):
+ serial = s.to_yaml()
+ with utils.timer('recovering'):
+ recovered = yaml.load(serial, Loader=yaml.SafeLoader)
+ for (k, v) in config.items():
+ assert recovered[k] == v
def make_example_test(path, cfg):
def wrapped(self):
diff --git a/tests/test_exporters.py b/tests/test_exporters.py
index 6ff544b..debc14a 100644
--- a/tests/test_exporters.py
+++ b/tests/test_exporters.py
@@ -36,7 +36,7 @@ class Exporters(TestCase):
config = {
'name': 'exporter_sim',
'network_params': {},
- 'agent_type': 'CounterModel',
+ 'agent_class': 'CounterModel',
'max_time': 2,
'num_trials': 5,
'environment_params': {}
@@ -62,7 +62,7 @@ class Exporters(TestCase):
'generator': 'complete_graph',
'n': 4
},
- 'agent_type': 'CounterModel',
+ 'agent_class': 'CounterModel',
'max_time': 2,
'num_trials': n_trials,
'dry_run': False,
diff --git a/tests/test_history.py b/tests/test_history.py
index 4a76d5c..773cfd6 100644
--- a/tests/test_history.py
+++ b/tests/test_history.py
@@ -41,7 +41,7 @@ class TestHistory(TestCase):
'path': join(ROOT, 'test.gexf')
},
'network_agents': [{
- 'agent_type': 'AggregatedCounter',
+ 'agent_class': 'AggregatedCounter',
'weight': 1,
'state': {'state_id': 0}
diff --git a/tests/test_main.py b/tests/test_main.py
index 017e92e..a114b6c 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -1,9 +1,6 @@
from unittest import TestCase
import os
-import io
-import yaml
-import copy
import pickle
import networkx as nx
from functools import partial
@@ -29,56 +26,17 @@ class CustomAgent(agents.FSM, agents.NetworkAgent):
class TestMain(TestCase):
- def test_load_graph(self):
- """
- Load a graph from file if the extension is known.
- Raise an exception otherwise.
- """
- config = {
- 'network_params': {
- 'path': join(ROOT, 'test.gexf')
- }
- }
- G = network.from_config(config['network_params'])
- assert G
- assert len(G) == 2
- with self.assertRaises(AttributeError):
- config = {
- 'network_params': {
- 'path': join(ROOT, 'unknown.extension')
- }
- }
- G = network.from_config(config['network_params'])
- print(G)
-
- def test_generate_barabasi(self):
- """
- If no path is given, a generator and network parameters
- should be used to generate a network
- """
- cfg = {
- 'params': {
- 'generator': 'barabasi_albert_graph'
- }
- }
- with self.assertRaises(Exception):
- G = network.from_config(cfg)
- cfg['params']['n'] = 100
- cfg['params']['m'] = 10
- G = network.from_config(cfg)
- assert len(G) == 100
-
def test_empty_simulation(self):
"""A simulation with a base behaviour should do nothing"""
config = {
- 'network_params': {
- 'path': join(ROOT, 'test.gexf')
- },
- 'agent_type': 'BaseAgent',
- 'environment_params': {
+ 'model_params': {
+ 'network_params': {
+ 'path': join(ROOT, 'test.gexf')
+ },
+ 'agent_class': 'BaseAgent',
}
}
- s = simulation.from_old_config(config)
+ s = simulation.from_config(config)
s.run_simulation(dry_run=True)
@@ -88,21 +46,21 @@ class TestMain(TestCase):
agent should be able to update its state."""
config = {
'name': 'CounterAgent',
- 'network_params': {
- 'generator': nx.complete_graph,
- 'n': 2,
- },
- 'agent_type': 'CounterModel',
- 'states': {
- 0: {'times': 10},
- 1: {'times': 20},
- },
- 'max_time': 2,
'num_trials': 1,
- 'environment_params': {
+ 'max_time': 2,
+ 'model_params': {
+ 'network_params': {
+ 'generator': nx.complete_graph,
+ 'n': 2,
+ },
+ 'agent_class': 'CounterModel',
+ 'states': {
+ 0: {'times': 10},
+ 1: {'times': 20},
+ },
}
}
- s = simulation.from_old_config(config)
+ s = simulation.from_config(config)
def test_counter_agent(self):
"""
@@ -110,24 +68,24 @@ class TestMain(TestCase):
agent should be able to update its state."""
config = {
'version': '2',
- 'general': {
- 'name': 'CounterAgent',
- 'max_time': 2,
- 'dry_run': True,
- 'num_trials': 1,
- },
- 'topologies': {
- 'default': {
- 'path': join(ROOT, 'test.gexf')
- }
- },
- 'agents': {
- 'default': {
- 'agent_class': 'CounterModel',
+ 'name': 'CounterAgent',
+ 'dry_run': True,
+ 'num_trials': 1,
+ 'max_time': 2,
+ 'model_params': {
+ 'topologies': {
+ 'default': {
+ 'path': join(ROOT, 'test.gexf')
+ }
},
- 'counters': {
- 'topology': 'default',
- 'fixed': [{'state': {'times': 10}}, {'state': {'times': 20}}],
+ 'agents': {
+ 'default': {
+ 'agent_class': 'CounterModel',
+ },
+ 'counters': {
+ 'topology': 'default',
+ 'fixed': [{'state': {'times': 10}}, {'state': {'times': 20}}],
+ }
}
}
}
@@ -141,33 +99,37 @@ class TestMain(TestCase):
assert env.agents[0]['times'] == 11
assert env.agents[1]['times'] == 21
- def test_custom_agent(self):
- """Allow for search of neighbors with a certain state_id"""
+ def test_init_and_count_agents(self):
+ """Agents should be properly initialized and counting should filter them properly"""
+ #TODO: separate this test into two or more test cases
config = {
- 'network_params': {
- 'path': join(ROOT, 'test.gexf')
- },
- 'network_agents': [{
- 'agent_type': CustomAgent,
- 'weight': 1
-
- }],
'max_time': 10,
- 'environment_params': {
- }
+ 'model_params': {
+ 'agents': [(CustomAgent, {'weight': 1}),
+ (CustomAgent, {'weight': 3}),
+ ],
+ 'topologies': {
+ 'default': {
+ 'path': join(ROOT, 'test.gexf')
+ }
+ },
+ },
}
- s = simulation.from_old_config(config)
+ s = simulation.from_config(config)
env = s.run_simulation(dry_run=True)[0]
- assert env.agents[1].count_agents(state_id='normal') == 2
- assert env.agents[1].count_agents(state_id='normal', limit_neighbors=True) == 1
- assert env.agents[0].neighbors == 1
+ assert env.agents[0].weight == 1
+ assert env.count_agents() == 2
+ assert env.count_agents(weight=1) == 1
+ assert env.count_agents(weight=3) == 1
+ assert env.count_agents(agent_class=CustomAgent) == 2
+
def test_torvalds_example(self):
"""A complete example from a documentation should work."""
config = serialization.load_file(join(EXAMPLES, 'torvalds.yml'))[0]
- config['network_params']['path'] = join(EXAMPLES,
+        config['model_params']['network_params']['path'] = join(EXAMPLES,
-                                                config['network_params']['path'])
+                                                config['model_params']['network_params']['path'])
- s = simulation.from_old_config(config)
+ s = simulation.from_config(config)
env = s.run_simulation(dry_run=True)[0]
for a in env.network_agents:
skill_level = a.state['skill_level']
@@ -184,47 +146,6 @@ class TestMain(TestCase):
assert a.state['total'] == 3
assert a.state['neighbors'] == 1
- def test_yaml(self):
- """
- The YAML version of a newly created configuration should be equivalent
- to the configuration file used.
- Values not present in the original config file should have reasonable
- defaults.
- """
- with utils.timer('loading'):
- config = serialization.load_file(join(EXAMPLES, 'complete.yml'))[0]
- s = simulation.from_old_config(config)
- with utils.timer('serializing'):
- serial = s.to_yaml()
- with utils.timer('recovering'):
- recovered = yaml.load(serial, Loader=yaml.SafeLoader)
- for (k, v) in config.items():
- assert recovered[k] == v
-
- def test_configuration_changes(self):
- """
- The configuration should not change after running
- the simulation.
- """
- config = serialization.load_file(join(EXAMPLES, 'complete.yml'))[0]
- s = simulation.from_old_config(config)
- init_config = copy.copy(s.config)
-
- s.run_simulation(dry_run=True)
- nconfig = s.config
- # del nconfig['to
- assert init_config == nconfig
-
- def test_save_geometric(self):
- """
- There is a bug in networkx that prevents it from creating a GEXF file
- from geometric models. We should work around it.
- """
- G = nx.random_geometric_graph(20, 0.1)
- env = Environment(topology=G)
- f = io.BytesIO()
- env.dump_gexf(f)
-
def test_serialize_class(self):
ser, name = serialization.serialize(agents.BaseAgent, known_modules=[])
assert name == 'soil.agents.BaseAgent'
@@ -247,7 +168,7 @@ class TestMain(TestCase):
des = serialization.deserialize(name, ser)
assert i == des
- def test_serialize_agent_type(self):
+ def test_serialize_agent_class(self):
'''A class from soil.agents should be serialized without the module part'''
ser = agents.serialize_type(CustomAgent)
assert ser == 'test_main.CustomAgent'
@@ -258,33 +179,33 @@ class TestMain(TestCase):
def test_deserialize_agent_distribution(self):
agent_distro = [
{
- 'agent_type': 'CounterModel',
+ 'agent_class': 'CounterModel',
'weight': 1
},
{
- 'agent_type': 'test_main.CustomAgent',
+ 'agent_class': 'test_main.CustomAgent',
'weight': 2
},
]
converted = agents.deserialize_definition(agent_distro)
- assert converted[0]['agent_type'] == agents.CounterModel
- assert converted[1]['agent_type'] == CustomAgent
+ assert converted[0]['agent_class'] == agents.CounterModel
+ assert converted[1]['agent_class'] == CustomAgent
pickle.dumps(converted)
def test_serialize_agent_distribution(self):
agent_distro = [
{
- 'agent_type': agents.CounterModel,
+ 'agent_class': agents.CounterModel,
'weight': 1
},
{
- 'agent_type': CustomAgent,
+ 'agent_class': CustomAgent,
'weight': 2
},
]
converted = agents.serialize_definition(agent_distro)
- assert converted[0]['agent_type'] == 'CounterModel'
- assert converted[1]['agent_type'] == 'test_main.CustomAgent'
+ assert converted[0]['agent_class'] == 'CounterModel'
+ assert converted[1]['agent_class'] == 'test_main.CustomAgent'
pickle.dumps(converted)
def test_subgraph(self):
@@ -292,7 +213,7 @@ class TestMain(TestCase):
G = nx.Graph()
G.add_node(3)
G.add_edge(1, 2)
- distro = agents.calculate_distribution(agent_type=agents.NetworkAgent)
+ distro = agents.calculate_distribution(agent_class=agents.NetworkAgent)
distro[0]['topology'] = 'default'
aconfig = config.AgentConfig(distribution=distro, topology='default')
env = Environment(name='Test', topologies={'default': G}, agents={'network': aconfig})
@@ -303,7 +224,7 @@ class TestMain(TestCase):
assert len(a2.subgraph(limit_neighbors=True)) == 2
assert len(a3.subgraph(limit_neighbors=True)) == 1
assert len(a3.subgraph(limit_neighbors=True, center=False)) == 0
- assert len(a3.subgraph(agent_type=agents.NetworkAgent)) == 3
+ assert len(a3.subgraph(agent_class=agents.NetworkAgent)) == 3
def test_templates(self):
'''Loading a template should result in several configs'''
@@ -313,19 +234,19 @@ class TestMain(TestCase):
def test_until(self):
config = {
'name': 'until_sim',
- 'network_params': {},
- 'agent_type': 'CounterModel',
+ 'model_params': {
+ 'network_params': {},
+ 'agent_class': 'CounterModel',
+ },
'max_time': 2,
'num_trials': 50,
- 'environment_params': {}
}
- s = simulation.from_old_config(config)
+ s = simulation.from_config(config)
runs = list(s.run_simulation(dry_run=True))
over = list(x.now for x in runs if x.now>2)
assert len(runs) == config['num_trials']
assert len(over) == 0
-
def test_fsm(self):
'''Basic state change'''
class ToggleAgent(agents.FSM):
diff --git a/tests/test_network.py b/tests/test_network.py
new file mode 100644
index 0000000..b111a94
--- /dev/null
+++ b/tests/test_network.py
@@ -0,0 +1,85 @@
+from unittest import TestCase
+
+import io
+import os
+import networkx as nx
+
+from os.path import join
+
+from soil import network, environment, simulation
+
+ROOT = os.path.abspath(os.path.dirname(__file__))
+EXAMPLES = join(ROOT, '..', 'examples')
+
+
+class TestNetwork(TestCase):
+ def test_load_graph(self):
+ """
+ Load a graph from file if the extension is known.
+ Raise an exception otherwise.
+ """
+ config = {
+ 'network_params': {
+ 'path': join(ROOT, 'test.gexf')
+ }
+ }
+ G = network.from_config(config['network_params'])
+ assert G
+ assert len(G) == 2
+ with self.assertRaises(AttributeError):
+ config = {
+ 'network_params': {
+ 'path': join(ROOT, 'unknown.extension')
+ }
+ }
+ G = network.from_config(config['network_params'])
+ print(G)
+
+ def test_generate_barabasi(self):
+ """
+ If no path is given, a generator and network parameters
+ should be used to generate a network
+ """
+ cfg = {
+ 'params': {
+ 'generator': 'barabasi_albert_graph'
+ }
+ }
+ with self.assertRaises(Exception):
+ G = network.from_config(cfg)
+ cfg['params']['n'] = 100
+ cfg['params']['m'] = 10
+ G = network.from_config(cfg)
+ assert len(G) == 100
+
+ def test_save_geometric(self):
+ """
+ There is a bug in networkx that prevents it from creating a GEXF file
+ from geometric models. We should work around it.
+ """
+ G = nx.random_geometric_graph(20, 0.1)
+ env = environment.NetworkEnvironment(topology=G)
+ f = io.BytesIO()
+ env.dump_gexf(f)
+
+ def test_custom_agent_neighbors(self):
+ """Allow for search of neighbors with a certain state_id"""
+ config = {
+ 'network_params': {
+ 'path': join(ROOT, 'test.gexf')
+ },
+ 'network_agents': [{
+        'agent_class': 'test_main.CustomAgent',
+ 'weight': 1
+
+ }],
+ 'max_time': 10,
+ 'environment_params': {
+ }
+ }
+ s = simulation.from_config(config)
+ env = s.run_simulation(dry_run=True)[0]
+ assert env.agents[1].count_agents(state_id='normal') == 2
+ assert env.agents[1].count_agents(state_id='normal', limit_neighbors=True) == 1
+ assert env.agents[0].neighbors == 1
+