mirror of
https://github.com/gsi-upm/soil
synced 2025-09-14 04:02:21 +00:00
Compare commits
18 Commits
* 092591fb97
* 263b4e0e33
* 189836408f
* ee0c4517cb
* 3041156f19
* f49be3af68
* 5e93399d58
* eca4cae298
* 47a67f6665
* c13550cf83
* 55bbc76b2a
* d13e4eb4b9
* 93d23e4cab
* 3802578ad5
* 4e296e0cf1
* 302075a65d
* fba379c97c
* bf481f0f88
@@ -1,5 +1,7 @@
**/soil_output
.*
**/.*
**/__pycache__
__pycache__
*.pyc
**/backup

4 .gitignore vendored
@@ -8,4 +8,6 @@ soil_output
docs/_build*
build/*
dist/*
prof
prof
backup
*.egg-info
12 CHANGELOG.md
@@ -13,11 +13,13 @@ For an explanation of the general changes in version 1.0, please refer to the fi
* Environments now have a class method to make them easier to use without a simulation: `.run`. Notice that this is different from `run_model`, which is an instance method.
* Ability to run simulations using mesa models
* The `soil.exporters` module to export the results of datacollectors (`model.datacollector`) into files at the end of trials/simulations
* Agents can now have generators as a step function or a state. They work similarly to normal functions, with one caveat in the case of `FSM`: only `time` values (or None) can be yielded, not a state. This is because the state will not change; it will be resumed after the yield, at the appropriate time. The return value *can* be a state, or a `(state, time)` tuple, just like in normal states.
* Agents can now have generators or async functions as their step or as states. They work similarly to normal functions, with one caveat in the case of `FSM`: only time values (a float, int or None) can be awaited or yielded, not a state. This is because the state will not change; it will be resumed after the yield, at the appropriate time. To return to a different state, use the `delay` and `at` functions of the state.
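  As a rough illustration of the generator and async step styles described in the item above, here is a minimal sketch modeled on the `benchmarks/noop` scripts added in this comparison (the class names and counts are placeholders, not part of the changelog itself):

  ```python
  from soil import BaseAgent, Environment

  class TickerGen(BaseAgent):
      """Generator-based step: only time values (or None) may be yielded."""
      num_calls = 0

      def step(self):
          while True:
              self.num_calls += 1
              yield self.delay()      # resume at the next scheduled step

  class TickerAsync(BaseAgent):
      """Async step: awaiting a delay behaves like yielding one."""
      num_calls = 0

      async def step(self):
          while True:
              self.num_calls += 1
              await self.delay(2)     # resume two time units later

  class TickerEnv(Environment):
      def init(self):
          self.add_agents(TickerGen, k=50)
          self.add_agents(TickerAsync, k=50)
  ```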
* Simulations can now specify a `matrix` with possible values for every simulation parameter. The final parameters will be calculated based on the `parameters` used and a cartesian product (i.e., all possible combinations) of each parameter.
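  A hedged sketch of how `matrix` could be combined with fixed `parameters` for the item above; passing `matrix` as a `Simulation` argument and the specific parameter names are assumptions for illustration only:

  ```python
  from soil import Environment, Simulation

  class DemoEnv(Environment):
      num_agents = 10
      prob_infect = 0.1       # overridden by each matrix combination
      recovery_chance = 0.0   # overridden by each matrix combination

      def init(self):
          pass

  sim = Simulation(model=DemoEnv,
                   parameters={"num_agents": 10},            # fixed for every run
                   matrix={"prob_infect": [0.1, 0.3, 0.5],   # 3 x 2 = 6 combinations
                           "recovery_chance": [0.0, 0.5]},
                   iterations=2,                             # runs per combination
                   max_steps=10,
                   dump=False)
  results = sim.run()
  ```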
* Simple debugging capabilities in `soil.debugging`, with a custom `pdb.Debugger` subclass that exposes commands to list agents and their status and set breakpoints on states (for FSM agents). Try it with `soil --debug <simulation file>`
* The `agent.after` and `agent.at` methods, to avoid having to return a time manually.

### Changed

* Configuration schema (`Simulation`) is very simplified. All simulations should be checked
* Agents that wish to
* Model / environment variables are expected (but not enforced) to be a single value. This is done to more closely align with mesa
* `Exporter.iteration_end` now takes two parameters: `env` (same as before) and `params` (specific parameters for this environment). We considered including a `parameters` attribute in the environment, but this would not be compatible with mesa.
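  For example, a custom exporter only needs to accept the new signature; the class below is a toy sketch (how exporters are attached to a run, e.g. an `exporters=` argument, is an assumption and not specified in this changelog):

  ```python
  from soil.exporters import Exporter

  class ParamsLogger(Exporter):
      """Toy exporter: report each iteration's specific parameters."""

      def iteration_end(self, env, params):
          # `env` is the finished environment; `params` holds the parameters
          # that were specific to this iteration
          print(f"iteration finished with params={params}")
  ```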
* `num_trials` renamed to `iterations`

@@ -26,8 +28,16 @@ For an explanation of the general changes in version 1.0, please refer to the fi
* Simulation results for every iteration of a simulation with the same name are stored in a single `sqlite` database

### Removed

* The `time.When` and `time.Cond` classes are removed
* Any `tsih` and `History` integration in the main classes. To record the state of environments/agents, just use a datacollector. In some cases this may be slower or consume more memory than the previous system. However, few cases actually used the full potential of the history, and it came at the cost of unnecessary complexity and worse performance for the majority of cases.
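  A minimal sketch of the datacollector-based replacement mentioned above, based on the `add_agent_reporter` calls used in the benchmark scripts added in this comparison (names are placeholders):

  ```python
  from soil import Agent, Environment

  class Walker(Agent):
      num_calls = 0

      def step(self):
          self.num_calls += 1

  class RecordedEnv(Environment):
      def init(self):
          self.add_agents(Walker, k=10)
          # Record an agent attribute at every step through the datacollector,
          # instead of the removed tsih/History machinery
          self.add_agent_reporter("num_calls")

  # After a run, the collected values are available through mesa's DataCollector,
  # e.g. env.datacollector.get_agent_vars_dataframe()
  ```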

## [0.20.8]

### Changed

* Tsih bumped to version 0.1.8

### Fixed

* Mentions to `id` in docs. It should be `state_id` now.
* Fixed bug: environment agents were not being added to the simulation

## [0.20.7]

### Changed

* Creating a `time.When` from another `time.When` does not nest them anymore (it returns the argument)
@@ -4,7 +4,7 @@
Soil is an extensible and user-friendly Agent-based Social Simulator for Social Networks.
Learn how to run your own simulations with our [documentation](http://soilsim.readthedocs.io).

Follow our [tutorial](examples/tutorial/soil_tutorial.ipynb) to develop your own agent models.
Follow our [tutorial](docs/tutorial/soil_tutorial.ipynb) to develop your own agent models.

> **Warning**
> Soil 1.0 introduced many fundamental changes. Check the [documentation on how to update your simulations to work with newer versions](docs/notes_v1.0.rst)

@@ -36,7 +36,6 @@ Follow our [tutorial](examples/tutorial/soil_tutorial.ipynb) to develop your own
* A command line interface (`soil`), to quickly run simulations with different parameters
* An integrated debugger (`soil --debug`) with custom functions to print agent states and break at specific states

## Mesa compatibility

SOIL has been redesigned to integrate well with [Mesa](https://github.com/projectmesa/mesa).
12
benchmarks/noop-bench-async.csv
Normal file
12
benchmarks/noop-bench-async.csv
Normal file
@@ -0,0 +1,12 @@
|
||||
command,mean,stddev,median,user,system,min,max,parameter_sim
|
||||
python noop/mesa_batchrunner.py,1.3258325165599998,0.05822826666377271,1.31279976286,1.2978164199999997,0.25767558,1.2780627573599999,1.46763559736,mesa_batchrunner
|
||||
python noop/mesa_simulation.py,1.3915081544599999,0.07311646048704976,1.37166811936,1.35267662,0.29222067999999995,1.32746067836,1.58495303336,mesa_simulation
|
||||
python noop/soil_step.py,1.9859962588599998,0.12143759641749913,1.93586195486,2.0000750199999997,0.54126188,1.9061700903599998,2.2532835533599997,soil_step
|
||||
python noop/soil_step_pqueue.py,2.1347049971600005,0.01336179424666973,2.13492341986,2.1368160200000004,0.56862948,2.11810132936,2.16042739636,soil_step_pqueue
|
||||
python noop/soil_gens.py,2.1284937893599998,0.03030587681163665,2.13585231586,2.14158812,0.54900038,2.0768625143599997,2.19043625236,soil_gens
|
||||
python noop/soil_gens_pqueue.py,2.3469003942599995,0.019461346004472344,2.3486906343599996,2.36505852,0.54629858,2.31766326036,2.37998102136,soil_gens_pqueue
|
||||
python noop/soil_async.py,2.85755484126,0.0314955571121844,2.84774029536,2.86388112,0.55261338,2.81428668936,2.90567961636,soil_async
|
||||
python noop/soil_async_pqueue.py,3.1999731134600005,0.04432336803797717,3.20255954186,3.2162337199999995,0.5501872800000001,3.1406816913599997,3.26137401936,soil_async_pqueue
|
||||
python noop/soilent_step.py,1.30038977816,0.017973958957989845,1.30187804986,1.3231730199999998,0.5452653799999999,1.27058263436,1.31902240836,soilent_step
|
||||
python noop/soilent_step_pqueue.py,1.4708435788599998,0.027193290392962755,1.4707784423599999,1.4900387199999998,0.54749428,1.43498127536,1.53065598436,soilent_step_pqueue
|
||||
python noop/soilent_gens.py,1.6338810973599998,0.05752539125688073,1.63513330036,1.65216122,0.51846678,1.54135944036,1.7038832853599999,soilent_gens
|
|
11
benchmarks/noop-bench.csv
Normal file
11
benchmarks/noop-bench.csv
Normal file
@@ -0,0 +1,11 @@
|
||||
command,mean,stddev,median,user,system,min,max,parameter_sim
|
||||
python noop/mesa1_batchrunner.py,1.2559917394000002,0.012031173494887278,1.2572688413000002,1.2168630799999998,0.31825289999999995,1.2346063853,1.2735512493,mesa1_batchrunner
|
||||
python noop/mesa1_simulation.py,1.3024417227,0.022498874113931668,1.2994157323,1.2595484799999999,0.3087897,1.2697029703,1.3350640403,mesa1_simulation
|
||||
python noop/soil1.py,1.8789492443,0.18023367899835044,1.8186795393000001,1.86076288,0.5309521,1.7326687413000001,2.2928370642999996,soil1
|
||||
python noop/soil1_pqueue.py,1.9841675890000001,0.01735524088843906,1.9884363323,2.01830338,0.5787977999999999,1.9592171483,2.0076169282999996,soil1_pqueue
|
||||
python noop/soil2.py,2.0135188921999996,0.02869307129649681,2.0184709453,2.03951308,0.5885591,1.9680417823,2.0567112592999997,soil2
|
||||
python noop/soil2_pqueue.py,2.2367320454999997,0.024339667344486046,2.2357249777999995,2.2515216799999997,0.5978869,2.1957917303,2.2688685033,soil2_pqueue
|
||||
python noop/soilent1.py,1.1309301329,0.015133005948737871,1.1276461497999999,1.14056688,0.6027519,1.1135821423,1.1625753893,soilent1
|
||||
python noop/soilent1_pqueue.py,1.3097537665000003,0.018821977712258842,1.3073709358,1.3270259799999997,0.6000067999999998,1.2874580013,1.3381646823,soilent1_pqueue
|
||||
python noop/soilent2.py,1.5055360476,0.05166674417574119,1.4883118568,1.5121205799999997,0.5817363999999999,1.4490918363,1.6005909333000001,soilent2
|
||||
python noop/soilent2_pqueue.py,1.6622598218,0.031130739036296016,1.6588702603,1.6862567799999997,0.5854159,1.6289724583,1.7330545383,soilent2_pqueue
|
|
26
benchmarks/noop/_config.py
Normal file
26
benchmarks/noop/_config.py
Normal file
@@ -0,0 +1,26 @@
|
||||
import os
|
||||
|
||||
NUM_AGENTS = int(os.environ.get('NUM_AGENTS', 100))
|
||||
NUM_ITERS = int(os.environ.get('NUM_ITERS', 10))
|
||||
MAX_STEPS = int(os.environ.get('MAX_STEPS', 1000))
|
||||
|
||||
|
||||
def run_sim(model, **kwargs):
|
||||
from soil import Simulation
|
||||
opts = dict(model=model,
|
||||
dump=False,
|
||||
num_processes=1,
|
||||
parameters={'num_agents': NUM_AGENTS},
|
||||
seed="",
|
||||
max_steps=MAX_STEPS,
|
||||
iterations=NUM_ITERS)
|
||||
opts.update(kwargs)
|
||||
res = Simulation(**opts).run()
|
||||
|
||||
total = sum(a.num_calls for e in res for a in e.schedule.agents)
|
||||
expected = NUM_AGENTS * NUM_ITERS * MAX_STEPS
|
||||
print(total)
|
||||
print(expected)
|
||||
|
||||
assert total == expected
|
||||
return res
|
43
benchmarks/noop/mesa_batchrunner.py
Normal file
43
benchmarks/noop/mesa_batchrunner.py
Normal file
@@ -0,0 +1,43 @@
|
||||
from mesa import batch_run, DataCollector, Agent, Model
|
||||
from mesa.time import RandomActivation
|
||||
|
||||
|
||||
class NoopAgent(Agent):
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.num_calls = 0
|
||||
|
||||
def step(self):
|
||||
self.num_calls += 1
|
||||
|
||||
|
||||
class NoopModel(Model):
|
||||
def __init__(self, N):
|
||||
super().__init__()
|
||||
self.schedule = RandomActivation(self)
|
||||
for i in range(N):
|
||||
self.schedule.add(NoopAgent(self.next_id(), self))
|
||||
self.datacollector = DataCollector(model_reporters={"num_agents": lambda m: m.schedule.get_agent_count(),
|
||||
"time": lambda m: m.schedule.time},
|
||||
agent_reporters={"num_calls": "num_calls"})
|
||||
self.datacollector.collect(self)
|
||||
|
||||
def step(self):
|
||||
self.schedule.step()
|
||||
self.datacollector.collect(self)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
from _config import *
|
||||
|
||||
res = batch_run(model_cls=NoopModel,
|
||||
max_steps=MAX_STEPS,
|
||||
iterations=NUM_ITERS,
|
||||
number_processes=1,
|
||||
parameters={'N': NUM_AGENTS})
|
||||
total = sum(s["num_calls"] for s in res)
|
||||
total_agents = sum(s["num_agents"] for s in res)
|
||||
assert len(res) == NUM_AGENTS * NUM_ITERS
|
||||
assert total == NUM_AGENTS * NUM_ITERS * MAX_STEPS
|
||||
assert total_agents == NUM_AGENTS * NUM_AGENTS * NUM_ITERS
|
||||
|
37
benchmarks/noop/mesa_simulation.py
Normal file
37
benchmarks/noop/mesa_simulation.py
Normal file
@@ -0,0 +1,37 @@
|
||||
from mesa import batch_run, DataCollector, Agent, Model
|
||||
from mesa.time import RandomActivation
|
||||
from soil import Simulation
|
||||
from _config import *
|
||||
|
||||
|
||||
class NoopAgent(Agent):
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.num_calls = 0
|
||||
|
||||
def step(self):
|
||||
self.num_calls += 1
|
||||
|
||||
|
||||
class NoopModel(Model):
|
||||
def __init__(self, num_agents, *args, **kwargs):
|
||||
super().__init__()
|
||||
self.schedule = RandomActivation(self)
|
||||
for i in range(num_agents):
|
||||
self.schedule.add(NoopAgent(self.next_id(), self))
|
||||
self.datacollector = DataCollector(model_reporters={"num_agents": lambda m: m.schedule.get_agent_count(),
|
||||
"time": lambda m: m.schedule.time},
|
||||
agent_reporters={"num_calls": "num_calls"})
|
||||
self.datacollector.collect(self)
|
||||
|
||||
def step(self):
|
||||
self.schedule.step()
|
||||
self.datacollector.collect(self)
|
||||
|
||||
|
||||
def run():
|
||||
run_sim(model=NoopModel)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
run()
|
3
benchmarks/noop/noop-bench.csv
Normal file
3
benchmarks/noop/noop-bench.csv
Normal file
@@ -0,0 +1,3 @@
|
||||
command,mean,stddev,median,user,system,min,max,parameter_sim
|
||||
python mesa1_batchrunner.py,1.2932078178200002,0.05649377020829272,1.2705532802200001,1.25902256,0.27242284,1.22210926572,1.40867459172,mesa1_batchrunner
|
||||
python mesa1_simulation.py,1.81112963812,0.015491072368938567,1.81342524572,1.8594407599999996,0.8005329399999999,1.78538603972,1.84176361172,mesa1_simulation
|
|
24
benchmarks/noop/soil_async.py
Normal file
24
benchmarks/noop/soil_async.py
Normal file
@@ -0,0 +1,24 @@
|
||||
from soil import BaseAgent, Environment, Simulation
|
||||
|
||||
|
||||
class NoopAgent(BaseAgent):
|
||||
num_calls = 0
|
||||
|
||||
async def step(self):
|
||||
while True:
|
||||
self.num_calls += 1
|
||||
await self.delay()
|
||||
|
||||
|
||||
class NoopEnvironment(Environment):
|
||||
num_agents = 100
|
||||
|
||||
def init(self):
|
||||
self.add_agents(NoopAgent, k=self.num_agents)
|
||||
self.add_agent_reporter("num_calls")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
from _config import *
|
||||
|
||||
run_sim(model=NoopEnvironment)
|
25
benchmarks/noop/soil_async_pqueue.py
Normal file
25
benchmarks/noop/soil_async_pqueue.py
Normal file
@@ -0,0 +1,25 @@
|
||||
from soil import BaseAgent, Environment, Simulation, PQueueActivation
|
||||
|
||||
|
||||
class NoopAgent(BaseAgent):
|
||||
num_calls = 0
|
||||
|
||||
async def step(self):
|
||||
while True:
|
||||
self.num_calls += 1
|
||||
await self.delay()
|
||||
|
||||
|
||||
class NoopEnvironment(Environment):
|
||||
num_agents = 100
|
||||
schedule_class = PQueueActivation
|
||||
|
||||
def init(self):
|
||||
self.add_agents(NoopAgent, k=self.num_agents)
|
||||
self.add_agent_reporter("num_calls")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
from _config import *
|
||||
|
||||
run_sim(model=NoopEnvironment)
|
24
benchmarks/noop/soil_gens.py
Normal file
24
benchmarks/noop/soil_gens.py
Normal file
@@ -0,0 +1,24 @@
|
||||
from soil import BaseAgent, Environment, Simulation
|
||||
|
||||
|
||||
class NoopAgent(BaseAgent):
|
||||
num_calls = 0
|
||||
|
||||
def step(self):
|
||||
while True:
|
||||
self.num_calls += 1
|
||||
yield self.delay()
|
||||
|
||||
|
||||
class NoopEnvironment(Environment):
|
||||
num_agents = 100
|
||||
|
||||
def init(self):
|
||||
self.add_agents(NoopAgent, k=self.num_agents)
|
||||
self.add_agent_reporter("num_calls")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
from _config import *
|
||||
|
||||
run_sim(model=NoopEnvironment)
|
25
benchmarks/noop/soil_gens_pqueue.py
Normal file
25
benchmarks/noop/soil_gens_pqueue.py
Normal file
@@ -0,0 +1,25 @@
|
||||
from soil import BaseAgent, Environment, Simulation, PQueueActivation
|
||||
|
||||
|
||||
class NoopAgent(BaseAgent):
|
||||
num_calls = 0
|
||||
|
||||
def step(self):
|
||||
while True:
|
||||
self.num_calls += 1
|
||||
yield self.delay()
|
||||
|
||||
|
||||
class NoopEnvironment(Environment):
|
||||
num_agents = 100
|
||||
schedule_class = PQueueActivation
|
||||
|
||||
def init(self):
|
||||
self.add_agents(NoopAgent, k=self.num_agents)
|
||||
self.add_agent_reporter("num_calls")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
from _config import *
|
||||
|
||||
run_sim(model=NoopEnvironment)
|
21
benchmarks/noop/soil_state.py
Normal file
21
benchmarks/noop/soil_state.py
Normal file
@@ -0,0 +1,21 @@
|
||||
from soil import Agent, Environment, Simulation, state
|
||||
|
||||
|
||||
class NoopAgent(Agent):
|
||||
num_calls = 0
|
||||
|
||||
@state(default=True)
|
||||
def unique(self):
|
||||
self.num_calls += 1
|
||||
|
||||
class NoopEnvironment(Environment):
|
||||
num_agents = 100
|
||||
|
||||
def init(self):
|
||||
self.add_agents(NoopAgent, k=self.num_agents)
|
||||
self.add_agent_reporter("num_calls")
|
||||
|
||||
|
||||
from _config import *
|
||||
|
||||
run_sim(model=NoopEnvironment)
|
20
benchmarks/noop/soil_step.py
Normal file
20
benchmarks/noop/soil_step.py
Normal file
@@ -0,0 +1,20 @@
|
||||
from soil import Agent, Environment, Simulation
|
||||
|
||||
|
||||
class NoopAgent(Agent):
|
||||
num_calls = 0
|
||||
|
||||
def step(self):
|
||||
self.num_calls += 1
|
||||
|
||||
class NoopEnvironment(Environment):
|
||||
num_agents = 100
|
||||
|
||||
def init(self):
|
||||
self.add_agents(NoopAgent, k=self.num_agents)
|
||||
self.add_agent_reporter("num_calls")
|
||||
|
||||
|
||||
from _config import *
|
||||
|
||||
run_sim(model=NoopEnvironment)
|
22
benchmarks/noop/soil_step_pqueue.py
Normal file
22
benchmarks/noop/soil_step_pqueue.py
Normal file
@@ -0,0 +1,22 @@
|
||||
from soil import BaseAgent, Environment, Simulation, PQueueActivation
|
||||
|
||||
|
||||
class NoopAgent(BaseAgent):
|
||||
num_calls = 0
|
||||
|
||||
def step(self):
|
||||
self.num_calls += 1
|
||||
|
||||
class NoopEnvironment(Environment):
|
||||
num_agents = 100
|
||||
schedule_class = PQueueActivation
|
||||
|
||||
def init(self):
|
||||
self.add_agents(NoopAgent, k=self.num_agents)
|
||||
self.add_agent_reporter("num_calls")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
from _config import *
|
||||
|
||||
run_sim(model=NoopEnvironment)
|
29
benchmarks/noop/soilent_async.py
Normal file
29
benchmarks/noop/soilent_async.py
Normal file
@@ -0,0 +1,29 @@
|
||||
from soil import Agent, Environment, Simulation
|
||||
from soil.time import SoilentActivation
|
||||
|
||||
|
||||
class NoopAgent(Agent):
|
||||
num_calls = 0
|
||||
|
||||
async def step(self):
|
||||
while True:
|
||||
self.num_calls += 1
|
||||
# yield self.delay(1)
|
||||
await self.delay()
|
||||
|
||||
|
||||
class NoopEnvironment(Environment):
|
||||
num_agents = 100
|
||||
schedule_class = SoilentActivation
|
||||
|
||||
def init(self):
|
||||
self.add_agents(NoopAgent, k=self.num_agents)
|
||||
self.add_agent_reporter("num_calls")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
from _config import *
|
||||
|
||||
res = run_sim(model=NoopEnvironment)
|
||||
for r in res:
|
||||
assert isinstance(r.schedule, SoilentActivation)
|
27
benchmarks/noop/soilent_async_pqueue.py
Normal file
27
benchmarks/noop/soilent_async_pqueue.py
Normal file
@@ -0,0 +1,27 @@
|
||||
from soil import Agent, Environment
|
||||
from soil.time import SoilentPQueueActivation
|
||||
|
||||
|
||||
class NoopAgent(Agent):
|
||||
num_calls = 0
|
||||
|
||||
async def step(self):
|
||||
while True:
|
||||
self.num_calls += 1
|
||||
await self.delay()
|
||||
|
||||
class NoopEnvironment(Environment):
|
||||
num_agents = 100
|
||||
schedule_class = SoilentPQueueActivation
|
||||
|
||||
def init(self):
|
||||
self.add_agents(NoopAgent, k=self.num_agents)
|
||||
self.add_agent_reporter("num_calls")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
from _config import *
|
||||
|
||||
res = run_sim(model=NoopEnvironment)
|
||||
for r in res:
|
||||
assert isinstance(r.schedule, SoilentPQueueActivation)
|
28
benchmarks/noop/soilent_gens.py
Normal file
28
benchmarks/noop/soilent_gens.py
Normal file
@@ -0,0 +1,28 @@
|
||||
from soil import Agent, Environment, Simulation
|
||||
from soil.time import SoilentActivation
|
||||
|
||||
|
||||
class NoopAgent(Agent):
|
||||
num_calls = 0
|
||||
|
||||
def step(self):
|
||||
while True:
|
||||
self.num_calls += 1
|
||||
# yield self.delay(1)
|
||||
yield self.delay()
|
||||
|
||||
class NoopEnvironment(Environment):
|
||||
num_agents = 100
|
||||
schedule_class = SoilentActivation
|
||||
|
||||
def init(self):
|
||||
self.add_agents(NoopAgent, k=self.num_agents)
|
||||
self.add_agent_reporter("num_calls")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
from _config import *
|
||||
|
||||
res = run_sim(model=NoopEnvironment)
|
||||
for r in res:
|
||||
assert isinstance(r.schedule, SoilentActivation)
|
28
benchmarks/noop/soilent_gens_pqueue.py
Normal file
28
benchmarks/noop/soilent_gens_pqueue.py
Normal file
@@ -0,0 +1,28 @@
|
||||
from soil import Agent, Environment
|
||||
from soil.time import SoilentPQueueActivation
|
||||
|
||||
|
||||
class NoopAgent(Agent):
|
||||
num_calls = 0
|
||||
|
||||
def step(self):
|
||||
while True:
|
||||
self.num_calls += 1
|
||||
# yield self.delay(1)
|
||||
yield
|
||||
|
||||
class NoopEnvironment(Environment):
|
||||
num_agents = 100
|
||||
schedule_class = SoilentPQueueActivation
|
||||
|
||||
def init(self):
|
||||
self.add_agents(NoopAgent, k=self.num_agents)
|
||||
self.add_agent_reporter("num_calls")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
from _config import *
|
||||
|
||||
res = run_sim(model=NoopEnvironment)
|
||||
for r in res:
|
||||
assert isinstance(r.schedule, SoilentPQueueActivation)
|
30
benchmarks/noop/soilent_state.py
Normal file
30
benchmarks/noop/soilent_state.py
Normal file
@@ -0,0 +1,30 @@
|
||||
from soil import Agent, Environment, Simulation, state
|
||||
from soil.time import SoilentActivation
|
||||
|
||||
|
||||
class NoopAgent(Agent):
|
||||
num_calls = 0
|
||||
|
||||
@state(default=True)
|
||||
async def unique(self):
|
||||
while True:
|
||||
self.num_calls += 1
|
||||
# yield self.delay(1)
|
||||
await self.delay()
|
||||
|
||||
|
||||
class NoopEnvironment(Environment):
|
||||
num_agents = 100
|
||||
schedule_class = SoilentActivation
|
||||
|
||||
def init(self):
|
||||
self.add_agents(NoopAgent, k=self.num_agents)
|
||||
self.add_agent_reporter("num_calls")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
from _config import *
|
||||
|
||||
res = run_sim(model=NoopEnvironment)
|
||||
for r in res:
|
||||
assert isinstance(r.schedule, SoilentActivation)
|
24
benchmarks/noop/soilent_step.py
Normal file
24
benchmarks/noop/soilent_step.py
Normal file
@@ -0,0 +1,24 @@
|
||||
from soil import BaseAgent, Environment, Simulation
|
||||
from soil.time import SoilentActivation
|
||||
|
||||
|
||||
class NoopAgent(BaseAgent):
|
||||
num_calls = 0
|
||||
|
||||
def step(self):
|
||||
self.num_calls += 1
|
||||
|
||||
class NoopEnvironment(Environment):
|
||||
num_agents = 100
|
||||
schedule_class = SoilentActivation
|
||||
|
||||
def init(self):
|
||||
self.add_agents(NoopAgent, k=self.num_agents)
|
||||
self.add_agent_reporter("num_calls")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
from _config import *
|
||||
res = run_sim(model=NoopEnvironment)
|
||||
for r in res:
|
||||
assert isinstance(r.schedule, SoilentActivation)
|
24
benchmarks/noop/soilent_step_pqueue.py
Normal file
24
benchmarks/noop/soilent_step_pqueue.py
Normal file
@@ -0,0 +1,24 @@
|
||||
from soil import BaseAgent, Environment, Simulation
|
||||
from soil.time import SoilentPQueueActivation
|
||||
|
||||
|
||||
class NoopAgent(BaseAgent):
|
||||
num_calls = 0
|
||||
|
||||
def step(self):
|
||||
self.num_calls += 1
|
||||
|
||||
class NoopEnvironment(Environment):
|
||||
num_agents = 100
|
||||
schedule_class = SoilentPQueueActivation
|
||||
|
||||
def init(self):
|
||||
self.add_agents(NoopAgent, k=self.num_agents)
|
||||
self.add_agent_reporter("num_calls")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
from _config import *
|
||||
res = run_sim(model=NoopEnvironment)
|
||||
for r in res:
|
||||
assert isinstance(r.schedule, SoilentPQueueActivation)
|
19
benchmarks/run.py
Executable file
19
benchmarks/run.py
Executable file
@@ -0,0 +1,19 @@
|
||||
#!/bin/env python
|
||||
import sys
|
||||
import os
|
||||
import subprocess
|
||||
import argparse
|
||||
parser = argparse.ArgumentParser(
|
||||
prog='Profiler for soil')
|
||||
parser.add_argument('--suffix', default=None)
|
||||
parser.add_argument('files', nargs="+")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
for fname in args.files:
|
||||
suffix = ("_" + args.suffix) if args.suffix else ""
|
||||
simname = f"{fname.replace('/', '-')}{suffix}"
|
||||
profpath = os.path.join("profs", simname + ".prof")
|
||||
|
||||
print(f"Running {fname} and saving profile to {profpath}")
|
||||
subprocess.call(["python", "-m", "cProfile", "-o", profpath, fname])
|
4
benchmarks/virusonnetwork.csv
Normal file
4
benchmarks/virusonnetwork.csv
Normal file
@@ -0,0 +1,4 @@
|
||||
command,mean,stddev,median,user,system,min,max,parameter_sim
|
||||
python virusonnetwork/mesa_basic.py,3.8381473157,0.0518143371442526,3.8475315791,3.873109219999999,0.55102658,3.7523016936,3.9095182436,mesa_basic.py
|
||||
python virusonnetwork/soil_step.py,3.2167258977000004,0.02337131987357665,3.2257620261,3.28374132,0.51343958,3.1792271306,3.2511521286000002,soil_step.py
|
||||
python virusonnetwork/soil_states.py,3.4908183217,0.03726734070349347,3.4912775086,3.5684004200000006,0.50416068,3.4272087936,3.5529207346000002,soil_states.py
|
|
38
benchmarks/virusonnetwork/_config.py
Normal file
38
benchmarks/virusonnetwork/_config.py
Normal file
@@ -0,0 +1,38 @@
|
||||
import os
|
||||
from soil import simulation
|
||||
|
||||
NUM_AGENTS = int(os.environ.get('NUM_AGENTS', 100))
|
||||
NUM_ITERS = int(os.environ.get('NUM_ITERS', 10))
|
||||
MAX_STEPS = int(os.environ.get('MAX_STEPS', 500))
|
||||
|
||||
|
||||
def run_sim(model, **kwargs):
|
||||
from soil import Simulation
|
||||
opts = dict(model=model,
|
||||
dump=False,
|
||||
num_processes=1,
|
||||
parameters={'num_nodes': NUM_AGENTS,
|
||||
"avg_node_degree": 3,
|
||||
"initial_outbreak_size": 5,
|
||||
"virus_spread_chance": 0.25,
|
||||
"virus_check_frequency": 0.25,
|
||||
"recovery_chance": 0.3,
|
||||
"gain_resistance_chance": 0.1,
|
||||
},
|
||||
max_steps=MAX_STEPS,
|
||||
iterations=NUM_ITERS)
|
||||
opts.update(kwargs)
|
||||
its = Simulation(**opts).run()
|
||||
assert len(its) == NUM_ITERS
|
||||
|
||||
if not simulation._AVOID_RUNNING:
|
||||
ratios = list(it.resistant_susceptible_ratio for it in its)
|
||||
print("Max - Avg - Min ratio:", max(ratios), sum(ratios)/len(ratios), min(ratios))
|
||||
infected = list(it.number_infected for it in its)
|
||||
print("Max - Avg - Min infected:", max(infected), sum(infected)/len(infected), min(infected))
|
||||
|
||||
assert all((it.schedule.steps == MAX_STEPS or it.number_infected == 0) for it in its)
|
||||
assert all(sum([it.number_susceptible,
|
||||
it.number_infected,
|
||||
it.number_resistant]) == NUM_AGENTS for it in its)
|
||||
return its
|
180
benchmarks/virusonnetwork/mesa_basic.py
Normal file
180
benchmarks/virusonnetwork/mesa_basic.py
Normal file
@@ -0,0 +1,180 @@
|
||||
# Verbatim copy from mesa
|
||||
# https://github.com/projectmesa/mesa/blob/976ddfc8a1e5feaaf8007a7abaa9abc7093881a0/examples/virus_on_network/virus_on_network/model.py
|
||||
import math
|
||||
from enum import Enum
|
||||
import networkx as nx
|
||||
|
||||
import mesa
|
||||
|
||||
|
||||
class State(Enum):
|
||||
SUSCEPTIBLE = 0
|
||||
INFECTED = 1
|
||||
RESISTANT = 2
|
||||
|
||||
|
||||
def number_state(model, state):
|
||||
return sum(1 for a in model.grid.get_all_cell_contents() if a.state is state)
|
||||
|
||||
|
||||
def number_infected(model):
|
||||
return number_state(model, State.INFECTED)
|
||||
|
||||
|
||||
def number_susceptible(model):
|
||||
return number_state(model, State.SUSCEPTIBLE)
|
||||
|
||||
|
||||
def number_resistant(model):
|
||||
return number_state(model, State.RESISTANT)
|
||||
|
||||
|
||||
class VirusOnNetwork(mesa.Model):
|
||||
"""A virus model with some number of agents"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*args,
|
||||
num_nodes=10,
|
||||
avg_node_degree=3,
|
||||
initial_outbreak_size=1,
|
||||
virus_spread_chance=0.4,
|
||||
virus_check_frequency=0.4,
|
||||
recovery_chance=0.3,
|
||||
gain_resistance_chance=0.5,
|
||||
**kwargs,
|
||||
):
|
||||
|
||||
self.num_nodes = num_nodes
|
||||
prob = avg_node_degree / self.num_nodes
|
||||
self.G = nx.erdos_renyi_graph(n=self.num_nodes, p=prob)
|
||||
self.grid = mesa.space.NetworkGrid(self.G)
|
||||
self.schedule = mesa.time.RandomActivation(self)
|
||||
self.initial_outbreak_size = (
|
||||
initial_outbreak_size if initial_outbreak_size <= num_nodes else num_nodes
|
||||
)
|
||||
self.virus_spread_chance = virus_spread_chance
|
||||
self.virus_check_frequency = virus_check_frequency
|
||||
self.recovery_chance = recovery_chance
|
||||
self.gain_resistance_chance = gain_resistance_chance
|
||||
|
||||
self.datacollector = mesa.DataCollector(
|
||||
{
|
||||
"Ratio": "resistant_susceptible_ratio",
|
||||
"Infected": number_infected,
|
||||
"Susceptible": number_susceptible,
|
||||
"Resistant": number_resistant,
|
||||
}
|
||||
)
|
||||
|
||||
# Create agents
|
||||
for i, node in enumerate(self.G.nodes()):
|
||||
a = VirusAgent(
|
||||
i,
|
||||
self,
|
||||
State.SUSCEPTIBLE,
|
||||
self.virus_spread_chance,
|
||||
self.virus_check_frequency,
|
||||
self.recovery_chance,
|
||||
self.gain_resistance_chance,
|
||||
)
|
||||
self.schedule.add(a)
|
||||
# Add the agent to the node
|
||||
self.grid.place_agent(a, node)
|
||||
|
||||
# Infect some nodes
|
||||
infected_nodes = self.random.sample(list(self.G), self.initial_outbreak_size)
|
||||
for a in self.grid.get_cell_list_contents(infected_nodes):
|
||||
a.state = State.INFECTED
|
||||
|
||||
self.running = True
|
||||
self.datacollector.collect(self)
|
||||
|
||||
@property
|
||||
def number_susceptible(self):
|
||||
return number_susceptible(self)
|
||||
@property
|
||||
def number_resistant(self):
|
||||
return number_resistant(self)
|
||||
@property
|
||||
def number_infected(self):
|
||||
return number_infected(self)
|
||||
|
||||
@property
|
||||
def resistant_susceptible_ratio(self):
|
||||
try:
|
||||
return number_state(self, State.RESISTANT) / number_state(
|
||||
self, State.SUSCEPTIBLE
|
||||
)
|
||||
except ZeroDivisionError:
|
||||
return math.inf
|
||||
|
||||
def step(self):
|
||||
self.schedule.step()
|
||||
# collect data
|
||||
self.datacollector.collect(self)
|
||||
|
||||
def run_model(self, n):
|
||||
for i in range(n):
|
||||
self.step()
|
||||
|
||||
|
||||
class VirusAgent(mesa.Agent):
|
||||
def __init__(
|
||||
self,
|
||||
unique_id,
|
||||
model,
|
||||
initial_state,
|
||||
virus_spread_chance,
|
||||
virus_check_frequency,
|
||||
recovery_chance,
|
||||
gain_resistance_chance,
|
||||
):
|
||||
super().__init__(unique_id, model)
|
||||
|
||||
self.state = initial_state
|
||||
|
||||
self.virus_spread_chance = virus_spread_chance
|
||||
self.virus_check_frequency = virus_check_frequency
|
||||
self.recovery_chance = recovery_chance
|
||||
self.gain_resistance_chance = gain_resistance_chance
|
||||
|
||||
def try_to_infect_neighbors(self):
|
||||
neighbors_nodes = self.model.grid.get_neighbors(self.pos, include_center=False)
|
||||
susceptible_neighbors = [
|
||||
agent
|
||||
for agent in self.model.grid.get_cell_list_contents(neighbors_nodes)
|
||||
if agent.state is State.SUSCEPTIBLE
|
||||
]
|
||||
for a in susceptible_neighbors:
|
||||
if self.random.random() < self.virus_spread_chance:
|
||||
a.state = State.INFECTED
|
||||
|
||||
def try_gain_resistance(self):
|
||||
if self.random.random() < self.gain_resistance_chance:
|
||||
self.state = State.RESISTANT
|
||||
|
||||
def try_remove_infection(self):
|
||||
# Try to remove
|
||||
if self.random.random() < self.recovery_chance:
|
||||
# Success
|
||||
self.state = State.SUSCEPTIBLE
|
||||
self.try_gain_resistance()
|
||||
else:
|
||||
# Failed
|
||||
self.state = State.INFECTED
|
||||
|
||||
def try_check_situation(self):
|
||||
if self.random.random() < self.virus_check_frequency:
|
||||
# Checking...
|
||||
if self.state is State.INFECTED:
|
||||
self.try_remove_infection()
|
||||
|
||||
def step(self):
|
||||
if self.state is State.INFECTED:
|
||||
self.try_to_infect_neighbors()
|
||||
self.try_check_situation()
|
||||
|
||||
|
||||
from _config import run_sim
|
||||
run_sim(model=VirusOnNetwork)
|
91
benchmarks/virusonnetwork/soil_states.py
Normal file
91
benchmarks/virusonnetwork/soil_states.py
Normal file
@@ -0,0 +1,91 @@
|
||||
# Verbatim copy from mesa
|
||||
# https://github.com/projectmesa/mesa/blob/976ddfc8a1e5feaaf8007a7abaa9abc7093881a0/examples/virus_on_network/virus_on_network/model.py
|
||||
import math
|
||||
from enum import Enum
|
||||
import networkx as nx
|
||||
|
||||
from soil import *
|
||||
|
||||
|
||||
class VirusOnNetwork(Environment):
|
||||
"""A virus model with some number of agents"""
|
||||
num_nodes = 10
|
||||
avg_node_degree = 3
|
||||
initial_outbreak_size = 1
|
||||
virus_spread_chance = 0.4
|
||||
virus_check_frequency = 0.4
|
||||
recovery_chance = 0
|
||||
gain_resistance_chance = 0
|
||||
|
||||
def init(self):
|
||||
prob = self.avg_node_degree / self.num_nodes
|
||||
# Use internal seed with the networkx generator
|
||||
self.create_network(generator=nx.erdos_renyi_graph, n=self.num_nodes, p=prob)
|
||||
|
||||
self.initial_outbreak_size = min(self.initial_outbreak_size, self.num_nodes)
|
||||
self.populate_network(VirusAgent)
|
||||
|
||||
# Infect some nodes
|
||||
infected_nodes = self.random.sample(list(self.G), self.initial_outbreak_size)
|
||||
for a in self.agents(node_id=infected_nodes):
|
||||
a.set_state(VirusAgent.infected)
|
||||
assert self.number_infected == self.initial_outbreak_size
|
||||
|
||||
def step(self):
|
||||
super().step()
|
||||
|
||||
@report
|
||||
@property
|
||||
def resistant_susceptible_ratio(self):
|
||||
try:
|
||||
return self.number_resistant / self.number_susceptible
|
||||
except ZeroDivisionError:
|
||||
return math.inf
|
||||
|
||||
@report
|
||||
@property
|
||||
def number_infected(self):
|
||||
return self.count_agents(state_id=VirusAgent.infected.id)
|
||||
|
||||
@report
|
||||
@property
|
||||
def number_susceptible(self):
|
||||
return self.count_agents(state_id=VirusAgent.susceptible.id)
|
||||
|
||||
@report
|
||||
@property
|
||||
def number_resistant(self):
|
||||
return self.count_agents(state_id=VirusAgent.resistant.id)
|
||||
|
||||
|
||||
class VirusAgent(Agent):
|
||||
virus_spread_chance = None # Inherit from model
|
||||
virus_check_frequency = None # Inherit from model
|
||||
recovery_chance = None # Inherit from model
|
||||
gain_resistance_chance = None # Inherit from model
|
||||
|
||||
@state(default=True)
|
||||
async def susceptible(self):
|
||||
await self.received()
|
||||
return self.infected
|
||||
|
||||
@state
|
||||
def infected(self):
|
||||
susceptible_neighbors = self.get_neighbors(state_id=self.susceptible.id)
|
||||
for a in susceptible_neighbors:
|
||||
if self.prob(self.virus_spread_chance):
|
||||
a.tell(True, sender=self)
|
||||
if self.prob(self.virus_check_frequency):
|
||||
if self.prob(self.recovery_chance):
|
||||
if self.prob(self.gain_resistance_chance):
|
||||
return self.resistant
|
||||
else:
|
||||
return self.susceptible
|
||||
|
||||
@state
|
||||
def resistant(self):
|
||||
return self.at(INFINITY)
|
||||
|
||||
|
||||
from _config import run_sim
|
||||
run_sim(model=VirusOnNetwork)
|
104
benchmarks/virusonnetwork/soil_step.py
Normal file
104
benchmarks/virusonnetwork/soil_step.py
Normal file
@@ -0,0 +1,104 @@
|
||||
# Verbatim copy from mesa
|
||||
# https://github.com/projectmesa/mesa/blob/976ddfc8a1e5feaaf8007a7abaa9abc7093881a0/examples/virus_on_network/virus_on_network/model.py
|
||||
import math
|
||||
from enum import Enum
|
||||
import networkx as nx
|
||||
|
||||
from soil import *
|
||||
|
||||
|
||||
class State(Enum):
|
||||
SUSCEPTIBLE = 0
|
||||
INFECTED = 1
|
||||
RESISTANT = 2
|
||||
|
||||
|
||||
class VirusOnNetwork(Environment):
|
||||
"""A virus model with some number of agents"""
|
||||
num_nodes = 10
|
||||
avg_node_degree = 3
|
||||
initial_outbreak_size = 1
|
||||
virus_spread_chance = 0.4
|
||||
virus_check_frequency = 0.4
|
||||
recovery_chance = 0
|
||||
gain_resistance_chance = 0
|
||||
|
||||
def init(self):
|
||||
prob = self.avg_node_degree / self.num_nodes
|
||||
# Use internal seed with the networkx generator
|
||||
self.create_network(generator=nx.erdos_renyi_graph, n=self.num_nodes, p=prob)
|
||||
|
||||
self.initial_outbreak_size = min(self.initial_outbreak_size, self.num_nodes)
|
||||
self.populate_network(VirusAgent)
|
||||
|
||||
# Infect some nodes
|
||||
infected_nodes = self.random.sample(list(self.G), self.initial_outbreak_size)
|
||||
for a in self.agents(node_id=infected_nodes):
|
||||
a.status = State.INFECTED
|
||||
assert self.number_infected == self.initial_outbreak_size
|
||||
|
||||
@report
|
||||
@property
|
||||
def resistant_susceptible_ratio(self):
|
||||
try:
|
||||
return self.number_resistant / self.number_susceptible
|
||||
except ZeroDivisionError:
|
||||
return math.inf
|
||||
|
||||
@report
|
||||
@property
|
||||
def number_infected(self):
|
||||
return self.count_agents(status=State.INFECTED)
|
||||
|
||||
@report
|
||||
@property
|
||||
def number_susceptible(self):
|
||||
return self.count_agents(status=State.SUSCEPTIBLE)
|
||||
|
||||
@report
|
||||
@property
|
||||
def number_resistant(self):
|
||||
return self.count_agents(status=State.RESISTANT)
|
||||
|
||||
|
||||
|
||||
class VirusAgent(Agent):
|
||||
status = State.SUSCEPTIBLE
|
||||
virus_spread_chance = None # Inherit from model
|
||||
virus_check_frequency = None # Inherit from model
|
||||
recovery_chance = None # Inherit from model
|
||||
gain_resistance_chance = None # Inherit from model
|
||||
|
||||
def try_to_infect_neighbors(self):
|
||||
susceptible_neighbors = self.get_neighbors(status=State.SUSCEPTIBLE)
|
||||
for a in susceptible_neighbors:
|
||||
if self.prob(self.virus_spread_chance):
|
||||
a.status = State.INFECTED
|
||||
|
||||
def try_gain_resistance(self):
|
||||
if self.prob(self.gain_resistance_chance):
|
||||
self.status = State.RESISTANT
|
||||
return self.at(INFINITY)
|
||||
|
||||
def try_remove_infection(self):
|
||||
# Try to remove
|
||||
if self.prob(self.recovery_chance):
|
||||
# Success
|
||||
self.status = State.SUSCEPTIBLE
|
||||
return self.try_gain_resistance()
|
||||
|
||||
def try_check_situation(self):
|
||||
if self.prob(self.virus_check_frequency):
|
||||
# Checking...
|
||||
if self.status is State.INFECTED:
|
||||
return self.try_remove_infection()
|
||||
|
||||
def step(self):
|
||||
if self.status is State.INFECTED:
|
||||
self.try_to_infect_neighbors()
|
||||
return self.try_check_situation()
|
||||
|
||||
|
||||
|
||||
from _config import run_sim
|
||||
run_sim(model=VirusOnNetwork)
|
10 docs/conf.py
@@ -31,7 +31,10 @@
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['IPython.sphinxext.ipython_console_highlighting']
extensions = [
    "IPython.sphinxext.ipython_console_highlighting",
    "nbsphinx",
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -64,7 +67,7 @@ release = '0.1'
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
language = "en"

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
@@ -152,6 +155,3 @@ texinfo_documents = [
    author, 'Soil', 'One line description of project.',
    'Miscellaneous'),
]
@@ -1,40 +0,0 @@
|
||||
---
|
||||
name: MyExampleSimulation
|
||||
max_time: 50
|
||||
num_trials: 3
|
||||
interval: 2
|
||||
model_params:
|
||||
topology:
|
||||
params:
|
||||
generator: barabasi_albert_graph
|
||||
n: 100
|
||||
m: 2
|
||||
agents:
|
||||
distribution:
|
||||
- agent_class: SISaModel
|
||||
topology: True
|
||||
ratio: 0.1
|
||||
state:
|
||||
state_id: content
|
||||
- agent_class: SISaModel
|
||||
topology: True
|
||||
ratio: .1
|
||||
state:
|
||||
state_id: discontent
|
||||
- agent_class: SISaModel
|
||||
topology: True
|
||||
ratio: 0.8
|
||||
state:
|
||||
state_id: neutral
|
||||
prob_infect: 0.075
|
||||
neutral_discontent_spon_prob: 0.1
|
||||
neutral_discontent_infected_prob: 0.3
|
||||
neutral_content_spon_prob: 0.3
|
||||
neutral_content_infected_prob: 0.4
|
||||
discontent_neutral: 0.5
|
||||
discontent_content: 0.5
|
||||
variance_d_c: 0.2
|
||||
content_discontent: 0.2
|
||||
variance_c_d: 0.2
|
||||
content_neutral: 0.2
|
||||
standard_variance: 1
|
@@ -2,19 +2,20 @@ Welcome to Soil's documentation!
|
||||
================================
|
||||
|
||||
Soil is an opinionated Agent-based Social Simulator in Python focused on Social Networks.
|
||||
To get started developing your own simulations and agent behaviors, check out our :doc:`Tutorial <tutorial/soil_tutorial>` and the `examples on GitHub <https://github.com/gsi-upm/soil/tree/master/examples>`.
|
||||
|
||||
Soil can be installed through pip (see more details in the :doc:`installation` page):.
|
||||
|
||||
.. image:: soil.png
|
||||
:width: 80%
|
||||
:align: center
|
||||
|
||||
Soil can be installed through pip (see more details in the :doc:`installation` page):
|
||||
|
||||
.. code:: bash
|
||||
|
||||
pip install soil
|
||||
|
||||
|
||||
To get started developing your own simulations and agent behaviors, check out our :doc:`Tutorial <soil_tutorial>` and the `examples on GitHub <https://github.com/gsi-upm/soil/tree/master/examples>`_.
|
||||
|
||||
If you use Soil in your research, do not forget to cite this paper:
|
||||
|
||||
@@ -46,7 +47,9 @@ If you use Soil in your research, do not forget to cite this paper:
|
||||
:caption: Learn more about soil:
|
||||
|
||||
installation
|
||||
Tutorial <soil_tutorial>
|
||||
Tutorial <tutorial/soil_tutorial>
|
||||
notes_v1.0
|
||||
soil-vs
|
||||
|
||||
..
|
||||
|
||||
|
@@ -1,22 +0,0 @@
|
||||
Mesa compatibility
|
||||
------------------
|
||||
|
||||
Soil is in the process of becoming fully compatible with MESA.
|
||||
The idea is to provide a set of modular classes and functions that extend the functionality of mesa, whilst staying compatible.
|
||||
In the end, it should be possible to add regular mesa agents to a soil simulation, or use a soil agent within a mesa simulation/model.
|
||||
|
||||
This is a non-exhaustive list of tasks to achieve compatibility:
|
||||
|
||||
- [ ] Integrate `soil.Simulation` with mesa's runners:
|
||||
- [ ] `soil.Simulation` could mimic/become a `mesa.batchrunner`
|
||||
- [ ] Integrate `soil.Environment` with `mesa.Model`:
|
||||
- [x] `Soil.Environment` inherits from `mesa.Model`
|
||||
- [x] `Soil.Environment` includes a Mesa-like Scheduler (see the `soil.time` module.
|
||||
- [ ] Allow for `mesa.Model` to be used in a simulation.
|
||||
- [ ] Integrate `soil.Agent` with `mesa.Agent`:
|
||||
- [x] Rename agent.id to unique_id?
|
||||
- [x] mesa agents can be used in soil simulations (see `examples/mesa`)
|
||||
- [ ] Provide examples
|
||||
- [ ] Using mesa modules in a soil simulation
|
||||
- [ ] Using soil modules in a mesa simulation
|
||||
- [ ] Document the new APIs and usage
|
@@ -1,3 +1,6 @@
|
||||
Upgrading to Soil 1.0
|
||||
---------------------
|
||||
|
||||
What are the main changes in version 1.0?
|
||||
#########################################
|
||||
|
||||
|
Four binary image files removed (not shown): 4.7 KiB, 11 KiB, 26 KiB, and 31 KiB.
@@ -1 +1,2 @@
ipython>=7.31.1
nbsphinx==0.9.1
@@ -1,4 +1,8 @@
### MESA
Soil vs other ABM frameworks
============================

MESA
----

Starting with version 0.3, Soil has been redesigned to complement Mesa, while remaining compatible with it.
That means that every component in Soil (i.e., Models, Environments, etc.) can be mixed with existing mesa components.
@@ -10,3 +14,42 @@ Here are some reasons to use Soil instead of plain mesa:
- Functions to automatically populate a topology with an agent distribution (i.e., different ratios of agent class and state); see the sketch below
- The `soil.Simulation` class allows you to run multiple instances of the same experiment (i.e., multiple trials with the same parameters but a different randomness seed)
- Reporting functions that aggregate multiple
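A minimal sketch of the topology helpers mentioned in the list above, modeled on the ``virusonnetwork`` benchmark added in this comparison (the agent class and graph parameters are placeholders):

.. code:: python

   import networkx as nx
   from soil import Agent, Environment

   class Citizen(Agent):
       def step(self):
           pass

   class CityEnv(Environment):
       num_nodes = 100

       def init(self):
           # Create a network using the environment's internal RNG
           self.create_network(generator=nx.erdos_renyi_graph,
                               n=self.num_nodes, p=0.05)
           # Place one agent of the given class on every node
           self.populate_network(Citizen)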

Mesa compatibility
~~~~~~~~~~~~~~~~~~

Soil is in the process of becoming fully compatible with MESA.
The idea is to provide a set of modular classes and functions that extend the functionality of mesa, whilst staying compatible.
In the end, it should be possible to add regular mesa agents to a soil simulation, or use a soil agent within a mesa simulation/model.

This is a non-exhaustive list of tasks to achieve compatibility:

.. |check| raw:: html

   ☑

.. |uncheck| raw:: html

   ☐

- |check| Integrate `soil.Simulation` with mesa's runners:

  - |check| `soil.Simulation` can replace `mesa.batchrunner`

- |check| Integrate `soil.Environment` with `mesa.Model`:

  - |check| `Soil.Environment` inherits from `mesa.Model`
  - |check| `Soil.Environment` includes a Mesa-like Scheduler (see the `soil.time` module).
  - |check| Allow for `mesa.Model` to be used in a simulation.

- |check| Integrate `soil.Agent` with `mesa.Agent`:

  - |check| Rename agent.id to unique_id
  - |check| mesa agents can be used in soil simulations (see `examples/mesa`)

- |check| Provide examples

  - |check| Using mesa modules in a soil simulation (see `examples/mesa`)
  - |uncheck| Using soil modules in a mesa simulation (see `examples/mesa`)

- |uncheck| Document the new APIs and usage
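As a rough illustration of the items above about running mesa components under Soil, a plain mesa model can be handed directly to ``soil.Simulation``, as the ``benchmarks/noop/mesa_simulation.py`` script in this comparison does; the class below is a trimmed-down placeholder, not the original benchmark:

.. code:: python

   from mesa import Agent, Model
   from mesa.time import RandomActivation
   from soil import Simulation

   class PlainAgent(Agent):
       def step(self):
           pass

   class PlainModel(Model):
       def __init__(self, num_agents=10):
           super().__init__()
           self.schedule = RandomActivation(self)
           for _ in range(num_agents):
               self.schedule.add(PlainAgent(self.next_id(), self))

       def step(self):
           self.schedule.step()

   # The unmodified mesa model is driven by soil's Simulation runner
   sim = Simulation(model=PlainModel, parameters={"num_agents": 10},
                    max_steps=5, iterations=2, dump=False)
   sim.run()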
File diff suppressed because it is too large
2277 docs/tutorial/soil_tutorial.ipynb Normal file
File diff suppressed because one or more lines are too long
1 examples/README.md Normal file
@@ -0,0 +1 @@
Some of these examples are close to real-life simulations, whereas others are only a demonstration of Soil's capabilities.
|
@@ -43,7 +43,7 @@ class Journey:
|
||||
driver: Optional[Driver] = None
|
||||
|
||||
|
||||
class City(EventedEnvironment):
|
||||
class City(Environment):
|
||||
"""
|
||||
An environment with a grid where drivers and passengers will be placed.
|
||||
|
||||
@@ -85,40 +85,41 @@ class Driver(Evented, FSM):
|
||||
journey = None
|
||||
earnings = 0
|
||||
|
||||
def on_receive(self, msg, sender):
|
||||
"""This is not a state. It will run (and block) every time check_messages is invoked"""
|
||||
if self.journey is None and isinstance(msg, Journey) and msg.driver is None:
|
||||
msg.driver = self
|
||||
self.journey = msg
|
||||
# TODO: remove
|
||||
# def on_receive(self, msg, sender):
|
||||
# """This is not a state. It will run (and block) every time process_messages is invoked"""
|
||||
# if self.journey is None and isinstance(msg, Journey) and msg.driver is None:
|
||||
# msg.driver = self
|
||||
# self.journey = msg
|
||||
|
||||
def check_passengers(self):
|
||||
"""If there are no more passengers, stop forever"""
|
||||
c = self.count_agents(agent_class=Passenger)
|
||||
self.debug(f"Passengers left {c}")
|
||||
if not c:
|
||||
self.die("No more passengers")
|
||||
return c
|
||||
|
||||
@default_state
|
||||
@state
|
||||
def wandering(self):
|
||||
@state(default=True)
|
||||
async def wandering(self):
|
||||
"""Move around the city until a journey is accepted"""
|
||||
target = None
|
||||
self.check_passengers()
|
||||
if not self.check_passengers():
|
||||
return self.die("No passengers left")
|
||||
self.journey = None
|
||||
while self.journey is None: # No potential journeys detected (see on_receive)
|
||||
while self.journey is None: # No potential journeys detected
|
||||
if target is None or not self.move_towards(target):
|
||||
target = self.random.choice(
|
||||
self.model.grid.get_neighborhood(self.pos, moore=False)
|
||||
)
|
||||
|
||||
self.check_passengers()
|
||||
if not self.check_passengers():
|
||||
return self.die("No passengers left")
|
||||
# This will call on_receive behind the scenes, and the agent's status will be updated
|
||||
self.check_messages()
|
||||
yield Delta(30) # Wait at least 30 seconds before checking again
|
||||
|
||||
await self.delay(30) # Wait at least 30 seconds before checking again
|
||||
|
||||
try:
|
||||
# Re-send the journey to the passenger, to confirm that we have been selected
|
||||
self.journey = yield self.journey.passenger.ask(self.journey, timeout=60)
|
||||
self.journey = await self.journey.passenger.ask(self.journey, timeout=60, delay=5)
|
||||
except events.TimedOut:
|
||||
# No journey has been accepted. Try again
|
||||
self.journey = None
|
||||
@@ -127,18 +128,19 @@ class Driver(Evented, FSM):
|
||||
return self.driving
|
||||
|
||||
@state
|
||||
def driving(self):
|
||||
async def driving(self):
|
||||
"""The journey has been accepted. Pick them up and take them to their destination"""
|
||||
self.info(f"Driving towards Passenger {self.journey.passenger.unique_id}")
|
||||
while self.move_towards(self.journey.origin):
|
||||
yield
|
||||
await self.delay()
|
||||
self.info(f"Driving {self.journey.passenger.unique_id} from {self.journey.origin} to {self.journey.destination}")
|
||||
while self.move_towards(self.journey.destination, with_passenger=True):
|
||||
yield
|
||||
await self.delay()
|
||||
self.info("Arrived at destination")
|
||||
self.earnings += self.journey.tip
|
||||
self.model.total_earnings += self.journey.tip
|
||||
self.check_passengers()
|
||||
if not self.check_passengers():
|
||||
return self.die("No passengers left")
|
||||
return self.wandering
|
||||
|
||||
def move_towards(self, target, with_passenger=False):
|
||||
@@ -166,16 +168,17 @@ class Driver(Evented, FSM):
|
||||
class Passenger(Evented, FSM):
|
||||
pos = None
|
||||
|
||||
def on_receive(self, msg, sender):
|
||||
"""This is not a state. It will be run synchronously every time `check_messages` is run"""
|
||||
# TODO: Remove
|
||||
# def on_receive(self, msg, sender):
|
||||
# """This is not a state. It will be run synchronously every time `process_messages` is run"""
|
||||
|
||||
if isinstance(msg, Journey):
|
||||
self.journey = msg
|
||||
return msg
|
||||
# if isinstance(msg, Journey):
|
||||
# self.journey = msg
|
||||
# return msg
|
||||
|
||||
@default_state
|
||||
@state
|
||||
def asking(self):
|
||||
async def asking(self):
|
||||
destination = (
|
||||
self.random.randint(0, self.model.grid.height-1),
|
||||
self.random.randint(0, self.model.grid.width-1),
|
||||
@@ -191,30 +194,47 @@ class Passenger(Evented, FSM):
|
||||
timeout = 60
|
||||
expiration = self.now + timeout
|
||||
self.info(f"Asking for journey at: { self.pos }")
|
||||
self.model.broadcast(journey, ttl=timeout, sender=self, agent_class=Driver)
|
||||
self.broadcast(journey, ttl=timeout, agent_class=Driver)
|
||||
while not self.journey:
|
||||
self.debug(f"Waiting for responses at: { self.pos }")
|
||||
try:
|
||||
# This will call check_messages behind the scenes, and the agent's status will be updated
|
||||
# If you want to avoid that, you can call it with: check=False
|
||||
yield self.received(expiration=expiration)
|
||||
offers = await self.received(expiration=expiration, delay=10)
|
||||
accepted = None
|
||||
for event in offers:
|
||||
offer = event.payload
|
||||
if isinstance(offer, Journey):
|
||||
self.journey = offer
|
||||
assert isinstance(event.sender, Driver)
|
||||
try:
|
||||
answer = await event.sender.ask(True, sender=self, timeout=60, delay=5)
|
||||
if answer:
|
||||
accepted = offer
|
||||
self.journey = offer
|
||||
break
|
||||
except events.TimedOut:
|
||||
pass
|
||||
if accepted:
|
||||
for event in offers:
|
||||
if event.payload != accepted:
|
||||
event.sender.tell(False, timeout=60, delay=5)
|
||||
|
||||
except events.TimedOut:
|
||||
self.info(f"Still no response. Waiting at: { self.pos }")
|
||||
self.model.broadcast(
|
||||
journey, ttl=timeout, sender=self, agent_class=Driver
|
||||
self.broadcast(
|
||||
journey, ttl=timeout, agent_class=Driver
|
||||
)
|
||||
expiration = self.now + timeout
|
||||
self.info(f"Got a response! Waiting for driver")
|
||||
return self.driving_home
|
||||
|
||||
@state
|
||||
def driving_home(self):
|
||||
async def driving_home(self):
|
||||
while (
|
||||
self.pos[0] != self.journey.destination[0]
|
||||
or self.pos[1] != self.journey.destination[1]
|
||||
):
|
||||
try:
|
||||
yield self.received(timeout=60)
|
||||
await self.received(timeout=60)
|
||||
except events.TimedOut:
|
||||
pass
|
||||
|
||||
@@ -228,4 +248,4 @@ simulation = Simulation(name="RideHailing",
|
||||
parameters=dict(n_passengers=2))
|
||||
|
||||
if __name__ == "__main__":
|
||||
easy(simulation)
|
||||
easy(simulation)
|
@@ -1,5 +1,4 @@
|
||||
from soil.agents import FSM, state, default_state
|
||||
from soil.time import Delta
|
||||
|
||||
|
||||
class Fibonacci(FSM):
|
||||
@@ -11,17 +10,17 @@ class Fibonacci(FSM):
|
||||
def counting(self):
|
||||
self.log("Stopping at {}".format(self.now))
|
||||
prev, self["prev"] = self["prev"], max([self.now, self["prev"]])
|
||||
return None, Delta(prev)
|
||||
return self.delay(prev)
|
||||
|
||||
|
||||
|
||||
class Odds(FSM):
|
||||
"""Agent that only executes in odd t_steps"""
|
||||
|
||||
@default_state
|
||||
@state
|
||||
@state(default=True)
|
||||
def odds(self):
|
||||
self.log("Stopping at {}".format(self.now))
|
||||
return None, Delta(1 + self.now % 2)
|
||||
return self.delay(1 + (self.now % 2))
|
||||
|
||||
|
||||
from soil import Environment, Simulation
|
||||
@@ -35,7 +34,7 @@ class TimeoutsEnv(Environment):
|
||||
self.add_agent(agent_class=Odds, node_id=1)
|
||||
|
||||
|
||||
sim = Simulation(model=TimeoutsEnv, max_steps=10, interval=1)
|
||||
sim = Simulation(model=TimeoutsEnv, max_steps=10)
|
||||
|
||||
if __name__ == "__main__":
|
||||
sim.run(dump=False)
|
||||
sim.run(dump=False)
|
@@ -33,7 +33,7 @@ class GeneratorEnv(Environment):
|
||||
self.add_agents(CounterModel)
|
||||
|
||||
|
||||
sim = Simulation(model=GeneratorEnv, max_steps=10, interval=1)
|
||||
sim = Simulation(model=GeneratorEnv, max_steps=10)
|
||||
|
||||
if __name__ == '__main__':
|
||||
sim.run(dump=False)
|
@@ -1,7 +1,7 @@
|
||||
from soil import Simulation
|
||||
from social_wealth import MoneyEnv, graph_generator
|
||||
|
||||
sim = Simulation(name="mesa_sim", dump=False, max_steps=10, interval=2, model=MoneyEnv, parameters=dict(generator=graph_generator, N=10, width=50, height=50))
|
||||
sim = Simulation(name="mesa_sim", dump=False, max_steps=10, model=MoneyEnv, parameters=dict(generator=graph_generator, N=10, width=50, height=50))
|
||||
|
||||
if __name__ == "__main__":
|
||||
sim.run()
|
||||
|
@@ -2,13 +2,12 @@
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 1,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2017-11-08T16:22:30.732107Z",
|
||||
"start_time": "2017-11-08T17:22:30.059855+01:00"
|
||||
},
|
||||
"collapsed": true
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@@ -28,24 +27,16 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2017-11-08T16:22:35.580593Z",
|
||||
"start_time": "2017-11-08T17:22:35.542745+01:00"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Populating the interactive namespace from numpy and matplotlib\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%pylab inline\n",
|
||||
"%matplotlib inline\n",
|
||||
"\n",
|
||||
"from soil import *"
|
||||
]
|
||||
@@ -66,7 +57,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2017-11-08T16:22:37.242327Z",
|
||||
@@ -86,7 +77,7 @@
|
||||
" prob_neighbor_spread: 0.0\r\n",
|
||||
" prob_tv_spread: 0.01\r\n",
|
||||
"interval: 1\r\n",
|
||||
"max_time: 30\r\n",
|
||||
"max_time: 300\r\n",
|
||||
"name: Sim_all_dumb\r\n",
|
||||
"network_agents:\r\n",
|
||||
"- agent_class: DumbViewer\r\n",
|
||||
@@ -110,7 +101,7 @@
|
||||
" prob_neighbor_spread: 0.0\r\n",
|
||||
" prob_tv_spread: 0.01\r\n",
|
||||
"interval: 1\r\n",
|
||||
"max_time: 30\r\n",
|
||||
"max_time: 300\r\n",
|
||||
"name: Sim_half_herd\r\n",
|
||||
"network_agents:\r\n",
|
||||
"- agent_class: DumbViewer\r\n",
|
||||
@@ -142,18 +133,18 @@
|
||||
" prob_neighbor_spread: 0.0\r\n",
|
||||
" prob_tv_spread: 0.01\r\n",
|
||||
"interval: 1\r\n",
|
||||
"max_time: 30\r\n",
|
||||
"max_time: 300\r\n",
|
||||
"name: Sim_all_herd\r\n",
|
||||
"network_agents:\r\n",
|
||||
"- agent_class: HerdViewer\r\n",
|
||||
" state:\r\n",
|
||||
" has_tv: true\r\n",
|
||||
" id: neutral\r\n",
|
||||
" state_id: neutral\r\n",
|
||||
" weight: 1\r\n",
|
||||
"- agent_class: HerdViewer\r\n",
|
||||
" state:\r\n",
|
||||
" has_tv: true\r\n",
|
||||
" id: neutral\r\n",
|
||||
" state_id: neutral\r\n",
|
||||
" weight: 1\r\n",
|
||||
"network_params:\r\n",
|
||||
" generator: barabasi_albert_graph\r\n",
|
||||
@@ -169,13 +160,13 @@
|
||||
" prob_tv_spread: 0.01\r\n",
|
||||
" prob_neighbor_cure: 0.1\r\n",
|
||||
"interval: 1\r\n",
|
||||
"max_time: 30\r\n",
|
||||
"max_time: 300\r\n",
|
||||
"name: Sim_wise_herd\r\n",
|
||||
"network_agents:\r\n",
|
||||
"- agent_class: HerdViewer\r\n",
|
||||
" state:\r\n",
|
||||
" has_tv: true\r\n",
|
||||
" id: neutral\r\n",
|
||||
" state_id: neutral\r\n",
|
||||
" weight: 1\r\n",
|
||||
"- agent_class: WiseViewer\r\n",
|
||||
" state:\r\n",
|
||||
@@ -195,13 +186,13 @@
|
||||
" prob_tv_spread: 0.01\r\n",
|
||||
" prob_neighbor_cure: 0.1\r\n",
|
||||
"interval: 1\r\n",
|
||||
"max_time: 30\r\n",
|
||||
"max_time: 300\r\n",
|
||||
"name: Sim_all_wise\r\n",
|
||||
"network_agents:\r\n",
|
||||
"- agent_class: WiseViewer\r\n",
|
||||
" state:\r\n",
|
||||
" has_tv: true\r\n",
|
||||
" id: neutral\r\n",
|
||||
" state_id: neutral\r\n",
|
||||
" weight: 1\r\n",
|
||||
"- agent_class: WiseViewer\r\n",
|
||||
" state:\r\n",
|
||||
@@ -225,7 +216,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2017-11-08T18:07:46.781745Z",
|
||||
@@ -233,7 +224,24 @@
|
||||
},
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "ValueError",
|
||||
"evalue": "No objects to concatenate",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
|
||||
"Cell \u001b[0;32mIn[4], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m evodumb \u001b[38;5;241m=\u001b[39m \u001b[43manalysis\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mread_data\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43msoil_output/Sim_all_dumb/\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mprocess\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43manalysis\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_count\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mgroup\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mkeys\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mid\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m;\n",
|
||||
"File \u001b[0;32m/mnt/data/home/j/git/lab.gsi/soil/soil/soil/analysis.py:14\u001b[0m, in \u001b[0;36mread_data\u001b[0;34m(group, *args, **kwargs)\u001b[0m\n\u001b[1;32m 12\u001b[0m iterable \u001b[38;5;241m=\u001b[39m _read_data(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m 13\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m group:\n\u001b[0;32m---> 14\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mgroup_trials\u001b[49m\u001b[43m(\u001b[49m\u001b[43miterable\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 15\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 16\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mlist\u001b[39m(iterable)\n",
|
||||
"File \u001b[0;32m/mnt/data/home/j/git/lab.gsi/soil/soil/soil/analysis.py:201\u001b[0m, in \u001b[0;36mgroup_trials\u001b[0;34m(trials, aggfunc)\u001b[0m\n\u001b[1;32m 199\u001b[0m trials \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mlist\u001b[39m(trials)\n\u001b[1;32m 200\u001b[0m trials \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mlist\u001b[39m(\u001b[38;5;28mmap\u001b[39m(\u001b[38;5;28;01mlambda\u001b[39;00m x: x[\u001b[38;5;241m1\u001b[39m] \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(x, \u001b[38;5;28mtuple\u001b[39m) \u001b[38;5;28;01melse\u001b[39;00m x, trials))\n\u001b[0;32m--> 201\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mpd\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mconcat\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtrials\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241m.\u001b[39mgroupby(level\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0\u001b[39m)\u001b[38;5;241m.\u001b[39magg(aggfunc)\u001b[38;5;241m.\u001b[39mreorder_levels([\u001b[38;5;241m2\u001b[39m, \u001b[38;5;241m0\u001b[39m,\u001b[38;5;241m1\u001b[39m] ,axis\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1\u001b[39m)\n",
|
||||
"File \u001b[0;32m/mnt/data/home/j/git/lab.gsi/soil/soil/.env-v0.20/lib/python3.8/site-packages/pandas/util/_decorators.py:331\u001b[0m, in \u001b[0;36mdeprecate_nonkeyword_arguments.<locals>.decorate.<locals>.wrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 325\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(args) \u001b[38;5;241m>\u001b[39m num_allow_args:\n\u001b[1;32m 326\u001b[0m warnings\u001b[38;5;241m.\u001b[39mwarn(\n\u001b[1;32m 327\u001b[0m msg\u001b[38;5;241m.\u001b[39mformat(arguments\u001b[38;5;241m=\u001b[39m_format_argument_list(allow_args)),\n\u001b[1;32m 328\u001b[0m \u001b[38;5;167;01mFutureWarning\u001b[39;00m,\n\u001b[1;32m 329\u001b[0m stacklevel\u001b[38;5;241m=\u001b[39mfind_stack_level(),\n\u001b[1;32m 330\u001b[0m )\n\u001b[0;32m--> 331\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
|
||||
"File \u001b[0;32m/mnt/data/home/j/git/lab.gsi/soil/soil/.env-v0.20/lib/python3.8/site-packages/pandas/core/reshape/concat.py:368\u001b[0m, in \u001b[0;36mconcat\u001b[0;34m(objs, axis, join, ignore_index, keys, levels, names, verify_integrity, sort, copy)\u001b[0m\n\u001b[1;32m 146\u001b[0m \u001b[38;5;129m@deprecate_nonkeyword_arguments\u001b[39m(version\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, allowed_args\u001b[38;5;241m=\u001b[39m[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mobjs\u001b[39m\u001b[38;5;124m\"\u001b[39m])\n\u001b[1;32m 147\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mconcat\u001b[39m(\n\u001b[1;32m 148\u001b[0m objs: Iterable[NDFrame] \u001b[38;5;241m|\u001b[39m Mapping[HashableT, NDFrame],\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 157\u001b[0m copy: \u001b[38;5;28mbool\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m,\n\u001b[1;32m 158\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m DataFrame \u001b[38;5;241m|\u001b[39m Series:\n\u001b[1;32m 159\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 160\u001b[0m \u001b[38;5;124;03m Concatenate pandas objects along a particular axis.\u001b[39;00m\n\u001b[1;32m 161\u001b[0m \n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 366\u001b[0m \u001b[38;5;124;03m 1 3 4\u001b[39;00m\n\u001b[1;32m 367\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m--> 368\u001b[0m op \u001b[38;5;241m=\u001b[39m \u001b[43m_Concatenator\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 369\u001b[0m \u001b[43m \u001b[49m\u001b[43mobjs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 370\u001b[0m \u001b[43m \u001b[49m\u001b[43maxis\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43maxis\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 371\u001b[0m \u001b[43m \u001b[49m\u001b[43mignore_index\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mignore_index\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 372\u001b[0m \u001b[43m \u001b[49m\u001b[43mjoin\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mjoin\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 373\u001b[0m \u001b[43m \u001b[49m\u001b[43mkeys\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mkeys\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 374\u001b[0m \u001b[43m \u001b[49m\u001b[43mlevels\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlevels\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 375\u001b[0m \u001b[43m \u001b[49m\u001b[43mnames\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mnames\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 376\u001b[0m \u001b[43m \u001b[49m\u001b[43mverify_integrity\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverify_integrity\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 377\u001b[0m \u001b[43m \u001b[49m\u001b[43mcopy\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcopy\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 378\u001b[0m \u001b[43m \u001b[49m\u001b[43msort\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43msort\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 379\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 381\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m op\u001b[38;5;241m.\u001b[39mget_result()\n",
|
||||
"File \u001b[0;32m/mnt/data/home/j/git/lab.gsi/soil/soil/.env-v0.20/lib/python3.8/site-packages/pandas/core/reshape/concat.py:425\u001b[0m, in \u001b[0;36m_Concatenator.__init__\u001b[0;34m(self, objs, axis, join, keys, levels, names, ignore_index, verify_integrity, copy, sort)\u001b[0m\n\u001b[1;32m 422\u001b[0m objs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mlist\u001b[39m(objs)\n\u001b[1;32m 424\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(objs) \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[0;32m--> 425\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mNo objects to concatenate\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 427\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m keys \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 428\u001b[0m objs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mlist\u001b[39m(com\u001b[38;5;241m.\u001b[39mnot_none(\u001b[38;5;241m*\u001b[39mobjs))\n",
|
||||
"\u001b[0;31mValueError\u001b[0m: No objects to concatenate"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"evodumb = analysis.read_data('soil_output/Sim_all_dumb/', process=analysis.get_count, group=True, keys=['id']);"
|
||||
]
|
||||
@@ -721,9 +729,9 @@
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"display_name": "venv-soil",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
"name": "venv-soil"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
@@ -735,7 +743,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.2"
|
||||
"version": "3.8.10"
|
||||
},
|
||||
"toc": {
|
||||
"colors": {
|
||||
|
@@ -1,4 +1,4 @@
from soil.agents import FSM, NetworkAgent, state, default_state, prob
from soil.agents import FSM, NetworkAgent, state, default_state
from soil.parameters import *
import logging

@@ -1,7 +1,7 @@
There are two similar implementations of this simulation.

- `basic`. Using simple primitives.
- `improved`. Using more advanced features such as the `time` module to avoid unnecessary computations (i.e., skip steps), and generator functions.
- `improved`. Using more advanced features such as delays to avoid unnecessary computations (i.e., skip steps); see the sketch below.

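A minimal sketch of the delay-based approach the `improved` variant refers to, modeled on the `Odds` agent shown earlier in this changeset; the agent and state names here are illustrative, not part of the repository:

    from soil.agents import FSM, state

    class EveryOtherStep(FSM):
        @state(default=True)
        def ticking(self):
            self.log("Stopping at {}".format(self.now))
            # Instead of running again on the very next step, ask the
            # scheduler to resume this state only after a computed delay.
            return self.delay(1 + (self.now % 2))
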
The examples can be run directly in the terminal, and they accept command-line arguments.
For example, to enable the CSV exporter and the Summary exporter, while setting `max_time` to `100` and `seed` to `CustomSeed`:
@@ -1,23 +1,33 @@
|
||||
from soil import FSM, state, default_state, BaseAgent, NetworkAgent, Environment, Simulation
|
||||
from soil.time import Delta
|
||||
from enum import Enum
|
||||
from collections import Counter
|
||||
import logging
|
||||
from soil import Evented, FSM, state, default_state, BaseAgent, NetworkAgent, Environment, parameters, report, TimedOut
|
||||
import math
|
||||
|
||||
from rabbits_basic_sim import RabbitEnv
|
||||
|
||||
class RabbitsImprovedEnv(Environment):
|
||||
prob_death: parameters.probability = 1e-3
|
||||
|
||||
class RabbitsImprovedEnv(RabbitEnv):
|
||||
def init(self):
|
||||
"""Initialize the environment with the new versions of the agents"""
|
||||
a1 = self.add_node(Male)
|
||||
a2 = self.add_node(Female)
|
||||
a1.add_edge(a2)
|
||||
self.add_agent(RandomAccident)
|
||||
|
||||
@report
|
||||
@property
|
||||
def num_rabbits(self):
|
||||
return self.count_agents(agent_class=Rabbit)
|
||||
|
||||
class Rabbit(FSM, NetworkAgent):
|
||||
@report
|
||||
@property
|
||||
def num_males(self):
|
||||
return self.count_agents(agent_class=Male)
|
||||
|
||||
@report
|
||||
@property
|
||||
def num_females(self):
|
||||
return self.count_agents(agent_class=Female)
|
||||
|
||||
|
||||
class Rabbit(Evented, FSM, NetworkAgent):
|
||||
|
||||
sexual_maturity = 30
|
||||
life_expectancy = 300
|
||||
@@ -32,42 +42,40 @@ class Rabbit(FSM, NetworkAgent):
|
||||
@default_state
|
||||
@state
|
||||
def newborn(self):
|
||||
self.info("I am a newborn.")
|
||||
self.debug("I am a newborn.")
|
||||
self.birth = self.now
|
||||
self.offspring = 0
|
||||
return self.youngling, Delta(self.sexual_maturity - self.age)
|
||||
return self.youngling
|
||||
|
||||
@state
|
||||
def youngling(self):
|
||||
if self.age >= self.sexual_maturity:
|
||||
self.info(f"I am fertile! My age is {self.age}")
|
||||
return self.fertile
|
||||
async def youngling(self):
|
||||
self.debug("I am a youngling.")
|
||||
await self.delay(self.sexual_maturity - self.age)
|
||||
assert self.age >= self.sexual_maturity
|
||||
self.debug(f"I am fertile! My age is {self.age}")
|
||||
return self.fertile
|
||||
|
||||
@state
|
||||
def fertile(self):
|
||||
raise Exception("Each subclass should define its fertile state")
|
||||
|
||||
@state
|
||||
def dead(self):
|
||||
self.die()
|
||||
|
||||
|
||||
class Male(Rabbit):
|
||||
max_females = 5
|
||||
mating_prob = 0.001
|
||||
mating_prob = 0.005
|
||||
|
||||
@state
|
||||
def fertile(self):
|
||||
if self.age > self.life_expectancy:
|
||||
return self.dead
|
||||
return self.die()
|
||||
|
||||
# Males try to mate
|
||||
for f in self.model.agents(
|
||||
agent_class=Female, state_id=Female.fertile.id, limit=self.max_females
|
||||
):
|
||||
self.debug("FOUND A FEMALE: ", repr(f), self.mating_prob)
|
||||
self.debug(f"FOUND A FEMALE: {repr(f)}. Mating with prob {self.mating_prob}")
|
||||
if self.prob(self["mating_prob"]):
|
||||
f.impregnate(self)
|
||||
f.tell(self.unique_id, sender=self, timeout=1)
|
||||
break # Do not try to impregnate other females
|
||||
|
||||
|
||||
@@ -76,78 +84,91 @@ class Female(Rabbit):
|
||||
conception = None
|
||||
|
||||
@state
|
||||
def fertile(self):
|
||||
async def fertile(self):
|
||||
# Just wait for a Male
|
||||
if self.age > self.life_expectancy:
|
||||
return self.dead
|
||||
if self.conception is not None:
|
||||
return self.pregnant
|
||||
|
||||
@property
|
||||
def pregnancy(self):
|
||||
if self.conception is None:
|
||||
return None
|
||||
return self.now - self.conception
|
||||
|
||||
def impregnate(self, male):
|
||||
self.info(f"impregnated by {repr(male)}")
|
||||
self.mate = male
|
||||
self.conception = self.now
|
||||
self.number_of_babies = int(8 + 4 * self.random.random())
|
||||
try:
|
||||
timeout = self.life_expectancy - self.age
|
||||
while timeout > 0:
|
||||
mates = await self.received(timeout=timeout)
|
||||
# assert self.age <= self.life_expectancy
|
||||
for mate in mates:
|
||||
try:
|
||||
male = self.model.agents[mate.payload]
|
||||
except ValueError:
|
||||
continue
|
||||
self.debug(f"impregnated by {repr(male)}")
|
||||
self.mate = male
|
||||
self.number_of_babies = int(8 + 4 * self.random.random())
|
||||
self.conception = self.now
|
||||
return self.pregnant
|
||||
except TimedOut:
|
||||
pass
|
||||
return self.die()
|
||||
|
||||
@state
|
||||
def pregnant(self):
|
||||
async def pregnant(self):
|
||||
self.debug("I am pregnant")
|
||||
# assert self.mate is not None
|
||||
|
||||
when = min(self.gestation, self.life_expectancy - self.age)
|
||||
if when < 0:
|
||||
return self.die()
|
||||
await self.delay(when)
|
||||
|
||||
if self.age > self.life_expectancy:
|
||||
self.info("Dying before giving birth")
|
||||
self.debug("Dying before giving birth")
|
||||
return self.die()
|
||||
|
||||
if self.pregnancy >= self.gestation:
|
||||
self.info("Having {} babies".format(self.number_of_babies))
|
||||
for i in range(self.number_of_babies):
|
||||
state = {}
|
||||
agent_class = self.random.choice([Male, Female])
|
||||
child = self.model.add_node(agent_class=agent_class, **state)
|
||||
child.add_edge(self)
|
||||
if self.mate:
|
||||
child.add_edge(self.mate)
|
||||
self.mate.offspring += 1
|
||||
else:
|
||||
self.debug("The father has passed away")
|
||||
# assert self.now - self.conception >= self.gestation
|
||||
if not self.alive:
|
||||
return self.die()
|
||||
|
||||
self.offspring += 1
|
||||
self.mate = None
|
||||
return self.fertile
|
||||
self.debug("Having {} babies".format(self.number_of_babies))
|
||||
for i in range(self.number_of_babies):
|
||||
state = {}
|
||||
agent_class = self.random.choice([Male, Female])
|
||||
child = self.model.add_node(agent_class=agent_class, **state)
|
||||
child.add_edge(self)
|
||||
try:
|
||||
child.add_edge(self.mate)
|
||||
self.model.agents[self.mate].offspring += 1
|
||||
except ValueError:
|
||||
self.debug("The father has passed away")
|
||||
|
||||
self.offspring += 1
|
||||
self.mate = None
|
||||
self.conception = None
|
||||
return self.fertile
|
||||
|
||||
def die(self):
|
||||
if self.pregnancy is not None:
|
||||
self.info("A mother has died carrying a baby!!")
|
||||
if self.conception is not None:
|
||||
self.debug("A mother has died carrying a baby!!")
|
||||
return super().die()
|
||||
|
||||
|
||||
class RandomAccident(BaseAgent):
|
||||
# Default value, but the value from the environment takes precedence
|
||||
prob_death = 1e-3
|
||||
|
||||
def step(self):
|
||||
rabbits_alive = self.model.G.number_of_nodes()
|
||||
|
||||
if not rabbits_alive:
|
||||
return self.die()
|
||||
alive = self.get_agents(agent_class=Rabbit, alive=True)
|
||||
|
||||
prob_death = self.model.get("prob_death", 1e-100) * math.floor(
|
||||
math.log10(max(1, rabbits_alive))
|
||||
)
|
||||
if not alive:
|
||||
return self.die("No more rabbits to kill")
|
||||
|
||||
num_alive = len(alive)
|
||||
prob_death = min(1, self.prob_death * num_alive/10)
|
||||
self.debug("Killing some rabbits with prob={}!".format(prob_death))
|
||||
for i in self.iter_agents(agent_class=Rabbit):
|
||||
|
||||
for i in alive:
|
||||
if i.state_id == i.dead.id:
|
||||
continue
|
||||
if self.prob(prob_death):
|
||||
self.info("I killed a rabbit: {}".format(i.id))
|
||||
rabbits_alive -= 1
|
||||
i.die()
|
||||
self.debug("Rabbits alive: {}".format(rabbits_alive))
|
||||
self.debug("I killed a rabbit: {}".format(i.unique_id))
|
||||
num_alive -= 1
|
||||
self.model.remove_agent(i)
|
||||
self.debug("Rabbits alive: {}".format(num_alive))
|
||||
|
||||
|
||||
sim = Simulation(model=RabbitsImprovedEnv, max_time=100, seed="MySeed", iterations=1)
|
||||
|
||||
if __name__ == "__main__":
|
||||
sim.run()
|
||||
RabbitsImprovedEnv.run(max_time=1000, seed="MySeed", iterations=1)
|
||||
|
@@ -1,11 +1,9 @@
|
||||
from soil import FSM, state, default_state, BaseAgent, NetworkAgent, Environment, Simulation, report, parameters as params
|
||||
from collections import Counter
|
||||
import logging
|
||||
from soil import FSM, state, default_state, BaseAgent, NetworkAgent, Environment, report, parameters as params
|
||||
import math
|
||||
|
||||
|
||||
class RabbitEnv(Environment):
|
||||
prob_death: params.probability = 1e-100
|
||||
prob_death: params.probability = 1e-3
|
||||
|
||||
def init(self):
|
||||
a1 = self.add_node(Male)
|
||||
@@ -37,7 +35,7 @@ class Rabbit(NetworkAgent, FSM):
|
||||
@default_state
|
||||
@state
|
||||
def newborn(self):
|
||||
self.info("I am a newborn.")
|
||||
self.debug("I am a newborn.")
|
||||
self.age = 0
|
||||
self.offspring = 0
|
||||
return self.youngling
|
||||
@@ -46,7 +44,7 @@ class Rabbit(NetworkAgent, FSM):
|
||||
def youngling(self):
|
||||
self.age += 1
|
||||
if self.age >= self.sexual_maturity:
|
||||
self.info(f"I am fertile! My age is {self.age}")
|
||||
self.debug(f"I am fertile! My age is {self.age}")
|
||||
return self.fertile
|
||||
|
||||
@state
|
||||
@@ -60,7 +58,7 @@ class Rabbit(NetworkAgent, FSM):
|
||||
|
||||
class Male(Rabbit):
|
||||
max_females = 5
|
||||
mating_prob = 0.001
|
||||
mating_prob = 0.005
|
||||
|
||||
@state
|
||||
def fertile(self):
|
||||
@@ -70,9 +68,8 @@ class Male(Rabbit):
|
||||
return self.dead
|
||||
|
||||
# Males try to mate
|
||||
for f in self.model.agents(
|
||||
agent_class=Female, state_id=Female.fertile.id, limit=self.max_females
|
||||
):
|
||||
for f in self.model.agents.filter(
|
||||
agent_class=Female, state_id=Female.fertile.id).limit(self.max_females):
|
||||
self.debug("FOUND A FEMALE: ", repr(f), self.mating_prob)
|
||||
if self.prob(self["mating_prob"]):
|
||||
f.impregnate(self)
|
||||
@@ -93,14 +90,14 @@ class Female(Rabbit):
|
||||
return self.pregnant
|
||||
|
||||
def impregnate(self, male):
|
||||
self.info(f"impregnated by {repr(male)}")
|
||||
self.debug(f"impregnated by {repr(male)}")
|
||||
self.mate = male
|
||||
self.pregnancy = 0
|
||||
self.number_of_babies = int(8 + 4 * self.random.random())
|
||||
|
||||
@state
|
||||
def pregnant(self):
|
||||
self.info("I am pregnant")
|
||||
self.debug("I am pregnant")
|
||||
self.age += 1
|
||||
|
||||
if self.age >= self.life_expectancy:
|
||||
@@ -110,7 +107,7 @@ class Female(Rabbit):
|
||||
self.pregnancy += 1
|
||||
return
|
||||
|
||||
self.info("Having {} babies".format(self.number_of_babies))
|
||||
self.debug("Having {} babies".format(self.number_of_babies))
|
||||
for i in range(self.number_of_babies):
|
||||
state = {}
|
||||
agent_class = self.random.choice([Male, Female])
|
||||
@@ -129,33 +126,32 @@ class Female(Rabbit):
|
||||
|
||||
def die(self):
|
||||
if "pregnancy" in self and self["pregnancy"] > -1:
|
||||
self.info("A mother has died carrying a baby!!")
|
||||
self.debug("A mother has died carrying a baby!!")
|
||||
return super().die()
|
||||
|
||||
|
||||
class RandomAccident(BaseAgent):
|
||||
prob_death = None
|
||||
def step(self):
|
||||
rabbits_alive = self.model.G.number_of_nodes()
|
||||
alive = self.get_agents(agent_class=Rabbit, alive=True)
|
||||
|
||||
if not rabbits_alive:
|
||||
return self.die()
|
||||
if not alive:
|
||||
return self.die("No more rabbits to kill")
|
||||
|
||||
prob_death = self.model.prob_death * math.floor(
|
||||
math.log10(max(1, rabbits_alive))
|
||||
)
|
||||
num_alive = len(alive)
|
||||
prob_death = min(1, self.prob_death * num_alive/10)
|
||||
self.debug("Killing some rabbits with prob={}!".format(prob_death))
|
||||
for i in self.get_agents(agent_class=Rabbit):
|
||||
|
||||
for i in alive:
|
||||
if i.state_id == i.dead.id:
|
||||
continue
|
||||
if self.prob(prob_death):
|
||||
self.info("I killed a rabbit: {}".format(i.id))
|
||||
rabbits_alive -= 1
|
||||
i.die()
|
||||
self.debug("Rabbits alive: {}".format(rabbits_alive))
|
||||
self.debug("I killed a rabbit: {}".format(i.unique_id))
|
||||
num_alive -= 1
|
||||
self.model.remove_agent(i)
|
||||
i.alive = False
|
||||
i.killed = True
|
||||
self.debug("Rabbits alive: {}".format(num_alive))
|
||||
|
||||
|
||||
|
||||
sim = Simulation(model=RabbitEnv, max_time=100, seed="MySeed", iterations=1)
|
||||
|
||||
if __name__ == "__main__":
|
||||
sim.run()
|
||||
RabbitEnv.run(max_time=1000, seed="MySeed", iterations=1)
|
@@ -1,9 +1,7 @@
"""
Example of setting a
Example of a fully programmatic simulation, without definition files.
"""
from soil import Simulation, agents, Environment
from soil.time import Delta


class MyAgent(agents.FSM):
@@ -11,22 +9,22 @@ class MyAgent(agents.FSM):
    An agent that first does a ping
    """

    defaults = {"pong_counts": 2}
    max_pongs = 2

    @agents.default_state
    @agents.state
    def ping(self):
        self.info("Ping")
        return self.pong, Delta(self.random.expovariate(1 / 16))
        return self.pong.delay(self.random.expovariate(1 / 16))

    @agents.state
    def pong(self):
        self.info("Pong")
        self.pong_counts -= 1
        self.info(str(self.pong_counts))
        if self.pong_counts < 1:
        self.max_pongs -= 1
        self.info(str(self.max_pongs), "pongs remaining")
        if self.max_pongs < 1:
            return self.die()
        return None, Delta(self.random.expovariate(1 / 16))
        return self.delay(self.random.expovariate(1 / 16))


class RandomEnv(Environment):
@@ -1,5 +1,6 @@
import networkx as nx
from soil.agents import Geo, NetworkAgent, FSM, custom, state, default_state
from soil.agents import FSM, state, default_state
from soil.agents.geo import Geo
from soil import Environment, Simulation
from soil.parameters import *
from soil.utils import int_seed
@@ -39,8 +40,8 @@ class TerroristEnvironment(Environment):
            HavenModel
        ], [self.ratio_civil, self.ratio_leader, self.ratio_training, self.ratio_haven])

    def generator(self, *args, **kwargs):
        return nx.random_geometric_graph(*args, **kwargs, seed=int_seed(self._seed))
    def generator(self, *args, seed=None, **kwargs):
        return nx.random_geometric_graph(*args, **kwargs, seed=seed or int_seed(self._seed))

class TerroristSpreadModel(FSM, Geo):
    """
@@ -21,5 +21,4 @@ class TorvaldsEnv(Environment):

sim = Simulation(name='torvalds_example',
                 max_steps=10,
                 interval=2,
                 model=TorvaldsEnv)
File diff suppressed because one or more lines are too long
@@ -6,7 +6,6 @@ pandas>=1
SALib>=1.3
Jinja2
Mesa>=1.2
pydantic>=1.9
sqlalchemy>=1.4
typing-extensions>=4.4
annotated-types>=0.4

4
setup.py
4
setup.py
@@ -17,9 +17,9 @@ def parse_requirements(filename):
install_reqs = parse_requirements("requirements.txt")
test_reqs = parse_requirements("test-requirements.txt")
extras_require={
    'mesa': ['mesa>=0.8.9'],
    'geo': ['scipy>=1.3'],
    'web': ['tornado']
    'web': ['tornado'],
    'ipython': ['ipython==8.12', 'nbformat==5.8'],
}
extras_require['all'] = [dep for package in extras_require.values() for dep in package]

@@ -1 +1 @@
1.0.0rc1
1.0.0rc10
@@ -16,11 +16,10 @@ except NameError:
    basestring = str

from pathlib import Path
from .analysis import *
from .agents import *
from . import agents
from .simulation import *
from .environment import Environment, EventedEnvironment
from .environment import Environment
from .datacollection import SoilCollector
from . import serialization
from .utils import logger
@@ -118,13 +117,13 @@ def main(
    )
    parser.add_argument(
        "--max_time",
        default="-1",
        default="",
        help="Set maximum time for the simulation to run. ",
    )

    parser.add_argument(
        "--max_steps",
        default="-1",
        default="",
        help="Set maximum number of steps for the simulation to run.",
    )

@@ -250,14 +249,16 @@ def main(
            if args.only_convert:
                print(sim.to_yaml())
                continue
            max_time = float(args.max_time) if args.max_time != "-1" else None
            max_steps = float(args.max_steps) if args.max_steps != "-1" else None
            res.append(sim.run(max_time=max_time, max_steps=max_steps))
            d = {}
            if args.max_time:
                d["max_time"] = float(args.max_time)
            if args.max_steps:
                d["max_steps"] = int(args.max_steps)
            res.append(sim.run(**d))

        except Exception as ex:
            if args.pdb:
                from .debugging import post_mortem

                print(traceback.format_exc())
                post_mortem()
            else:
@@ -1,85 +1,23 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from collections import OrderedDict, defaultdict
|
||||
from collections.abc import MutableMapping, Mapping, Set
|
||||
from abc import ABCMeta
|
||||
from copy import deepcopy, copy
|
||||
from functools import partial, wraps
|
||||
from itertools import islice, chain
|
||||
from collections.abc import MutableMapping
|
||||
from copy import deepcopy
|
||||
import inspect
|
||||
import types
|
||||
import textwrap
|
||||
import networkx as nx
|
||||
import warnings
|
||||
import sys
|
||||
|
||||
from typing import Any
|
||||
from mesa import Agent as MesaAgent
|
||||
|
||||
from mesa import Agent as MesaAgent, Model
|
||||
from typing import Dict, List
|
||||
from .. import utils, time
|
||||
|
||||
from .. import serialization, network, utils, time, config
|
||||
from .meta import MetaAgent
|
||||
|
||||
|
||||
IGNORED_FIELDS = ("model", "logger")
|
||||
|
||||
|
||||
class MetaAgent(ABCMeta):
|
||||
def __new__(mcls, name, bases, namespace):
|
||||
defaults = {}
|
||||
|
||||
# Re-use defaults from inherited classes
|
||||
for i in bases:
|
||||
if isinstance(i, MetaAgent):
|
||||
defaults.update(i._defaults)
|
||||
|
||||
new_nmspc = {
|
||||
"_defaults": defaults,
|
||||
"_last_return": None,
|
||||
"_last_except": None,
|
||||
}
|
||||
|
||||
for attr, func in namespace.items():
|
||||
if attr == "step" and inspect.isgeneratorfunction(func):
|
||||
orig_func = func
|
||||
new_nmspc["_coroutine"] = None
|
||||
|
||||
@wraps(func)
|
||||
def func(self):
|
||||
while True:
|
||||
if not self._coroutine:
|
||||
self._coroutine = orig_func(self)
|
||||
try:
|
||||
if self._last_except:
|
||||
return self._coroutine.throw(self._last_except)
|
||||
else:
|
||||
return self._coroutine.send(self._last_return)
|
||||
except StopIteration as ex:
|
||||
self._coroutine = None
|
||||
return ex.value
|
||||
finally:
|
||||
self._last_return = None
|
||||
self._last_except = None
|
||||
|
||||
func.id = name or func.__name__
|
||||
func.is_default = False
|
||||
new_nmspc[attr] = func
|
||||
elif (
|
||||
isinstance(func, types.FunctionType)
|
||||
or isinstance(func, property)
|
||||
or isinstance(func, classmethod)
|
||||
or attr[0] == "_"
|
||||
):
|
||||
new_nmspc[attr] = func
|
||||
elif attr == "defaults":
|
||||
defaults.update(func)
|
||||
else:
|
||||
defaults[attr] = copy(func)
|
||||
|
||||
return super().__new__(mcls=mcls, name=name, bases=bases, namespace=new_nmspc)
|
||||
|
||||
|
||||
class BaseAgent(MesaAgent, MutableMapping, metaclass=MetaAgent):
|
||||
"""
|
||||
A special type of Mesa Agent that:
|
||||
@@ -92,8 +30,11 @@ class BaseAgent(MesaAgent, MutableMapping, metaclass=MetaAgent):
|
||||
Any attribute that is not preceded by an underscore (`_`) will also be added to its state.
|
||||
"""
|
||||
|
||||
def __init__(self, unique_id, model, name=None, init=True, interval=None, **kwargs):
|
||||
assert isinstance(unique_id, int)
|
||||
def __init__(self, unique_id=None, model=None, name=None, init=True, **kwargs):
|
||||
# Ideally, model should be the first argument, but Mesa's Agent class has unique_id first
|
||||
assert not (model is None), "Must provide a model"
|
||||
if unique_id is None:
|
||||
unique_id = model.next_id()
|
||||
super().__init__(unique_id=unique_id, model=model)
|
||||
|
||||
self.name = (
|
||||
@@ -102,7 +43,6 @@ class BaseAgent(MesaAgent, MutableMapping, metaclass=MetaAgent):
|
||||
|
||||
self.alive = True
|
||||
|
||||
self.interval = interval or self.get("interval", 1)
|
||||
logger = utils.logger.getChild(getattr(self.model, "id", self.model)).getChild(
|
||||
self.name
|
||||
)
|
||||
@@ -111,13 +51,18 @@ class BaseAgent(MesaAgent, MutableMapping, metaclass=MetaAgent):
|
||||
if hasattr(self, "level"):
|
||||
self.logger.setLevel(self.level)
|
||||
|
||||
for k in self._defaults:
|
||||
v = getattr(model, k, None)
|
||||
if v is not None:
|
||||
setattr(self, k, v)
|
||||
|
||||
for (k, v) in self._defaults.items():
|
||||
if not hasattr(self, k) or getattr(self, k) is None:
|
||||
setattr(self, k, deepcopy(v))
|
||||
|
||||
for (k, v) in kwargs.items():
|
||||
|
||||
setattr(self, k, v)
|
||||
|
||||
if init:
|
||||
self.init()
|
||||
|
||||
@@ -128,11 +73,11 @@ class BaseAgent(MesaAgent, MutableMapping, metaclass=MetaAgent):
|
||||
return hash(self.unique_id)
|
||||
|
||||
def prob(self, probability):
|
||||
return prob(probability, self.model.random)
|
||||
return utils.prob(probability, self.model.random)
|
||||
|
||||
@classmethod
|
||||
def w(cls, **kwargs):
|
||||
return custom(cls, **kwargs)
|
||||
return utils.custom(cls, **kwargs)
|
||||
|
||||
# TODO: refactor to clean up mesa compatibility
|
||||
@property
|
||||
@@ -142,20 +87,12 @@ class BaseAgent(MesaAgent, MutableMapping, metaclass=MetaAgent):
|
||||
print(msg, file=sys.stderr)
|
||||
return self.unique_id
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, model, attrs, warn_extra=True):
|
||||
ignored = {}
|
||||
args = {}
|
||||
for k, v in attrs.items():
|
||||
if k in inspect.signature(cls).parameters:
|
||||
args[k] = v
|
||||
else:
|
||||
ignored[k] = v
|
||||
if ignored and warn_extra:
|
||||
utils.logger.info(
|
||||
f"Ignoring the following arguments for agent class { agent_class.__name__ }: { ignored }"
|
||||
)
|
||||
return cls(model=model, **args)
|
||||
@property
|
||||
def env(self):
|
||||
msg = "This attribute is deprecated. Use `model` instead"
|
||||
warnings.warn(msg, DeprecationWarning)
|
||||
print(msg, file=sys.stderr)
|
||||
return self.model
|
||||
|
||||
def __getitem__(self, key):
|
||||
try:
|
||||
@@ -189,11 +126,13 @@ class BaseAgent(MesaAgent, MutableMapping, metaclass=MetaAgent):
|
||||
return it
|
||||
|
||||
def get(self, key, default=None):
|
||||
if key in self:
|
||||
return self[key]
|
||||
elif key in self.model:
|
||||
return self.model[key]
|
||||
return default
|
||||
try:
|
||||
return getattr(self, key)
|
||||
except AttributeError:
|
||||
try:
|
||||
return getattr(self.model, key)
|
||||
except AttributeError:
|
||||
return default
|
||||
|
||||
@property
|
||||
def now(self):
|
||||
@@ -206,17 +145,14 @@ class BaseAgent(MesaAgent, MutableMapping, metaclass=MetaAgent):
|
||||
def die(self, msg=None):
|
||||
if msg:
|
||||
self.info("Agent dying:", msg)
|
||||
self.debug(f"agent dying")
|
||||
else:
|
||||
self.debug(f"agent dying")
|
||||
self.alive = False
|
||||
try:
|
||||
self.model.schedule.remove(self)
|
||||
except KeyError:
|
||||
pass
|
||||
return time.NEVER
|
||||
return time.Delay(time.INFINITY)
|
||||
|
||||
def step(self):
|
||||
raise NotImplementedError("Agent must implement step method")
|
||||
|
||||
|
||||
def _check_alive(self):
|
||||
if not self.alive:
|
||||
raise time.DeadAgent(self.unique_id)
|
||||
@@ -266,407 +202,30 @@ class BaseAgent(MesaAgent, MutableMapping, metaclass=MetaAgent):
|
||||
def __repr__(self):
|
||||
return f"{self.__class__.__name__}({self.unique_id})"
|
||||
|
||||
def at(self, at):
|
||||
return time.Delay(float(at) - self.now)
|
||||
|
||||
def prob(prob, random):
|
||||
"""
|
||||
A true/False uniform distribution with a given probability.
|
||||
To be used like this:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
if prob(0.3):
|
||||
do_something()
|
||||
|
||||
"""
|
||||
r = random.random()
|
||||
return r < prob
|
||||
|
||||
|
||||
def calculate_distribution(network_agents=None, agent_class=None):
|
||||
"""
|
||||
Calculate the threshold values (thresholds for a uniform distribution)
|
||||
of an agent distribution given the weights of each agent type.
|
||||
|
||||
The input has this form: ::
|
||||
|
||||
[
|
||||
{'agent_class': 'agent_class_1',
|
||||
'weight': 0.2,
|
||||
'state': {
|
||||
'id': 0
|
||||
}
|
||||
},
|
||||
{'agent_class': 'agent_class_2',
|
||||
'weight': 0.8,
|
||||
'state': {
|
||||
'id': 1
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
In this example, 20% of the nodes will be marked as type
|
||||
'agent_class_1'.
|
||||
"""
|
||||
if network_agents:
|
||||
network_agents = [
|
||||
deepcopy(agent) for agent in network_agents if not hasattr(agent, "id")
|
||||
]
|
||||
elif agent_class:
|
||||
network_agents = [{"agent_class": agent_class}]
|
||||
else:
|
||||
raise ValueError("Specify a distribution or a default agent type")
|
||||
|
||||
# Fix missing weights and incompatible types
|
||||
for x in network_agents:
|
||||
x["weight"] = float(x.get("weight", 1))
|
||||
|
||||
# Calculate the thresholds
|
||||
total = sum(x["weight"] for x in network_agents)
|
||||
acc = 0
|
||||
for v in network_agents:
|
||||
if "ids" in v:
|
||||
continue
|
||||
upper = acc + (v["weight"] / total)
|
||||
v["threshold"] = [acc, upper]
|
||||
acc = upper
|
||||
return network_agents
|
||||
|
||||
|
||||
def _serialize_type(agent_class, known_modules=[], **kwargs):
|
||||
if isinstance(agent_class, str):
|
||||
return agent_class
|
||||
known_modules += ["soil.agents"]
|
||||
return serialization.serialize(agent_class, known_modules=known_modules, **kwargs)[
|
||||
1
|
||||
] # Get the name of the class
|
||||
|
||||
|
||||
def _deserialize_type(agent_class, known_modules=[]):
|
||||
if not isinstance(agent_class, str):
|
||||
return agent_class
|
||||
known = known_modules + ["soil.agents", "soil.agents.custom"]
|
||||
agent_class = serialization.deserializer(agent_class, known_modules=known)
|
||||
return agent_class
|
||||
|
||||
|
||||
class AgentView(Mapping, Set):
|
||||
"""A lazy-loaded list of agents."""
|
||||
|
||||
__slots__ = ("_agents",)
|
||||
|
||||
def __init__(self, agents):
|
||||
self._agents = agents
|
||||
|
||||
def __getstate__(self):
|
||||
return {"_agents": self._agents}
|
||||
|
||||
def __setstate__(self, state):
|
||||
self._agents = state["_agents"]
|
||||
|
||||
# Mapping methods
|
||||
def __len__(self):
|
||||
return len(self._agents)
|
||||
|
||||
def __iter__(self):
|
||||
yield from self._agents.values()
|
||||
|
||||
def __getitem__(self, agent_id):
|
||||
if isinstance(agent_id, slice):
|
||||
raise ValueError(f"Slicing is not supported")
|
||||
if agent_id in self._agents:
|
||||
return self._agents[agent_id]
|
||||
raise ValueError(f"Agent {agent_id} not found")
|
||||
|
||||
def filter(self, *args, **kwargs):
|
||||
yield from filter_agents(self._agents, *args, **kwargs)
|
||||
|
||||
def one(self, *args, **kwargs):
|
||||
return next(filter_agents(self._agents, *args, **kwargs))
|
||||
|
||||
def __call__(self, *args, **kwargs):
|
||||
return list(self.filter(*args, **kwargs))
|
||||
|
||||
def __contains__(self, agent_id):
|
||||
return agent_id in self._agents
|
||||
|
||||
def __str__(self):
|
||||
return str(list(unique_id for unique_id in self.keys()))
|
||||
|
||||
def __repr__(self):
|
||||
return f"{self.__class__.__name__}({self})"
|
||||
|
||||
|
||||
def filter_agents(
|
||||
agents: dict,
|
||||
*id_args,
|
||||
unique_id=None,
|
||||
state_id=None,
|
||||
agent_class=None,
|
||||
ignore=None,
|
||||
state=None,
|
||||
limit=None,
|
||||
**kwargs,
|
||||
):
|
||||
"""
|
||||
Filter agents given as a dict, by the criteria given as arguments (e.g., certain type or state id).
|
||||
"""
|
||||
assert isinstance(agents, dict)
|
||||
|
||||
ids = []
|
||||
|
||||
if unique_id is not None:
|
||||
if isinstance(unique_id, list):
|
||||
ids += unique_id
|
||||
else:
|
||||
ids.append(unique_id)
|
||||
|
||||
if id_args:
|
||||
ids += id_args
|
||||
|
||||
if ids:
|
||||
f = (agents[aid] for aid in ids if aid in agents)
|
||||
else:
|
||||
f = agents.values()
|
||||
|
||||
if state_id is not None and not isinstance(state_id, (tuple, list)):
|
||||
state_id = tuple([state_id])
|
||||
|
||||
if agent_class is not None:
|
||||
agent_class = _deserialize_type(agent_class)
|
||||
try:
|
||||
agent_class = tuple(agent_class)
|
||||
except TypeError:
|
||||
agent_class = tuple([agent_class])
|
||||
|
||||
if ignore:
|
||||
f = filter(lambda x: x not in ignore, f)
|
||||
|
||||
if state_id is not None:
|
||||
f = filter(lambda agent: agent.get("state_id", None) in state_id, f)
|
||||
|
||||
if agent_class is not None:
|
||||
f = filter(lambda agent: isinstance(agent, agent_class), f)
|
||||
|
||||
state = state or dict()
|
||||
state.update(kwargs)
|
||||
|
||||
for k, v in state.items():
|
||||
f = filter(lambda agent: getattr(agent, k, None) == v, f)
|
||||
|
||||
if limit is not None:
|
||||
f = islice(f, limit)
|
||||
|
||||
yield from f
|
||||
|
||||
|
||||
def from_config(
|
||||
cfg: config.AgentConfig, random, topology: nx.Graph = None
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
This function turns an agentconfig into a list of individual "agent specifications", which are just a dictionary
|
||||
with the parameters that the environment will use to construct each agent.
|
||||
|
||||
This function does NOT return a list of agents, mostly because some attributes to the agent are not known at the
|
||||
time of calling this function, such as `unique_id`.
|
||||
"""
|
||||
default = cfg or config.AgentConfig()
|
||||
if not isinstance(cfg, config.AgentConfig):
|
||||
cfg = config.AgentConfig(**cfg)
|
||||
|
||||
agents = []
|
||||
|
||||
assigned_total = 0
|
||||
assigned_network = 0
|
||||
|
||||
if cfg.fixed is not None:
|
||||
agents, assigned_total, assigned_network = _from_fixed(
|
||||
cfg.fixed, topology=cfg.topology, default=cfg
|
||||
)
|
||||
|
||||
n = cfg.n
|
||||
|
||||
if cfg.distribution:
|
||||
topo_size = len(topology) if topology else 0
|
||||
|
||||
networked = []
|
||||
total = []
|
||||
|
||||
for d in cfg.distribution:
|
||||
if d.strategy == config.Strategy.topology:
|
||||
topo = d.topology if ("topology" in d.__fields_set__) else cfg.topology
|
||||
if not topo:
|
||||
raise ValueError(
|
||||
'The "topology" strategy only works if the topology parameter is set to True'
|
||||
)
|
||||
if not topo_size:
|
||||
raise ValueError(
|
||||
f"Topology does not have enough free nodes to assign one to the agent"
|
||||
)
|
||||
|
||||
networked.append(d)
|
||||
|
||||
if d.strategy == config.Strategy.total:
|
||||
if not cfg.n:
|
||||
raise ValueError(
|
||||
'Cannot use the "total" strategy without providing the total number of agents'
|
||||
)
|
||||
total.append(d)
|
||||
|
||||
if networked:
|
||||
new_agents = _from_distro(
|
||||
networked,
|
||||
n=topo_size - assigned_network,
|
||||
topology=topo,
|
||||
default=cfg,
|
||||
random=random,
|
||||
)
|
||||
assigned_total += len(new_agents)
|
||||
assigned_network += len(new_agents)
|
||||
agents += new_agents
|
||||
|
||||
if total:
|
||||
remaining = n - assigned_total
|
||||
agents += _from_distro(total, n=remaining, default=cfg, random=random)
|
||||
|
||||
if assigned_network < topo_size:
|
||||
utils.logger.warn(
|
||||
f"The total number of agents does not match the total number of nodes in "
|
||||
"every topology. This may be due to a definition error: assigned: "
|
||||
f"{ assigned } total size: { topo_size }"
|
||||
)
|
||||
|
||||
return agents
|
||||
|
||||
|
||||
def _from_fixed(
|
||||
lst: List[config.FixedAgentConfig],
|
||||
topology: bool,
|
||||
default: config.SingleAgentConfig,
|
||||
) -> List[Dict[str, Any]]:
|
||||
agents = []
|
||||
|
||||
counts_total = 0
|
||||
counts_network = 0
|
||||
|
||||
for fixed in lst:
|
||||
agent = {}
|
||||
if default:
|
||||
agent = default.state.copy()
|
||||
agent.update(fixed.state)
|
||||
cls = serialization.deserialize(
|
||||
fixed.agent_class or (default and default.agent_class)
|
||||
)
|
||||
agent["agent_class"] = cls
|
||||
topo = (
|
||||
fixed.topology
|
||||
if ("topology" in fixed.__fields_set__)
|
||||
else topology or default.topology
|
||||
)
|
||||
|
||||
if topo:
|
||||
agent["topology"] = True
|
||||
counts_network += 1
|
||||
if not fixed.hidden:
|
||||
counts_total += 1
|
||||
agents.append(agent)
|
||||
|
||||
return agents, counts_total, counts_network
|
||||
|
||||
|
||||
def _from_distro(
|
||||
distro: List[config.AgentDistro],
|
||||
n: int,
|
||||
default: config.SingleAgentConfig,
|
||||
random,
|
||||
topology: str = None
|
||||
) -> List[Dict[str, Any]]:
|
||||
|
||||
agents = []
|
||||
|
||||
if n is None:
|
||||
if any(lambda dist: dist.n is None, distro):
|
||||
raise ValueError(
|
||||
"You must provide a total number of agents, or the number of each type"
|
||||
)
|
||||
n = sum(dist.n for dist in distro)
|
||||
|
||||
weights = list(dist.weight if dist.weight is not None else 1 for dist in distro)
|
||||
minw = min(weights)
|
||||
norm = list(weight / minw for weight in weights)
|
||||
total = sum(norm)
|
||||
chunk = n // total
|
||||
|
||||
# random.choices would be enough to get a weighted distribution. But it can vary a lot for smaller k
|
||||
# So instead we calculate our own distribution to make sure the actual ratios are close to what we would expect
|
||||
|
||||
# Calculate how many times each has to appear
|
||||
indices = list(
|
||||
chain.from_iterable([idx] * int(n * chunk) for (idx, n) in enumerate(norm))
|
||||
)
|
||||
|
||||
# Complete with random agents following the original weight distribution
|
||||
if len(indices) < n:
|
||||
indices += random.choices(
|
||||
list(range(len(distro))),
|
||||
weights=[d.weight for d in distro],
|
||||
k=n - len(indices),
|
||||
)
|
||||
|
||||
# Deserialize classes for efficiency
|
||||
classes = list(
|
||||
serialization.deserialize(i.agent_class or default.agent_class) for i in distro
|
||||
)
|
||||
|
||||
# Add them in random order
|
||||
random.shuffle(indices)
|
||||
|
||||
for idx in indices:
|
||||
d = distro[idx]
|
||||
agent = d.state.copy()
|
||||
cls = classes[idx]
|
||||
agent["agent_class"] = cls
|
||||
if default:
|
||||
agent.update(default.state)
|
||||
topology = (
|
||||
d.topology
|
||||
if ("topology" in d.__fields_set__)
|
||||
else topology or default.topology
|
||||
)
|
||||
if topology:
|
||||
agent["topology"] = topology
|
||||
agents.append(agent)
|
||||
|
||||
return agents
|
||||
def delay(self, delay=1):
|
||||
return time.Delay(delay)
|
||||
|
||||
|
||||
from .network_agents import *
|
||||
from .fsm import *
|
||||
from .evented import *
|
||||
from typing import Optional
|
||||
from .view import *
|
||||
|
||||
|
||||
class Agent(NetworkAgent, FSM, EventedAgent):
|
||||
"""Default agent class, has both network and event capabilities"""
|
||||
class Noop(EventedAgent, BaseAgent):
|
||||
def step(self):
|
||||
return
|
||||
|
||||
|
||||
from ..environment import NetworkEnvironment
|
||||
class Agent(FSM, EventedAgent, NetworkAgent):
|
||||
"""Default agent class, has network, FSM and event capabilities"""
|
||||
|
||||
|
||||
# Additional types of agents
|
||||
from .BassModel import *
|
||||
from .IndependentCascadeModel import *
|
||||
from .SISaModel import *
|
||||
from .CounterModel import *
|
||||
|
||||
|
||||
try:
|
||||
import scipy
|
||||
from .Geo import Geo
|
||||
except ImportError:
|
||||
import sys
|
||||
|
||||
print("Could not load the Geo Agent, scipy is not installed", file=sys.stderr)
|
||||
|
||||
|
||||
def custom(cls, **kwargs):
|
||||
"""Create a new class from a template class and keyword arguments"""
|
||||
return type(cls.__name__, (cls,), kwargs)
|
||||
|
@@ -1,77 +1,34 @@
|
||||
from . import BaseAgent
|
||||
from ..events import Message, Tell, Ask, TimedOut
|
||||
from ..time import BaseCond
|
||||
from .. import environment, events
|
||||
from functools import partial
|
||||
from collections import deque
|
||||
from types import coroutine
|
||||
|
||||
|
||||
class ReceivedOrTimeout(BaseCond):
|
||||
def __init__(
|
||||
self, agent, expiration=None, timeout=None, check=True, ignore=False, **kwargs
|
||||
):
|
||||
if expiration is None:
|
||||
if timeout is not None:
|
||||
expiration = agent.now + timeout
|
||||
self.expiration = expiration
|
||||
self.ignore = ignore
|
||||
self.check = check
|
||||
super().__init__(**kwargs)
|
||||
|
||||
def expired(self, time):
|
||||
return self.expiration and self.expiration < time
|
||||
|
||||
def ready(self, agent, time):
|
||||
return len(agent._inbox) or self.expired(time)
|
||||
|
||||
def return_value(self, agent):
|
||||
if not self.ignore and self.expired(agent.now):
|
||||
raise TimedOut("No messages received")
|
||||
if self.check:
|
||||
agent.check_messages()
|
||||
return None
|
||||
|
||||
def schedule_next(self, time, delta, first=False):
|
||||
if self._delta is not None:
|
||||
delta = self._delta
|
||||
return (time + delta, self)
|
||||
|
||||
def __repr__(self):
|
||||
return f"ReceivedOrTimeout(expires={self.expiration})"
|
||||
# from soilent import Scheduler
|
||||
|
||||
|
||||
class EventedAgent(BaseAgent):
|
||||
# scheduler_class = Scheduler
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self._inbox = deque()
|
||||
self._processed = 0
|
||||
assert isinstance(self.model, environment.EventedEnvironment), "EventedAgent requires an EventedEnvironment"
|
||||
self.model.register(self)
|
||||
|
||||
def on_receive(self, *args, **kwargs):
|
||||
pass
|
||||
def received(self, **kwargs):
|
||||
return self.model.received(agent=self, **kwargs)
|
||||
|
||||
def received(self, *args, **kwargs):
|
||||
return ReceivedOrTimeout(self, *args, **kwargs)
|
||||
def tell(self, msg, **kwargs):
|
||||
return self.model.tell(msg, recipient=self, **kwargs)
|
||||
|
||||
def tell(self, msg, sender=None):
|
||||
self._inbox.append(Tell(timestamp=self.now, payload=msg, sender=sender))
|
||||
def broadcast(self, msg, **kwargs):
|
||||
return self.model.broadcast(msg, sender=self, **kwargs)
|
||||
|
||||
def ask(self, msg, timeout=None, **kwargs):
|
||||
ask = Ask(timestamp=self.now, payload=msg, sender=self)
|
||||
self._inbox.append(ask)
|
||||
expiration = float("inf") if timeout is None else self.now + timeout
|
||||
return ask.replied(expiration=expiration, **kwargs)
|
||||
def ask(self, msg, **kwargs):
|
||||
return self.model.ask(msg, recipient=self, **kwargs)
|
||||
|
||||
def check_messages(self):
|
||||
changed = False
|
||||
while self._inbox:
|
||||
msg = self._inbox.popleft()
|
||||
self._processed += 1
|
||||
if msg.expired(self.now):
|
||||
continue
|
||||
changed = True
|
||||
reply = self.on_receive(msg.payload, sender=msg.sender)
|
||||
if isinstance(msg, Ask):
|
||||
msg.reply = reply
|
||||
return changed
|
||||
def process_messages(self):
|
||||
return self.model.process_messages(self.model.inbox_for(self))
|
||||
|
||||
|
||||
Evented = EventedAgent
|
||||
|
@@ -1,47 +1,79 @@
|
||||
from . import MetaAgent, BaseAgent
|
||||
from ..time import Delta
|
||||
|
||||
from .. import time
|
||||
from types import coroutine
|
||||
from functools import partial, wraps
|
||||
import inspect
|
||||
|
||||
|
||||
class State:
|
||||
__slots__ = ("awaitable", "f", "attribute", "generator", "name", "default")
|
||||
|
||||
def __init__(self, f, name, default, generator, awaitable):
|
||||
self.f = f
|
||||
self.name = name
|
||||
self.attribute = "_{}".format(name)
|
||||
self.generator = generator
|
||||
self.awaitable = awaitable
|
||||
self.default = default
|
||||
|
||||
@property
|
||||
def id(self):
|
||||
return self.name
|
||||
|
||||
def __get__(self, obj, owner=None):
|
||||
if obj is None:
|
||||
return self
|
||||
try:
|
||||
return getattr(obj, self.attribute)
|
||||
except AttributeError:
|
||||
b = self.bind(obj)
|
||||
setattr(obj, self.attribute, b)
|
||||
return b
|
||||
|
||||
def bind(self, obj):
|
||||
bs = BoundState(self.f, self.name, self.default, self.generator, self.awaitable, obj=obj)
|
||||
setattr(obj, self.name, bs)
|
||||
return bs
|
||||
|
||||
def __call__(self, *args, **kwargs):
    raise Exception("States should not be called directly")


class BoundState(State):
    __slots__ = ("obj", )

    def __init__(self, *args, obj):
        super().__init__(*args)
        self.obj = obj

    @coroutine
    def __call__(self):
        if self.generator or self.awaitable:
            f = self.f
            next_state = yield from f(self.obj)
            return next_state
        else:
            return self.f(self.obj)

    def delay(self, delta=0):
        return self, self.obj.delay(delta)

    def at(self, when):
        return self, self.obj.at(when)


def state(name=None, default=False):
    def decorator(func, name=None):
        """
        A state function should return either a state id, or a tuple (state_id, when)
        The default value for state_id is the current state id.
        The default value for when is the interval defined in the environment.
        """
        if inspect.isgeneratorfunction(func):
            orig_func = func

            @wraps(func)
            def func(self):
                while True:
                    if not self._coroutine:
                        self._coroutine = orig_func(self)
                    try:
                        if self._last_except:
                            n = self._coroutine.throw(self._last_except)
                        else:
                            n = self._coroutine.send(self._last_return)
                        if n:
                            return None, n
                        return n
                    except StopIteration as ex:
                        self._coroutine = None
                        next_state = ex.value
                        if next_state is not None:
                            self._set_state(next_state)
                        return next_state
                    finally:
                        self._last_return = None
                        self._last_except = None

        func.id = name or func.__name__
        func.is_default = default
        return func
    name = name or func.__name__
    generator = inspect.isgeneratorfunction(func)
    awaitable = inspect.iscoroutinefunction(func) or inspect.isasyncgen(func)
    return State(func, name, default, generator, awaitable)

    if callable(name):
        return decorator(name)
@@ -50,7 +82,7 @@ def state(name=None, default=False):


def default_state(func):
    func.is_default = True
    func.default = True
    return func


@@ -62,42 +94,54 @@ class MetaFSM(MetaAgent):
        for i in bases:
            if isinstance(i, MetaFSM):
                for state_id, state in i._states.items():
                    if state.is_default:
                    if state.default:
                        default_state = state
                    states[state_id] = state

        # Add new states
        for attr, func in namespace.items():
            if hasattr(func, "id"):
                if func.is_default:
            if isinstance(func, State):
                if func.default:
                    default_state = func
                states[func.id] = func
                states[func.name] = func

        namespace.update(
            {
                "_default_state": default_state,
                "_state": default_state,
                "_states": states,
            }
        )

        return super(MetaFSM, mcls).__new__(
        cls = super(MetaFSM, mcls).__new__(
            mcls=mcls, name=name, bases=bases, namespace=namespace
        )
        for (k, v) in states.items():
            setattr(cls, k, v)
        return cls


class FSM(BaseAgent, metaclass=MetaFSM):
    def __init__(self, init=True, **kwargs):
    def __init__(self, init=True, state_id=None, **kwargs):
        super().__init__(**kwargs, init=False)
        if not hasattr(self, "state_id"):
            if not self._default_state:
                raise ValueError(
                    "No default state specified for {}".format(self.unique_id)
                )
            self.state_id = self._default_state.id
        bound_states = {}
        for (k, v) in list(self._states.items()):
            if isinstance(v, State):
                v = v.bind(self)
            bound_states[k] = v
            setattr(self, k, v)

        self._states = bound_states

        if state_id is not None:
            self._set_state(state_id)
        else:
            self._set_state(self._state)
        # If more than "dead" state is defined, but no default state
        if len(self._states) > 1 and not self._state:
            raise ValueError(
                f"No default state specified for {type(self)}({self.unique_id})"
            )

        self._coroutine = None
        self.default_interval = Delta(self.model.interval)
        self._set_state(self.state_id)
        if init:
            self.init()

@@ -105,44 +149,53 @@ class FSM(BaseAgent, metaclass=MetaFSM):
    def states(cls):
        return list(cls._states.keys())

    @property
    def state_id(self):
        return self._state.name

    def set_state(self, value):
        if self.now > 0:
            raise ValueError("Cannot change state after init")
        self._set_state(value)

    @coroutine
    def step(self):
        self.debug(f"Agent {self.unique_id} @ state {self.state_id}")

        self._check_alive()
        next_state = self._states[self.state_id](self)

        when = None
        try:
            next_state, *when = next_state
            if not when:
                when = None
            elif len(when) == 1:
                when = when[0]
        if self._state is None:
            if len(self._states) == 1:
                raise Exception("Agent class has no valid states: {}. Make sure to define states or define a custom step function".format(self.__class__.__name__))
            else:
                raise ValueError(
                    "Too many values returned. Only state (and time) allowed"
                )
        except TypeError:
            pass
                raise Exception("Invalid state (None) for agent {}".format(self))

        if next_state is not None:
        next_state = yield from self._state()

        try:
            next_state, when = next_state
            self._set_state(next_state)
            return when
        except (TypeError, ValueError) as ex:
            try:
                self._set_state(next_state)
                return None
            except ValueError:
                return next_state

        return when or self.default_interval

    def _set_state(self, state, when=None):
        if hasattr(state, "id"):
            state = state.id
        if state not in self._states:
    def _set_state(self, state):
        if state is None:
            return
        if isinstance(state, str):
            if state not in self._states:
                raise ValueError("{} is not a valid state".format(state))
            state = self._states[state]
        if isinstance(state, State):
            state = state.bind(self)
        elif not isinstance(state, BoundState):
            raise ValueError("{} is not a valid state".format(state))
        self.state_id = state
        if when is not None:
            self.model.schedule.add(self, when=when)
        return state
        self._state = state

    def die(self, *args, **kwargs):
        return self.dead, super().die(*args, **kwargs)
        super().die(*args, **kwargs)
        return self.dead.at(time.INFINITY)

    @state
    def dead(self):
        return self.die()
        return time.INFINITY
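As an illustrative aside on the FSM changes above (an editor's sketch, not part of any commit in this comparison): a generator state can yield plain time values and use the new `BoundState.delay`/`at` helpers to transition. The `Walker` class below is hypothetical, and it assumes `soil.agents` re-exports `FSM`, `state` and `default_state` as shown in the module above.

```python
# Hypothetical sketch of a generator-based FSM state, assuming the API above.
from soil import agents


class Walker(agents.FSM):
    steps_taken = 0

    @agents.default_state
    @agents.state
    def walking(self):
        for _ in range(3):
            self.steps_taken += 1
            yield 1                    # stay in this state, resume after 1 time unit
        return self.resting.delay(2)   # switch state and wake up 2 time units later

    @agents.state
    def resting(self):
        return self.die()              # die() now returns self.dead.at(time.INFINITY)
```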
64
soil/agents/meta.py
Normal file
@@ -0,0 +1,64 @@
from abc import ABCMeta
from copy import copy
from functools import wraps
from .. import time
from ..decorators import syncify, while_alive

import types
import inspect


class MetaAnnotations(ABCMeta):
    """This metaclass sets default values for agents based on class attributes"""
    def __new__(mcls, name, bases, namespace):
        defaults = {}

        # Re-use defaults from inherited classes
        for i in bases:
            if isinstance(i, MetaAgent):
                defaults.update(i._defaults)

        new_nmspc = {
            "_defaults": defaults,
        }

        for attr, func in namespace.items():
            if (
                isinstance(func, types.FunctionType)
                or isinstance(func, property)
                or isinstance(func, classmethod)
                or attr[0] == "_"
            ):
                new_nmspc[attr] = func
            elif attr == "defaults":
                defaults.update(func)
            elif inspect.isfunction(func):
                new_nmspc[attr] = func
            else:
                defaults[attr] = copy(func)

        return super().__new__(mcls, name, bases, new_nmspc)


class AutoAgent(ABCMeta):
    def __new__(mcls, name, bases, namespace):
        if "step" in namespace:
            func = namespace["step"]
            namespace["_orig_step"] = func
            if inspect.isfunction(func):
                if inspect.isgeneratorfunction(func) or inspect.iscoroutinefunction(func):
                    func = syncify(func, method=True)
                namespace["step"] = while_alive(func)
            elif inspect.isasyncgenfunction(func):
                raise ValueError("Illegal step function: {}. It probably mixes both async/await and yield".format(func))
            else:
                raise ValueError("Illegal step function: {}".format(func))

        # Add attributes for their use in the decorated functions
        return super().__new__(mcls, name, bases, namespace)


class MetaAgent(AutoAgent, MetaAnnotations):
    """This metaclass sets default values for agents based on class attributes"""
    pass

@@ -1,12 +1,14 @@
from . import BaseAgent
from .. import environment


class NetworkAgent(BaseAgent):
    def __init__(self, *args, topology=None, init=True, node_id=None, **kwargs):
        super().__init__(*args, init=False, **kwargs)
        assert isinstance(self.model, environment.NetworkEnvironment), "NetworkAgent requires a NetworkEnvironment"

        self.G = topology or self.model.G
        assert self.G
        assert self.G is not None, "Network agents should have a network"
        if node_id is None:
            nodes = self.random.choices(list(self.G.nodes), k=len(self.G))
            for n_id in nodes:
@@ -16,7 +18,7 @@ class NetworkAgent(BaseAgent):
            else:
                node_id = len(self.G)
                self.info(f"All nodes ({len(self.G)}) have an agent assigned, adding a new node to the graph for agent {self.unique_id}")
                self.G.add_node(node_id)
                self.G.add_node(node_id, find_unassigned=True)
        assert node_id is not None
        self.G.nodes[node_id]["agent"] = self
        self.node_id = node_id
@@ -25,8 +27,6 @@ class NetworkAgent(BaseAgent):

    def count_neighbors(self, state_id=None, **kwargs):
        return len(self.get_neighbors(state_id=state_id, **kwargs))
        if init:
            self.init()

    def iter_neighbors(self, **kwargs):
        return self.iter_agents(limit_neighbors=True, **kwargs)
139
soil/agents/view.py
Normal file
@@ -0,0 +1,139 @@
from collections.abc import Mapping, Set
from itertools import islice
from mesa import Agent


class AgentView(Mapping, Set):
    """A lazy-loaded list of agents."""

    __slots__ = ("_agents", "agents_by_type")

    def __init__(self, agents, agents_by_type):
        self._agents = agents
        self.agents_by_type = agents_by_type

    def __getstate__(self):
        return {"_agents": self._agents}

    def __setstate__(self, state):
        self._agents = state["_agents"]

    # Mapping methods
    def __len__(self):
        return len(self._agents)

    def __iter__(self):
        yield from self._agents.values()

    def __getitem__(self, agent_id):
        if isinstance(agent_id, slice):
            raise ValueError(f"Slicing is not supported")
        if agent_id in self._agents:
            return self._agents[agent_id]
        raise ValueError(f"Agent {agent_id} not found")

    def filter(self, agent_class=None, include_subclasses=True, **kwargs):
        if agent_class and self.agents_by_type:
            if not include_subclasses:
                return filter_agents(self.agents_by_type[agent_class],
                                     **kwargs)
            else:
                d = {}
                for k, v in self.agents_by_type.items():
                    if (k == agent_class) or issubclass(k, agent_class):
                        d.update(v)
                return filter_agents(d, **kwargs)
        return filter_agents(self._agents, agent_class=agent_class, **kwargs)

    def one(self, *args, **kwargs):
        try:
            return next(self.filter(*args, **kwargs))
        except StopIteration:
            return None

    def __call__(self, *args, **kwargs):
        return list(self.filter(*args, **kwargs))

    def __contains__(self, agent_id):
        if isinstance(agent_id, Agent):
            agent_id = agent_id.unique_id
        return agent_id in self._agents

    def __str__(self):
        return str(list(unique_id for unique_id in self.keys()))

    def __repr__(self):
        return f"{self.__class__.__name__}({self})"


def filter_agents(
    agents: dict,
    *id_args,
    unique_id=None,
    state_id=None,
    agent_class=None,
    ignore=None,
    state=None,
    limit=None,
    **kwargs,
):
    """
    Filter agents given as a dict, by the criteria given as arguments (e.g., certain type or state id).
    """
    assert isinstance(agents, dict)

    ids = []

    if unique_id is not None:
        if isinstance(unique_id, list):
            ids += unique_id
        else:
            ids.append(unique_id)

    if id_args:
        ids += id_args

    if ids:
        f = list(agents[aid] for aid in ids if aid in agents)
    else:
        f = agents.values()

    if state_id is not None and not isinstance(state_id, (tuple, list)):
        state_id = tuple([state_id])

    if ignore:
        f = filter(lambda x: x not in ignore, f)

    if state_id is not None:
        f = filter(lambda agent: agent.get("state_id", None) in state_id, f)

    if agent_class is not None:
        f = filter(lambda agent: isinstance(agent, agent_class), f)

    state = state or dict()
    state.update(kwargs)

    for k, vs in state.items():
        if not isinstance(vs, list):
            vs = [vs]
        f = filter(lambda agent: any(getattr(agent, k, None) == v for v in vs), f)

    if limit is not None:
        f = islice(f, limit)

    return AgentResult(f)


class AgentResult:
    def __init__(self, iterator):
        self.iterator = iterator

    def limit(self, limit):
        self.iterator = islice(self.iterator, limit)
        return self

    def __iter__(self):
        return iter(self.iterator)

    def __next__(self):
        return next(self.iterator)
@@ -1,12 +1,11 @@
import os
import sys
import sqlalchemy
import pandas as pd
from collections import namedtuple

def plot(env, agent_df=None, model_df=None, steps=False, ignore=["agent_count", ]):
    """Plot the model dataframe and agent dataframe together."""
    if agent_df is None:
        agent_df = env.agent_df()
    if model_df is None:
        model_df = env.model_df()
    ignore = list(ignore)
@@ -16,12 +15,22 @@ def plot(env, agent_df=None, model_df=None, steps=False, ignore=["agent_count",
        ignore.append("time")

    ax = model_df.drop(ignore, axis='columns').plot();
    if agent_df is None:
        try:
            agent_df = env.agent_df()
        except UserWarning:
            print("No agent dataframe provided and no agent reporters found. "
                  "Skipping agent plot.", file=sys.stderr)
            return
    if not agent_df.empty:
        agent_df.unstack().apply(lambda x: x.value_counts(),
                                 axis=1).fillna(0).plot(ax=ax, secondary_y=True);
                                 axis=1).fillna(0).plot(ax=ax,
                                                        secondary_y=True)


Results = namedtuple("Results", ["config", "parameters", "env", "agents"])
#TODO implement reading from CSV and SQLITE
#TODO implement reading from CSV

def read_sql(fpath=None, name=None, include_agents=False):
    if not (fpath is None) ^ (name is None):
        raise ValueError("Specify either a path or a simulation name")
@@ -35,15 +44,10 @@ def read_sql(fpath=None, name=None, include_agents=False):
    with engine.connect() as conn:
        env = pd.read_sql_table("env", con=conn,
                                index_col="step").reset_index().set_index([
                                    "simulation_id", "params_id",
                                    "iteration_id", "step"
                                    "params_id", "iteration_id", "step"
                                ])
        agents = pd.read_sql_table("agents", con=conn, index_col=["simulation_id", "params_id", "iteration_id", "step", "agent_id"])
        agents = pd.read_sql_table("agents", con=conn, index_col=["params_id", "iteration_id", "step", "agent_id"])
        config = pd.read_sql_table("configuration", con=conn, index_col="simulation_id")
        parameters = pd.read_sql_table("parameters", con=conn, index_col=["iteration_id", "params_id", "simulation_id"])
        try:
            parameters = parameters.pivot(columns="key", values="value")
        except Exception as e:
            print(f"warning: coult not pivot parameters: {e}")
        parameters = pd.read_sql_table("parameters", con=conn, index_col=["simulation_id", "params_id", "iteration_id"])

        return Results(config, parameters, env, agents)
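An illustrative usage sketch (editor's addition, not part of the diff): assuming the `read_sql` helper above lives in `soil.analysis`, reading back a results database might look like the following. The sqlite path is hypothetical.

```python
# Hypothetical sketch of reading back simulation results with read_sql.
from soil.analysis import read_sql

res = read_sql(fpath="soil_output/my_simulation.sqlite")
print(res.env.head())         # model reporters, indexed by params_id, iteration_id and step
print(res.parameters.head())  # one row per iteration, pivoted by parameter key
print(res.config)             # the serialized Simulation configuration
```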
@@ -9,7 +9,7 @@ class SoilCollector(MDC):
        if 'agent_count' not in model_reporters:
            model_reporters['agent_count'] = lambda m: m.schedule.get_agent_count()
        if 'time' not in model_reporters:
            model_reporters['time'] = lambda m: m.now
            model_reporters['time'] = lambda m: m.schedule.time
        # if 'state_id' not in agent_reporters:
        #     agent_reporters['state_id'] = lambda agent: getattr(agent, 'state_id', None)

@@ -1,6 +1,42 @@
from functools import wraps
from .time import INFINITY

def report(f: property):
    if isinstance(f, property):
        setattr(f.fget, "add_to_report", True)
    else:
        setattr(f, "add_to_report", True)
        return f
    return f


def syncify(func, method=True):
    _coroutine = None

    @wraps(func)
    def wrapped(*args, **kwargs):
        if not method:
            nonlocal _coroutine
        else:
            _coroutine = getattr(args[0], "_coroutine", None)
        _coroutine = _coroutine or func(*args, **kwargs)
        try:
            val = _coroutine.send(None)
        except StopIteration as ex:
            _coroutine = None
            val = ex.value
        finally:
            if method:
                args[0]._coroutine = _coroutine
        return val

    return wrapped


def while_alive(func):
    @wraps(func)
    def wrapped(self, *args, **kwargs):
        if self.alive:
            return func(self, *args, **kwargs)
        return INFINITY

    return wrapped
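To illustrate the decorator above (an editor's sketch, not part of the commits): `syncify` advances a generator by one `send` per call, stashing the live coroutine on the instance between calls. The `Ticker` class below is hypothetical.

```python
# Hypothetical example of syncify driving a generator method one yield at a time.
from soil.decorators import syncify


class Ticker:
    _coroutine = None   # syncify(method=True) stores the live generator here

    @syncify
    def step(self):
        yield 1          # first call returns 1
        yield 5          # second call returns 5
        return 0         # third call returns 0 and clears the stored coroutine


t = Ticker()
print(t.step(), t.step(), t.step())  # prints: 1 5 0
```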
@@ -1,22 +1,20 @@
from __future__ import annotations

import os
import sqlite3
import math
import sys
import logging
import inspect
from datetime import datetime

from typing import Any, Callable, Dict, Optional, Union, List, Type
from collections import namedtuple
from time import time as current_time
from copy import deepcopy

from types import coroutine

import networkx as nx

from mesa import Model, Agent

from . import agents as agentmod, datacollection, serialization, utils, time, network, events
from mesa import Model
from time import time as current_time

from . import agents as agentmod, datacollection, utils, time, network, events


# TODO: maybe add metaclass to read attributes of a model
@@ -35,6 +33,9 @@ class BaseEnvironment(Model):
    """

    collector_class = datacollection.SoilCollector
    schedule_class = time.TimedActivation
    start_time = 0
    time_format = "%Y-%m-%d %H:%M:%S"

    def __new__(cls,
                *args: Any,
@@ -46,10 +47,10 @@ class BaseEnvironment(Model):
                tables: Optional[Any] = None,
                **kwargs: Any) -> Any:
        """Create a new model with a default seed value"""
        seed = seed or str(current_time())
        self = super().__new__(cls, *args, seed=seed, **kwargs)
        self.dir_path = dir_path or os.getcwd()
        collector_class = collector_class or cls.collector_class
        collector_class = serialization.deserialize(collector_class)
        self.datacollector = collector_class(
            model_reporters=model_reporters,
            agent_reporters=agent_reporters,
@@ -60,7 +61,7 @@ class BaseEnvironment(Model):
            if isinstance(v, property):
                v = v.fget
            if getattr(v, "add_to_report", False):
                self.add_model_reporter(k, v)
                self.add_model_reporter(k, k)

        return self

@@ -70,13 +71,15 @@ class BaseEnvironment(Model):
        id="unnamed_env",
        seed="default",
        dir_path=None,
        schedule_class=time.TimedActivation,
        interval=1,
        schedule=None,
        schedule_class=None,
        logger = None,
        agents: Optional[Dict] = None,
        collector_class: type = datacollection.SoilCollector,
        agent_reporters: Optional[Any] = None,
        model_reporters: Optional[Any] = None,
        start_time=None,
        time_format=None,
        tables: Optional[Any] = None,
        init: bool = True,
        **env_params,
@@ -93,14 +96,21 @@ class BaseEnvironment(Model):
            self.logger = logger
        else:
            self.logger = utils.logger.getChild(self.id)
        if start_time is not None:
            self.start_time = start_time
        if time_format is not None:
            self.time_format = time_format

        if schedule_class is None:
            schedule_class = time.TimedActivation
        else:
            schedule_class = serialization.deserialize(schedule_class)
        if isinstance(self.start_time, str):
            self.start_time = datetime.strptime(self.start_time, self.time_format)
        if isinstance(self.start_time, datetime):
            self.start_time = self.start_time.timestamp()

        self.interval = interval
        self.schedule = schedule_class(self)
        self.schedule = schedule
        if schedule is None:
            if schedule_class is None:
                schedule_class = self.schedule_class
            self.schedule = schedule_class(self, time=self.start_time)

        for (k, v) in env_params.items():
            self[k] = v
@@ -116,22 +126,23 @@ class BaseEnvironment(Model):

    @property
    def agents(self):
        return agentmod.AgentView(self.schedule._agents)
        return agentmod.AgentView(self.schedule._agents, getattr(self.schedule, "agents_by_type", None))

    def agent(self, *args, **kwargs):
        return agentmod.AgentView(self.schedule._agents).one(*args, **kwargs)
        return agentmod.AgentView(self.schedule._agents, self.schedule.agents_by_type).one(*args, **kwargs)

    def count_agents(self, *args, **kwargs):
        return sum(1 for i in self.agents(*args, **kwargs))


    def agent_df(self, steps=False):
        df = self.datacollector.get_agent_vars_dataframe()
        df.index.rename(["step", "agent_id"], inplace=True)
        if steps:
            df.index.rename(["step", "agent_id"], inplace=True)
            return df
        df = df.reset_index()
        model_df = self.datacollector.get_model_vars_dataframe()
        df.index = df.index.set_levels(model_df.time, level=0).rename(["time", "agent_id"])
        return df
        df['time'] = df.apply(lambda row: model_df.loc[row.step].time, axis=1)
        return df.groupby(["time", "agent_id"]).last()

    def model_df(self, steps=False):
        df = self.datacollector.get_model_vars_dataframe()
@@ -142,7 +153,7 @@ class BaseEnvironment(Model):

    @property
    def now(self):
        if self.schedule:
        if self.schedule is not None:
            return self.schedule.time
        raise Exception(
            "The environment has not been scheduled, so it has no sense of time"
@@ -161,11 +172,15 @@ class BaseEnvironment(Model):
        if unique_id is None:
            unique_id = self.next_id()

        a = serialization.deserialize(agent_class)(unique_id=unique_id, model=self, **agent)
        a = agent_class(unique_id=unique_id, model=self, **agent)

        self.schedule.add(a)
        return a

    def remove_agent(self, agent):
        agent.alive = False
        self.schedule.remove(agent)

    def add_agents(self, agent_classes: List[type], k, weights: Optional[List[float]] = None, **kwargs):
        if isinstance(agent_classes, type):
            agent_classes = [agent_classes]
@@ -194,32 +209,43 @@ class BaseEnvironment(Model):
        super().step()
        self.schedule.step()
        self.datacollector.collect(self)
        if self.now == time.INFINITY:
            self.running = False

        if self.logger.isEnabledFor(logging.DEBUG):
            msg = "Model data:\n"
            max_width = max(len(k) for k in self.datacollector.model_vars.keys())
            for (k, v) in self.datacollector.model_vars.items():
                msg += f"\t{k:<{max_width}}: {v[-1]:>6}\n"
                # msg += f"\t{k:<{max_width}}"
                msg += f"\t{k:<{max_width}}: {v[-1]}\n"
            self.logger.debug(f"--- Steps: {self.schedule.steps:^5} - Time: {self.now:^5} --- " + msg)

    def add_model_reporter(self, name, func=None):
        if not func:
            func = lambda env: getattr(env, name)
            func = name
        self.datacollector._new_model_reporter(name, func)

    def add_agent_reporter(self, name, agent_type=None):
    def add_agent_reporter(self, name, reporter=None, agent_class=None, *, agent_type=None):
        if agent_type:
            reporter = lambda a: getattr(a, name) if isinstance(a, agent_type) else None
        else:
            reporter = lambda a: getattr(a, name, None)
            print("agent_type is deprecated, use agent_class instead", file=sys.stderr)
        agent_class = agent_type or agent_class
        if not reporter and not agent_class:
            reporter = name
        if agent_class:
            if reporter:
                _orig = reporter
            else:
                _orig = lambda a: getattr(a, name)
            reporter = lambda a: (_orig(a) if isinstance(a, agent_class) else None)
        self.datacollector._new_agent_reporter(name, reporter)

    @classmethod
    def run(cls, *,
            name=None,
            iterations=1,
            num_processes=1, **kwargs):
        from .simulation import Simulation
        return Simulation(name=cls.__name__,
        return Simulation(name=name or cls.__name__,
                          model=cls, iterations=iterations,
                          num_processes=num_processes, **kwargs).run()

@@ -277,8 +303,6 @@ class NetworkEnvironment(BaseEnvironment):
        super().__init__(*args, **kwargs, init=False)

        self.agent_class = agent_class
        if agent_class:
            self.agent_class = serialization.deserialize(agent_class)
        if self.agent_class:
            self.populate_network(self.agent_class)
            self._check_agent_nodes()
@@ -297,6 +321,11 @@ class NetworkEnvironment(BaseEnvironment):
        self.G.nodes[node_id]["agent"] = a
        return a

    def remove_agent(self, agent, remove_node=True):
        super().remove_agent(agent)
        if remove_node and hasattr(agent, "remove_node"):
            agent.remove_node()

    def add_agents(self, *args, k=None, **kwargs):
        if not k and not self.G:
            raise ValueError("Cannot add agents to an empty network")
@@ -308,7 +337,15 @@ class NetworkEnvironment(BaseEnvironment):
        elif path is not None:
            topology = network.from_topology(path, dir_path=self.dir_path)
        elif generator is not None:
            topology = network.from_params(generator=generator, dir_path=self.dir_path, **network_params)
            params = dict(generator=generator,
                          dir_path=self.dir_path,
                          seed=self.random,
                          **network_params)
            try:
                topology = network.from_params(**params)
            except TypeError:
                del params["seed"]
                topology = network.from_params(**params)
        else:
            raise ValueError("topology must be a networkx.Graph or a string, or network_generator must be provided")
        self.G = topology
@@ -326,15 +363,17 @@ class NetworkEnvironment(BaseEnvironment):
            if getattr(agent, "alive", True):
                yield agent

    def add_node(self, agent_class, unique_id=None, node_id=None, **kwargs):
    def add_node(self, agent_class, unique_id=None, node_id=None, find_unassigned=False, **kwargs):
        if unique_id is None:
            unique_id = self.next_id()
        if node_id is None:
            node_id = network.find_unassigned(
                G=self.G, shuffle=True, random=self.random
            )
            if find_unassigned:
                node_id = network.find_unassigned(
                    G=self.G, shuffle=True, random=self.random
                )
            if node_id is None:
                node_id = f"node_for_{unique_id}"
                node_id = f"Node_for_agent_{unique_id}"
                assert node_id not in self.G.nodes

        if node_id not in self.G.nodes:
            self.G.add_node(node_id)
@@ -404,27 +443,115 @@ class NetworkEnvironment(BaseEnvironment):


class EventedEnvironment(BaseEnvironment):
    def broadcast(self, msg, sender=None, expiration=None, ttl=None, **kwargs):
        for agent in self.agents(**kwargs):
            if agent == sender:

    def __init__(self, *args, **kwargs):
        self._inbox = dict()
        super().__init__(*args, **kwargs)
        self._can_reschedule = hasattr(self.schedule, "add_callback") and hasattr(self.schedule, "remove_callback")
        self._can_reschedule = True
        self._callbacks = {}

    def register(self, agent):
        self._inbox[agent.unique_id] = []

    def inbox_for(self, agent):
        try:
            return self._inbox[agent.unique_id]
        except KeyError:
            raise ValueError(f"Trying to access inbox for unregistered agent: {agent} (class: {type(agent)}). "
                             "Make sure your agent is of type EventedAgent and it is registered with the environment.")

    @coroutine
    def _polling_callback(self, agent, expiration, delay):
        # this wakes the agent up at every step. It is better to wait until timeout (or inf)
        # and if a message is received before that, reschedule the agent
        # (That is implemented in the `received` method)
        inbox = self.inbox_for(agent)
        while self.now < expiration:
            if inbox:
                return self.process_messages(inbox)
            yield time.Delay(delay)
        raise events.TimedOut("No message received")

    @coroutine
    def received(self, agent, expiration=None, timeout=None, delay=1):
        if not expiration:
            if timeout:
                expiration = self.now + timeout
            else:
                expiration = float("inf")
        inbox = self.inbox_for(agent)
        if inbox:
            return self.process_messages(inbox)

        if self._can_reschedule:
            checked = False
            def cb():
                nonlocal checked
                if checked:
                    return time.INFINITY
                checked = True
                self.schedule.add_callback(self.now, agent.step)
            self.schedule.add_callback(expiration, cb)
            self._callbacks[agent.unique_id] = cb
            yield time.INFINITY
        res = yield from self._polling_callback(agent, expiration, delay)
        return res


    def tell(self, msg, recipient, sender=None, expiration=None, timeout=None, **kwargs):
        if expiration is None:
            expiration = float("inf") if timeout is None else self.now + timeout
        self._add_to_inbox(recipient.unique_id,
                           events.Tell(timestamp=self.now,
                                       payload=msg,
                                       sender=sender,
                                       expiration=expiration,
                                       **kwargs))

    def broadcast(self, msg, sender, ttl=None, expiration=None, agent_class=None):
        expiration = expiration if ttl is None else self.now + ttl
        # This only works for Soil environments. Mesa agents do not have an `agents` method
        sender_id = sender.unique_id
        for (agent_id, inbox) in self._inbox.items():
            if agent_id == sender_id:
                continue
            self.logger.debug(f"Telling {repr(agent)}: {msg} ttl={ttl}")
            try:
                inbox = agent._inbox
            except AttributeError:
                self.logger.info(
                    f"Agent {agent.unique_id} cannot receive events because it does not have an inbox"
                )
            if agent_class and not isinstance(self.agents(unique_id=agent_id), agent_class):
                continue
            # Allow for AttributeError exceptions in this part of the code
            inbox.append(
            self.logger.debug(f"Telling {agent_id}: {msg} ttl={ttl}")
            self._add_to_inbox(agent_id,
                events.Tell(
                    payload=msg,
                    sender=sender,
                    expiration=expiration if ttl is None else self.now + ttl,
                    expiration=expiration,
                )
            )
    def _add_to_inbox(self, inbox_id, msg):
        self._inbox[inbox_id].append(msg)
        if inbox_id in self._callbacks:
            cb = self._callbacks.pop(inbox_id)
            cb()

    @coroutine
    def ask(self, msg, recipient, sender=None, expiration=None, timeout=None, delay=1):
        ask = events.Ask(timestamp=self.now, payload=msg, sender=sender)
        self._add_to_inbox(recipient.unique_id, ask)
        expiration = float("inf") if timeout is None else self.now + timeout
        while self.now < expiration:
            if ask.reply:
                return ask.reply
            yield time.Delay(delay)
        raise events.TimedOut("No reply received")

    def process_messages(self, inbox):
        valid = list()
        for msg in inbox:
            if msg.expired(self.now):
                continue
            valid.append(msg)
        inbox.clear()
        return valid


class Environment(NetworkEnvironment, EventedEnvironment):
    """Default environment class, has both network and event capabilities"""
class Environment(EventedEnvironment, NetworkEnvironment):
    pass
@@ -1,4 +1,3 @@
from .time import BaseCond
from dataclasses import dataclass, field
from typing import Any
from uuid import uuid4
@@ -19,37 +18,18 @@ class Message:
    def expired(self, when):
        return self.expiration is not None and self.expiration < when


class Reply(Message):
    source: Message


class ReplyCond(BaseCond):
    def __init__(self, ask, *args, **kwargs):
        self._ask = ask
        super().__init__(*args, **kwargs)

    def ready(self, agent, time):
        return self._ask.reply is not None or self._ask.expired(time)

    def return_value(self, agent):
        if self._ask.expired(agent.now):
            raise TimedOut()
        return self._ask.reply

    def __repr__(self):
        return f"ReplyCond({self._ask.id})"


class Ask(Message):
    reply: Message = None

    def replied(self, expiration=None):
        return ReplyCond(self)


class Tell(Message):
    pass
    def __post_init__(self):
        assert self.sender is not None, "Tell requires a sender"


class TimedOut(Exception):
@@ -1,12 +1,11 @@
import os
import sys
from time import time as current_time
from datetime import datetime
from io import BytesIO
from sqlalchemy import create_engine
from textwrap import dedent, indent

import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd

@@ -76,6 +75,13 @@ class Exporter:
    def iteration_end(self, env, params, params_id):
        """Method to call when a iteration ends"""
        pass

    def env_id(self, env):
        try:
            return env.id
        except AttributeError:
            return f"{env.__class__.__name__}_{current_time()}"


    def output(self, f, mode="w", **kwargs):
        if not self.dump:
@@ -88,20 +94,21 @@ class Exporter:
            pass
        return open_or_reuse(f, mode=mode, backup=self.simulation.backup, **kwargs)

    def get_dfs(self, env, **kwargs):
    def get_dfs(self, env, params_id, **kwargs):
        yield from get_dc_dfs(env.datacollector,
                              simulation_id=self.simulation.id,
                              iteration_id=env.id,
                              params_id,
                              iteration_id=self.env_id(env),
                              **kwargs)


def get_dc_dfs(dc, **kwargs):
def get_dc_dfs(dc, params_id, **kwargs):
    dfs = {}
    dfe = dc.get_model_vars_dataframe()
    dfe.index.rename("step", inplace=True)
    dfs["env"] = dfe
    kwargs["params_id"] = params_id
    try:
        dfa = dc.get_agent_vars_dataframe()
        dfa = dc.get_agent_vars_dataframe()
        dfa.index.rename(["step", "agent_id"], inplace=True)
        dfs["agents"] = dfa
    except UserWarning:
@@ -110,9 +117,13 @@ def get_dc_dfs(dc, **kwargs):
        dfs[table_name] = dc.get_table_dataframe(table_name)
    for (name, df) in dfs.items():
        for (k, v) in kwargs.items():
            df[k] = v
        df.set_index(["simulation_id", "iteration_id"], append=True, inplace=True)

            if v:
                df[k] = v
            else:
                df[k] = pd.Series(dtype="object")
        df.reset_index(inplace=True)
        df.set_index(["params_id", "iteration_id"], inplace=True)

    yield from dfs.items()


@@ -124,21 +135,28 @@ class SQLite(Exporter):
        if not self.dump:
            logger.debug("NOT dumping results")
            return

        from sqlalchemy import create_engine

        self.dbpath = os.path.join(self.outdir, f"{self.simulation.name}.sqlite")
        logger.info("Dumping results to %s", self.dbpath)
        if self.simulation.backup:
            try_backup(self.dbpath, remove=True)

        if self.simulation.overwrite:
            if os.path.exists(self.dbpath):
                os.remove(self.dbpath)

        outdir = os.path.dirname(self.dbpath)
        if outdir and not os.path.exists(outdir):
            os.makedirs(outdir)

        self.engine = create_engine(f"sqlite:///{self.dbpath}", echo=False)

        sim_dict = {k: serialize(v)[0] for (k,v) in self.simulation.to_dict().items()}
        sim_dict["simulation_id"] = self.simulation.id
        df = pd.DataFrame([sim_dict])
        df.to_sql("configuration", con=self.engine, if_exists="append")
        df.reset_index().to_sql("configuration", con=self.engine, if_exists="append", index=False)

    def iteration_end(self, env, params, params_id, *args, **kwargs):
        if not self.dump:
@@ -146,17 +164,30 @@ class SQLite(Exporter):
            return

        with timer(
            "Dumping simulation {} iteration {}".format(self.simulation.name, env.id)
            "Dumping simulation {} iteration {}".format(self.simulation.name, self.env_id(env))
        ):

            pd.DataFrame([{"simulation_id": self.simulation.id,
            d = {"simulation_id": self.simulation.id,
                 "params_id": params_id,
                 "iteration_id": env.id,
                 "key": k,
                 "value": serialize(v)[0]} for (k,v) in params.items()]).to_sql("parameters", con=self.engine, if_exists="append")
                 "iteration_id": self.env_id(env),
            }
            for (k,v) in params.items():
                d[k] = serialize(v)[0]

            pd.DataFrame([d]).reset_index().to_sql("parameters",
                                                   con=self.engine,
                                                   if_exists="append",
                                                   index=False)
            pd.DataFrame([{
                "simulation_id": self.simulation.id,
                "params_id": params_id,
                "iteration_id": self.env_id(env),
            }]).reset_index().to_sql("iterations",
                                     con=self.engine,
                                     if_exists="append",
                                     index=False)

            for (t, df) in self.get_dfs(env, params_id=params_id):
                df.to_sql(t, con=self.engine, if_exists="append")
                df.reset_index().to_sql(t, con=self.engine, if_exists="append", index=False)

class csv(Exporter):
    """Export the state of each environment (and its agents) a CSV file for the simulation"""
@@ -167,15 +198,14 @@ class csv(Exporter):
    def iteration_end(self, env, params, params_id, *args, **kwargs):
        with timer(
            "[CSV] Dumping simulation {} iteration {} @ dir {}".format(
                self.simulation.name, env.id, self.outdir
                self.simulation.name, self.env_id(env), self.outdir
            )
        ):
            for (df_name, df) in self.get_dfs(env, params_id=params_id):
                with self.output("{}.{}.csv".format(env.id, df_name), mode="a") as f:
                with self.output("{}.{}.csv".format(self.env_id(env), df_name), mode="a") as f:
                    df.to_csv(f)


# TODO: reimplement GEXF exporting without history
class gexf(Exporter):
    def iteration_end(self, env, *args, **kwargs):
        if not self.dump:
@@ -183,11 +213,10 @@ class gexf(Exporter):
            return

        with timer(
            "[GEXF] Dumping simulation {} iteration {}".format(self.simulation.name, env.id)
            "[GEXF] Dumping simulation {} iteration {}".format(self.simulation.name, self.env_id(env))
        ):
            with self.output("{}.gexf".format(env.id), mode="wb") as f:
                network.dump_gexf(env.history_to_graph(), f)
                self.dump_gexf(env, f)
            with self.output("{}.gexf".format(self.env_id(env)), mode="wb") as f:
                nx.write_gexf(env.G, f)


class dummy(Exporter):
@@ -210,6 +239,7 @@ class dummy(Exporter):

class graphdrawing(Exporter):
    def iteration_end(self, env, *args, **kwargs):
        import matplotlib.pyplot as plt
        # Outside effects
        f = plt.figure()
        nx.draw(
@@ -219,16 +249,16 @@ class graphdrawing(Exporter):
            pos=nx.spring_layout(env.G, scale=100),
            ax=f.add_subplot(111),
        )
        with open("graph-{}.png".format(env.id)) as f:
        with open("graph-{}.png".format(self.env_id(env))) as f:
            f.savefig(f)


class summary(Exporter):
    """Print a summary of each iteration to sys.stdout"""

    def iteration_end(self, env, *args, **kwargs):
    def iteration_end(self, env, params_id, *args, **kwargs):
        msg = ""
        for (t, df) in self.get_dfs(env):
        for (t, df) in self.get_dfs(env, params_id):
            if not len(df):
                continue
            tabs = "\t" * 2
@@ -262,21 +292,5 @@ class YAML(Exporter):
            logger.info(f"Dumping simulation configuration to {self.outdir}")
            f.write(self.simulation.to_yaml())

class default(Exporter):
    """Default exporter. Writes sqlite results, as well as the simulation YAML"""

    def __init__(self, *args, exporter_cls=[], **kwargs):
        exporter_cls = exporter_cls or [YAML, SQLite]
        self.inner = [cls(*args, **kwargs) for cls in exporter_cls]

    def sim_start(self, *args, **kwargs):
        for exporter in self.inner:
            exporter.sim_start(*args, **kwargs)

    def sim_end(self, *args, **kwargs):
        for exporter in self.inner:
            exporter.sim_end(*args, **kwargs)

    def iteration_end(self, *args, **kwargs):
        for exporter in self.inner:
            exporter.iteration_end(*args, **kwargs)
default = SQLite
@@ -8,6 +8,8 @@ import importlib.machinery, importlib.util
from glob import glob
from itertools import product, chain

from contextlib import contextmanager

import yaml
import networkx as nx

@@ -101,7 +103,13 @@ def load_config(cfg):
        yield from load_files(cfg)


builtins = importlib.import_module("builtins")
_BUILTINS = None

def builtins():
    global _BUILTINS
    if not _BUILTINS:
        _BUILTINS = importlib.import_module("builtins")
    return _BUILTINS

KNOWN_MODULES = {
    'soil': None,
@@ -110,12 +118,12 @@ KNOWN_MODULES = {

MODULE_FILES = {}

def add_source_file(file):
def _add_source_file(file):
    """Add a file to the list of known modules"""
    file = os.path.abspath(file)
    if file in MODULE_FILES:
        logger.warning(f"File {file} already added as module {MODULE_FILES[file]}. Reloading")
        remove_source_file(file)
        _remove_source_file(file)
    modname = f"imported_module_{len(MODULE_FILES)}"
    loader = importlib.machinery.SourceFileLoader(modname, file)
    spec = importlib.util.spec_from_loader(loader.name, loader)
@@ -124,7 +132,7 @@ def add_source_file(file):
    MODULE_FILES[file] = modname
    KNOWN_MODULES[modname] = my_module

def remove_source_file(file):
def _remove_source_file(file):
    """Remove a file from the list of known modules"""
    file = os.path.abspath(file)
    modname = None
@@ -134,6 +142,18 @@ def remove_source_file(file):
    except KeyError as ex:
        raise ValueError(f"File {file} had not been added as a module: {ex}")


@contextmanager
def with_source(file=None):
    """Add a file to the list of known modules, and remove it afterwards"""
    if file:
        _add_source_file(file)
    try:
        yield
    finally:
        if file:
            _remove_source_file(file)

def get_module(modname):
    """Get a module from the list of known modules"""
    if modname not in KNOWN_MODULES or KNOWN_MODULES[modname] is None:
@@ -149,7 +169,7 @@ def name(value, known_modules=KNOWN_MODULES):
    if not isinstance(value, type):  # Get the class name first
        value = type(value)
    tname = value.__name__
    if hasattr(builtins, tname):
    if hasattr(builtins(), tname):
        return tname
    modname = value.__module__
    if modname == "__main__":
@@ -164,7 +184,7 @@ def name(value, known_modules=KNOWN_MODULES):


def serializer(type_):
    if type_ != "str" and hasattr(builtins, type_):
    if type_ != "str":
        return repr
    return lambda x: x

@@ -202,8 +222,8 @@ def deserializer(type_, known_modules=KNOWN_MODULES):
        return lambda x="": x
    if type_ == "None":
        return lambda x=None: None
    if hasattr(builtins, type_):  # Check if it's a builtin type
        cls = getattr(builtins, type_)
    if hasattr(builtins(), type_):  # Check if it's a builtin type
        cls = getattr(builtins(), type_)
        return lambda x=None: ast.literal_eval(x) if x is not None else cls()
    match = IS_CLASS.match(type_)
    if match:
@@ -23,7 +23,7 @@ import json


from . import serialization, exporters, utils, basestring, agents
from .environment import Environment
from . import environment
from .utils import logger, run_and_return_exceptions
from .debugging import set_trace

@@ -44,13 +44,11 @@ def do_not_run():

def _iter_queued():
    while _QUEUED:
        (cls, params) = _QUEUED.pop(0)
        yield replace(cls, parameters=params)
        slf = _QUEUED.pop(0)
        yield slf


# TODO: change documentation for simulation
# TODO: rename iterations to iterations
# TODO: make parameters a dict of iterable/any
@dataclass
class Simulation:
    """
@@ -68,7 +66,6 @@ class Simulation:
        dir_path: The directory path to use for the simulation.
        max_time: The maximum time to run the simulation.
        max_steps: The maximum number of steps to run the simulation.
        interval: The interval to use for the simulation.
        iterations: The number of iterations (times) to run the simulation.
        num_processes: The number of processes to use for the simulation. If greater than one, simulations will be performed in parallel. This may make debugging and error handling difficult.
        tables: The tables to use in the simulation datacollector
@@ -96,7 +93,6 @@ class Simulation:
    dir_path: str = field(default_factory=lambda: os.getcwd())
    max_time: float = None
    max_steps: int = None
    interval: int = 1
    iterations: int = 1
    num_processes: Optional[int] = 1
    exporters: Optional[List[str]] = field(default_factory=lambda: [exporters.default])
@@ -119,46 +115,26 @@ class Simulation:
        self.logger = logger.getChild(self.name)
        self.logger.setLevel(self.level)

        if self.source_file:
            source_file = self.source_file
            if not os.path.isabs(source_file):
                source_file = os.path.abspath(os.path.join(self.dir_path, source_file))
            serialization.add_source_file(source_file)
            self.source_file = source_file
        if self.source_file and (not os.path.isabs(self.source_file)):
            self.source_file = os.path.abspath(os.path.join(self.dir_path, self.source_file))
        with serialization.with_source(self.source_file):

        if isinstance(self.model, str):
            self.model = serialization.deserialize(self.model)
            if isinstance(self.model, str):
                self.model = serialization.deserialize(self.model)

        def deserialize_reporters(reporters):
            for (k, v) in reporters.items():
                if isinstance(v, str) and v.startswith("py:"):
                    reporters[k] = serialization.deserialize(v.split(":", 1)[1])
            return reporters

        self.agent_reporters = deserialize_reporters(self.agent_reporters)
        self.model_reporters = deserialize_reporters(self.model_reporters)
        self.tables = deserialize_reporters(self.tables)
        if self.source_file:
            serialization.remove_source_file(self.source_file)
        self.id = f"{self.name}_{current_time()}"
        self.agent_reporters = self.agent_reporters
        self.model_reporters = self.model_reporters
        self.tables = self.tables
        self.id = f"{self.name}_{current_time()}"

    def run(self, **kwargs):
        """Run the simulation and return the list of resulting environments"""
        if kwargs:
            return replace(self, **kwargs).run()
            res = replace(self, **kwargs)
            return res.run()

        self.logger.debug(
            dedent(
                """
                Simulation:
                ---
                """
            )
            + self.to_yaml()
        )
        param_combinations = self._collect_params(**kwargs)
        if _AVOID_RUNNING:
            _QUEUED.extend((self, param) for param in param_combinations)
            _QUEUED.append(self)
            return []

        self.logger.debug("Using exporters: %s", self.exporters or [])
@@ -178,6 +154,8 @@ class Simulation:
        for exporter in exporters:
            exporter.sim_start()

        param_combinations = self._collect_params(**kwargs)

        for params in tqdm(param_combinations, desc=self.name, unit="configuration"):
            for (k, v) in params.items():
                tqdm.write(f"{k} = {v}")
@@ -217,10 +195,7 @@ class Simulation:
    ):
        """Run the simulation and yield the resulting environments."""

        try:
            if self.source_file:
                serialization.add_source_file(self.source_file)

        with serialization.with_source(self.source_file):
            with utils.timer(f"running for config {params}"):
                if self.dry_run:
                    def func(*args, **kwargs):
@@ -231,15 +206,13 @@ class Simulation:
                for env in tqdm(utils.run_parallel(
                    func=func,
                    iterable=range(self.iterations),
                    num_processes=self.num_processes,
                    **params,
                ), total=self.iterations, leave=False):
                    if env is None and self.dry_run:
                        continue

                    yield env
        finally:
            if self.source_file:
                serialization.remove_source_file(self.source_file)

    def _get_env(self, iteration_id, params):
        """Create an environment for a iteration of the simulation"""
@@ -255,7 +228,6 @@ class Simulation:
            id=iteration_id,
            seed=f"{self.seed}_iteration_{iteration_id}",
            dir_path=self.dir_path,
            interval=self.interval,
            logger=self.logger.getChild(iteration_id),
            agent_reporters=agent_reporters,
            model_reporters=model_reporters,
@@ -369,9 +341,13 @@ def iter_from_py(pyfile, module_name='imported_file', **kwargs):
        sims.append(sim)
    for sim in _iter_queued():
        sims.append(sim)
    # Try to find environments to run, because we did not import a script that ran simulations
    if not sims:
        for (_name, sim) in inspect.getmembers(module, lambda x: inspect.isclass(x) and issubclass(x, Simulation)):
            sims.append(sim(**kwargs))
        for (_name, env) in inspect.getmembers(module,
                                               lambda x: inspect.isclass(x) and
                                               issubclass(x, environment.Environment) and
                                               (getattr(x, "__module__", None) != environment.__name__)):
            sims.append(Simulation(model=env, **kwargs))
    del sys.modules[module_name]
    assert not _AVOID_RUNNING
    if not sims:
393
soil/time.py
@@ -1,11 +1,14 @@
|
||||
from mesa.time import BaseScheduler
|
||||
from queue import Empty
|
||||
from heapq import heappush, heappop, heapreplace
|
||||
from collections import deque, defaultdict
|
||||
import math
|
||||
import logging
|
||||
|
||||
from inspect import getsource
|
||||
from numbers import Number
|
||||
from textwrap import dedent
|
||||
import random as random_std
|
||||
|
||||
from .utils import logger
|
||||
from mesa import Agent as MesaAgent
|
||||
@@ -13,194 +16,282 @@ from mesa import Agent as MesaAgent
|
||||
|
||||
INFINITY = float("inf")
|
||||
|
||||
class Delay:
|
||||
"""A delay object which can be used both as a return value and as an awaitable (in async code)."""
|
||||
__slots__ = ("delta", )
|
||||
def __init__(self, delta):
|
||||
self.delta = float(delta)
|
||||
|
||||
def __float__(self):
|
||||
return self.delta
|
||||
|
||||
def __eq__(self, other):
|
||||
return float(self) == float(other)
|
||||
|
||||
def __await__(self):
|
||||
return (yield self.delta)
|
||||
|
||||
class When:
|
||||
def __init__(self, when):
|
||||
raise Exception("The use of When is deprecated. Use the `Agent.at` and `Agent.delay` methods instead")
|
||||
|
||||
class Delta:
|
||||
def __init__(self, delta):
|
||||
raise Exception("The use of Delay is deprecated. Use the `Agent.at` and `Agent.delay` methods instead")
|
||||
|
||||
class DeadAgent(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class When:
|
||||
def __init__(self, time):
|
||||
if isinstance(time, When):
|
||||
return time
|
||||
self._time = time
|
||||
|
||||
def abs(self, time):
|
||||
return self._time
|
||||
|
||||
def schedule_next(self, time, delta, first=False):
|
||||
return (self._time, None)
|
||||
|
||||
|
||||
NEVER = When(INFINITY)
|
||||
|
||||
|
||||
class Delta(When):
|
||||
def __init__(self, delta):
|
||||
self._delta = delta
|
||||
|
||||
def abs(self, time):
|
||||
return self._time + self._delta
|
||||
|
||||
def __eq__(self, other):
|
||||
if isinstance(other, Delta):
|
||||
return self._delta == other._delta
|
||||
return False
|
||||
|
||||
def schedule_next(self, time, delta, first=False):
|
||||
return (time + self._delta, None)
|
||||
class Event(object):
|
||||
def __init__(self, when: float, func, order=1):
|
||||
self.when = when
|
||||
self.func = func
|
||||
self.order = order
|
||||
|
||||
def __repr__(self):
|
||||
return str(f"Delta({self._delta})")
|
||||
return f'Event @ {self.when} - Func: {self.func}'
|
||||
|
||||
def __lt__(self, other):
|
||||
return (self.when < other.when) or (self.when == other.when and self.order < other.order)
|
||||
|
||||
|
||||
class BaseCond:
|
||||
def __init__(self, msg=None, delta=None, eager=False):
|
||||
self._msg = msg
|
||||
self._delta = delta
|
||||
self.eager = eager
|
||||
|
||||
def schedule_next(self, time, delta, first=False):
|
||||
if first and self.eager:
|
||||
return (time, self)
|
||||
if self._delta:
|
||||
delta = self._delta
|
||||
return (time + delta, self)
|
||||
|
||||
def return_value(self, agent):
|
||||
return None
|
||||
|
||||
def __repr__(self):
|
||||
return self._msg or self.__class__.__name__
|
||||
|
||||
|
||||
class Cond(BaseCond):
|
||||
def __init__(self, func, *args, **kwargs):
|
||||
self._func = func
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
def ready(self, agent, time):
|
||||
return self._func(agent)
|
||||
|
||||
def __repr__(self):
|
||||
if self._msg:
|
||||
return self._msg
|
||||
return str(f'Cond("{dedent(getsource(self._func)).strip()}")')
|
||||
|
||||
|
||||
class TimedActivation(BaseScheduler):
|
||||
"""A scheduler which activates each agent when the agent requests.
|
||||
In each activation, each agent will update its 'next_time'.
|
||||
class PQueueSchedule:
|
||||
"""
|
||||
A scheduler which activates each function with a delay returned by the function at each step.
|
||||
If no delay is returned, a default of 1 is used.
|
||||
"""
|
||||
|
||||
def __init__(self, *args, shuffle=True, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self._next = {}
|
||||
def __init__(self, shuffle=True, seed=None, time=0, **kwargs):
|
||||
self._queue = []
|
||||
self._shuffle = shuffle
|
||||
# self.step_interval = getattr(self.model, "interval", 1)
|
||||
self.step_interval = self.model.interval
|
||||
self.logger = getattr(self.model, "logger", logger).getChild(f"time_{ self.model }")
|
||||
self.time = time
|
||||
self.steps = 0
|
||||
self.random = random_std.Random(seed)
|
||||
self.next_time = self.time
|
||||
|
||||
def add(self, agent: MesaAgent, when=None):
|
||||
def insert(self, when, callback, replace=False):
|
||||
if when is None:
|
||||
when = self.time
|
||||
elif isinstance(when, When):
|
||||
when = when.abs()
|
||||
|
||||
self._schedule(agent, None, when)
|
||||
super().add(agent)
|
||||
|
||||
def _schedule(self, agent, condition=None, when=None, replace=False):
|
||||
if condition:
|
||||
if not when:
|
||||
when, condition = condition.schedule_next(
|
||||
when or self.time, self.step_interval
|
||||
)
|
||||
else:
|
||||
if when is None:
|
||||
when = self.time + self.step_interval
|
||||
condition = None
|
||||
when = float(when)
|
||||
order = 1
|
||||
if self._shuffle:
|
||||
key = (when, self.model.random.random(), condition)
|
||||
else:
|
||||
key = (when, agent.unique_id, condition)
|
||||
self._next[agent.unique_id] = key
|
||||
order = self.random.random()
|
||||
event = Event(when, callback, order=order)
|
||||
if replace:
|
||||
heapreplace(self._queue, (key, agent))
|
||||
heapreplace(self._queue, event)
|
||||
else:
|
||||
heappush(self._queue, (key, agent))
|
||||
heappush(self._queue, event)
|
||||
|
||||
def remove(self, callback):
|
||||
for i, event in enumerate(self._queue):
|
||||
if callback == event.func:
|
||||
del self._queue[i]
|
||||
break
|
||||
|
||||
def __len__(self):
|
||||
return len(self._queue)
|
||||
|
||||
def step(self) -> None:
|
||||
"""
|
||||
Executes events in order, one at a time. After each step,
|
||||
an event will signal when it wants to be scheduled next.
|
||||
"""
|
||||
|
||||
if self.time == INFINITY:
|
||||
return
|
||||
|
||||
next_time = INFINITY
|
||||
|
||||
now = self.time
|
||||
|
||||
while self._queue:
|
||||
event = self._queue[0]
|
||||
when = event.when
|
||||
if when > now:
|
||||
next_time = when
|
||||
break
|
||||
|
||||
when = event.func()
|
||||
when = float(when) if when is not None else 1.0
|
||||
|
||||
if when == INFINITY:
|
||||
heappop(self._queue)
|
||||
continue
|
||||
|
||||
when += now
|
||||
|
||||
self.insert(when, event.func, replace=True)
|
||||
|
||||
self.steps += 1
|
||||
|
||||
self.time = next_time
|
||||
|
||||
if next_time == INFINITY:
|
||||
self.time = INFINITY
|
||||
return
|
||||
|
||||
|
||||
class Schedule:
    def __init__(self, shuffle=True, seed=None, time=0, **kwargs):
        self._queue = deque()
        self._shuffle = shuffle
        self.time = time
        self.steps = 0
        self.random = random_std.Random(seed)
        self.next_time = self.time

    def _find_loc(self, when=None):
        if when is None:
            when = self.time
        else:
            when = float(when)
        when = when or self.time
        pos = len(self._queue)
        for (ix, l) in enumerate(self._queue):
            if l[0] == when:
                return l[1]
            if l[0] > when:
                pos = ix
                break
        lst = []
        self._queue.insert(pos, (when, lst))
        return lst

    def insert(self, when, func, replace=False):
        if when == INFINITY:
            return
        lst = self._find_loc(when)
        lst.append(func)

    def add_bulk(self, funcs, when=None):
        lst = self._find_loc(when)
        n = len(funcs)
        #TODO: remove for performance
        before = len(self)
        lst.extend(funcs)
        assert len(self) == before + n

    def remove(self, func):
        for bucket in self._queue:
            for (ix, e) in enumerate(bucket):
                if e == func:
                    bucket.remove(ix)
                    return

    def __len__(self):
        return sum(len(bucket[1]) for bucket in self._queue)

    def step(self) -> None:
        """
        Executes events in order, one at a time. After each step,
        an event will signal when it wants to be scheduled next.
        """
        if not self._queue:
            return

        now = self.time

        next_time = self._queue[0][0]

        if next_time > now:
            self.time = next_time
            return

        bucket = self._queue.popleft()[1]
        if self._shuffle:
            self.random.shuffle(bucket)
        next_batch = defaultdict(list)
        for func in bucket:
            when = func()
            when = float(when) if when is not None else 1

            if when == INFINITY:
                continue

            when += now
            next_batch[when].append(func)

        for (when, bucket) in next_batch.items():
            self.add_bulk(bucket, when)

        self.steps += 1
        if self._queue:
            self.time = self._queue[0][0]
        else:
            self.time = INFINITY

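Schedule trades the heap for a time-sorted deque of buckets: every callback due at the same instant lands in one list, so a step pops a single bucket, runs it (optionally shuffled), and re-buckets the returned delays in bulk. A toy illustration of the bucketing idea (standalone, not the class above):

from collections import deque

# Callbacks due at the same time share one list; the deque of (time, bucket)
# pairs is kept sorted by time, mirroring Schedule._find_loc / insert.
queue = deque()

def insert(when, func):
    for ix, (t, bucket) in enumerate(queue):
        if t == when:
            bucket.append(func)
            return
        if t > when:
            queue.insert(ix, (when, [func]))
            return
    queue.append((when, [func]))

insert(3, lambda: "c")
insert(1, lambda: "a")
insert(1, lambda: "b")
assert [t for t, _ in queue] == [1, 3]
assert len(queue[0][1]) == 2   # two callbacks share the t=1 bucket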
class InnerActivation(BaseScheduler):
    inner_class = Schedule

    def __init__(self, model, shuffle=True, time=0, **kwargs):
        self.model = model
        self.logger = getattr(self.model, "logger", logger).getChild(f"time_{ self.model }")
        self._agents = {}
        self.agents_by_type = defaultdict(dict)
        self.inner = self.inner_class(shuffle=shuffle, seed=self.model._seed, time=time)

    @property
    def steps(self):
        return self.inner.steps

    @property
    def time(self):
        return self.inner.time

    def add(self, agent: MesaAgent, when=None):
        when = when or self.inner.time
        self.inner.insert(when, agent.step)
        agent_class = type(agent)
        self.agents_by_type[agent_class][agent.unique_id] = agent
        super().add(agent)

    def add_callback(self, when, cb):
        self.inner.insert(when, cb)

    def remove_callback(self, when, cb):
        self.inner.remove(cb)

    def remove(self, agent):
        del self._agents[agent.unique_id]
        del self.agents_by_type[type(agent)][agent.unique_id]
        self.inner.remove(agent.step)

    def step(self) -> None:
        """
        Executes agents in order, one at a time. After each step,
        an agent will signal when it wants to be scheduled next.
        """
        self.inner.step()

        self.logger.debug(f"Simulation step {self.time}")
        if not self.model.running or self.time == INFINITY:
            return
    def __len__(self):
        return len(self.inner)

        self.logger.debug(f"Queue length: %s", len(self._queue))

        while self._queue:
            ((when, _id, cond), agent) = self._queue[0]
            if when > self.time:
                break
class BucketTimedActivation(InnerActivation):
    inner_class = Schedule

            if cond:
                if not cond.ready(agent, self.time):
                    self._schedule(agent, cond, replace=True)
                    continue
                try:
                    agent._last_return = cond.return_value(agent)
                except Exception as ex:
                    agent._last_except = ex
            else:
                agent._last_return = None
                agent._last_except = None

            self.logger.debug("Stepping agent %s", agent)
            self._next.pop(agent.unique_id, None)
class PQueueActivation(InnerActivation):
    inner_class = PQueueSchedule

            try:
                returned = agent.step()
            except DeadAgent:
                agent.alive = False
                heappop(self._queue)
                continue

            # Check status for MESA agents
            if not getattr(agent, "alive", True):
                heappop(self._queue)
                continue
#Set the bucket implementation as default
TimedActivation = BucketTimedActivation

            if returned:
                next_check = returned.schedule_next(
                    self.time, self.step_interval, first=True
                )
                self._schedule(agent, when=next_check[0], condition=next_check[1], replace=True)
            else:
                next_check = (self.time + self.step_interval, None)
try:
    from soilent.soilent import BucketScheduler, PQueueScheduler

                self._schedule(agent, replace=True)
    class SoilentActivation(InnerActivation):
        inner_class = BucketScheduler
    class SoilentPQueueActivation(InnerActivation):
        inner_class = PQueueScheduler

            self.steps += 1

            if not self._queue:
                self.model.running = False
                self.time = INFINITY
                return

            next_time = self._queue[0][0][0]

            if next_time < self.time:
                raise Exception(
                    f"An agent has been scheduled for a time in the past, there is probably an error ({when} < {self.time})"
                )
            self.logger.debug("Updating time step: %s -> %s ", self.time, next_time)

            self.time = next_time
    # TimedActivation = SoilentBucketActivation
except ImportError:
    pass


class ShuffledTimedActivation(TimedActivation):
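The module ends by picking the bucket implementation as the default (TimedActivation = BucketTimedActivation) and only registering the compiled soilent schedulers when that extension imports cleanly. A minimal sketch of how a model could opt into a different activation class, using the schedule_class keyword that the tests below rely on (treating PQueueActivation as importable from soil.time is an assumption based on this diff):

from soil import environment
from soil import time as stime

# Default: bucket-based scheduling (TimedActivation = BucketTimedActivation).
env_default = environment.Environment()

# Assumed alternative: the heap-backed scheduler defined above.
env_heap = environment.Environment(schedule_class=stime.PQueueActivation)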
@@ -93,15 +93,12 @@ def flatten_dict(d):


def _flatten_dict(d, prefix=""):
    if not isinstance(d, dict):
        # print('END:', prefix, d)
        yield prefix, d
        return
    if prefix:
        prefix = prefix + "."
    for k, v in d.items():
        # print(k, v)
        res = list(_flatten_dict(v, prefix="{}{}".format(prefix, k)))
        # print('RES:', res)
        yield from res

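_flatten_dict walks a nested dictionary and yields dotted-path/value pairs; the flatten_dict wrapper is not part of this hunk, so the sketch below just materializes the generator with dict():

nested = {"a": {"b": 1, "c": {"d": 2}}, "e": 3}
flat = dict(_flatten_dict(nested))
assert flat == {"a.b": 1, "a.c.d": 2, "e": 3}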
@@ -142,6 +139,7 @@ def run_and_return_exceptions(func, *args, **kwargs):


def run_parallel(func, iterable, num_processes=1, **kwargs):
    if num_processes > 1 and not os.environ.get("SOIL_DEBUG", None):
        logger.info("Running simulations in {} processes".format(num_processes))
        if num_processes < 1:
            num_processes = cpu_count() - num_processes
        p = Pool(processes=num_processes)
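run_parallel only spins up a Pool when more than one process is requested and SOIL_DEBUG is unset, so a single-process call degenerates into a plain loop that is easy to debug. A hedged usage sketch (the callable and configurations are made up, and it assumes the function returns an iterable of results, which this hunk does not show):

from functools import partial

def run_one(params, dry_run=False):
    # Stand-in for running a single simulation configuration.
    return sum(params.values())

configs = [{"a": 1}, {"a": 2}, {"a": 3}]
# With num_processes=1 this is expected to behave like a plain map, which is handy for debugging.
results = list(run_parallel(partial(run_one, dry_run=True), configs, num_processes=1))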
@@ -157,4 +155,24 @@ def run_parallel(func, iterable, num_processes=1, **kwargs):


def int_seed(seed: str):
    return int.from_bytes(seed.encode(), "little")


def prob(prob, random):
    """
    A true/False uniform distribution with a given probability.
    To be used like this:

    .. code-block:: python

        if prob(0.3):
            do_something()

    """
    r = random.random()
    return r < prob
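Note that prob takes the random generator as an explicit second argument, so callers can pass the model's or agent's own RNG and keep runs reproducible. A quick sanity-check sketch, assuming prob above is in scope:

import random

rng = random.Random(42)
fired = [prob(0.3, rng) for _ in range(1000)]
print(sum(fired) / len(fired))   # roughly 0.3 for a fair generator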


def custom(cls, **kwargs):
    """Create a new class from a template class and keyword arguments"""
    return type(cls.__name__, (cls,), kwargs)
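custom simply subclasses the template and overrides class attributes with the given keyword arguments, which is a cheap way to parameterize agent classes. A small sketch (the Walker class is illustrative only):

class Walker:
    speed = 1

FastWalker = custom(Walker, speed=5)
assert FastWalker.__name__ == "Walker"
assert issubclass(FastWalker, Walker)
assert FastWalker.speed == 5
assert Walker.speed == 1   # the template class is left untouched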
@@ -2,3 +2,6 @@ pytest
pytest-profiling
scipy>=1.3
tornado
nbconvert==7.3.1
nbformat==5.8.0
jupyter==1.0.0
@@ -1,7 +1,7 @@
from unittest import TestCase
import pytest

from soil import agents, environment
from soil import agents, events, environment
from soil import time as stime


@@ -17,16 +17,17 @@ class TestAgents(TestCase):
        """The last step of a dead agent should return time.INFINITY"""
        d = Dead(unique_id=0, model=environment.Environment())
        ret = d.step()
        assert ret == stime.NEVER
        assert ret == stime.INFINITY

    def test_die_raises_exception(self):
        """A dead agent should raise an exception if it is stepped after death"""
        """A dead agent should continue returning INFINITY after death"""
        d = Dead(unique_id=0, model=environment.Environment())
        assert d.alive
        d.step()
        assert not d.alive
        with pytest.raises(stime.DeadAgent):
            d.step()
        when = float(d.step())
        assert not d.alive
        assert when == stime.INFINITY

    def test_agent_generator(self):
        """
@@ -52,23 +53,73 @@ class TestAgents(TestCase):

    def test_state_decorator(self):
        class MyAgent(agents.FSM):
            run = 0
            times_run = 0

            @agents.state("original", default=True)
            def root(self):
                self.run += 1
                return self.other

            @agents.state
            def other(self):
                self.run += 1
                self.times_run += 1

        assert MyAgent.other.id == "other"
        e = environment.Environment()
        a = e.add_agent(MyAgent)
        e.step()
        assert a.run == 1
        assert a.times_run == 0
        a.step()
        print("DONE")
        assert a.times_run == 1
        assert a.state_id == MyAgent.other.id
        a.step()
        assert a.times_run == 2

    def test_state_decorator_multiple(self):
        class MyAgent(agents.FSM):
            times_run = 0

            @agents.state(default=True)
            def one(self):
                return self.two

            @agents.state
            def two(self):
                return self.one

        e = environment.Environment()
        first = e.add_agent(MyAgent, state_id=MyAgent.one)
        second = e.add_agent(MyAgent, state_id=MyAgent.two)
        assert first.state_id == MyAgent.one.id
        assert second.state_id == MyAgent.two.id
        e.step()
        assert first.state_id == MyAgent.two.id
        assert second.state_id == MyAgent.one.id

    def test_state_decorator_multiple_async(self):
        class MyAgent(agents.FSM):
            times_run = 0

            @agents.state(default=True)
            def one(self):
                yield self.delay(1)
                return self.two

            @agents.state
            def two(self):
                yield self.delay(1)
                return self.one

        e = environment.Environment()
        first = e.add_agent(MyAgent, state_id=MyAgent.one)
        second = e.add_agent(MyAgent, state_id=MyAgent.two)
        for i in range(2):
            assert first.state_id == MyAgent.one.id
            assert second.state_id == MyAgent.two.id
            e.step()
        for i in range(2):
            assert first.state_id == MyAgent.two.id
            assert second.state_id == MyAgent.one.id
            e.step()

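The decorator tests above all exercise the same pattern: a state may yield self.delay(n) to sleep without leaving the state, and it may return another state, optionally scheduled with .delay() or .at(). A compact sketch of that pattern in isolation (illustrative, not part of the test file):

class Traveler(agents.FSM):
    @agents.state(default=True)
    def resting(self):
        yield self.delay(1)           # sleep one time unit, stay in this state
        return self.walking.delay(2)  # then switch to `walking`, scheduled 2 units later

    @agents.state
    def walking(self):
        return self.resting.at(10)    # switch back, but not before t=10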
    def test_broadcast(self):
        """
@@ -77,30 +128,28 @@ class TestAgents(TestCase):
        """

        class BCast(agents.Evented):
            pings_received = 0
            pings_received = []

            def step(self):
                print(self.model.broadcast)
                try:
                    self.model.broadcast("PING")
                except Exception as ex:
                    print(ex)
            async def step(self):
                self.broadcast("PING")
                print("PING sent")
                while True:
                    self.check_messages()
                    yield
                    msgs = await self.received()
                    self.pings_received += msgs

            def on_receive(self, msg, sender=None):
                self.pings_received += 1
        e = environment.Environment()

        e = environment.EventedEnvironment()

        for i in range(10):
        num_agents = 10
        for i in range(num_agents):
            e.add_agent(agent_class=BCast)
        e.step()
        pings_received = lambda: [a.pings_received for a in e.agents]
        assert sorted(pings_received()) == list(range(1, 11))
        # Agents are executed in order, so the first agent should have not received any messages
        pings_received = lambda: [len(a.pings_received) for a in e.agents]
        assert sorted(pings_received()) == list(range(0, num_agents))
        e.step()
        assert all(x == 10 for x in pings_received())
        # After the second step, every agent should have received a broadcast from every other agent
        received = pings_received()
        assert all(x == (num_agents - 1) for x in received)

    def test_ask_messages(self):
        """
@@ -132,23 +181,22 @@ class TestAgents(TestCase):
                while True:
                    if pongs or not pings:  # First agent, or anyone after that
                        pings.append(self.now)
                        response = yield target.ask("PING")
                        response = yield from target.ask("PING")
                        responses.append(response)
                    else:
                        print("NOT sending ping")
                        print("Checking msgs")
                        # Do not block if we have already received a PING
                        if not self.check_messages():
                            yield self.received()
                        print("done")
                        msgs = yield from self.received()
                        for ping in msgs:
                            if ping.payload == "PING":
                                ping.reply = "PONG"
                                pongs.append(self.now)
                            else:
                                raise Exception("This should never happen")

            def on_receive(self, msg, sender=None):
                if msg == "PING":
                    pongs.append(self.now)
                    return "PONG"
                raise Exception("This should never happen")

        e = environment.EventedEnvironment(schedule_class=stime.OrderedTimedActivation)
        e = environment.Environment(schedule_class=stime.OrderedTimedActivation)
        for i in range(2):
            e.add_agent(agent_class=Ping)
        assert e.now == 0
@@ -174,4 +222,303 @@ class TestAgents(TestCase):
        assert len(ev) == 1
        assert ev[0].unique_id == 1
        null = list(e.agents(unique_ids=[0, 1], agent_class=agents.NetworkAgent))
        assert not null

    def test_agent_return(self):
        '''
        An agent should be able to cycle through different states and control when it
        should be awakened.
        '''
        class TestAgent(agents.Agent):
            @agents.state(default=True)
            def one(self):
                return self.two

            @agents.state
            def two(self):
                return self.three.at(10)

            @agents.state
            def three(self):
                return self.four.delay(1)

            @agents.state
            def four(self):
                yield self.delay(2)
                return self.five.delay(3)

            @agents.state
            def five(self):
                return self.delay(1)

        model = environment.Environment()
        a = model.add_agent(TestAgent)
        assert a.state_id == TestAgent.one.id
        assert a.now == 0
        model.step()
        assert a.state_id == TestAgent.two.id
        assert a.now == 1
        model.step()
        assert a.state_id == TestAgent.three.id
        assert a.now == 10
        model.step()
        assert a.state_id == TestAgent.four.id
        assert a.now == 11
        model.step()
        assert a.state_id == TestAgent.four.id
        assert a.now == 13
        model.step()
        assert a.state_id == TestAgent.five.id
        assert a.now == 16
        model.step()
        assert a.state_id == TestAgent.five.id
        assert a.now == 17

    def test_agent_async(self):
        '''
        Async functions should also be valid states.
        '''

        class TestAgent(agents.Agent):
            @agents.state(default=True)
            def one(self):
                return self.two

            @agents.state
            def two(self):
                return self.three.at(10)

            @agents.state
            def three(self):
                return self.four.delay(1)

            @agents.state
            async def four(self):
                await self.delay(2)
                return self.five.delay(3)

            @agents.state
            def five(self):
                return self.delay(1)

        model = environment.Environment()
        a = model.add_agent(TestAgent)
        assert a.now == 0
        assert a.state_id == TestAgent.one.id
        model.step()
        assert a.now == 1
        assert a.state_id == TestAgent.two.id
        model.step()
        assert a.now == 10
        assert a.state_id == TestAgent.three.id
        model.step()
        assert a.state_id == TestAgent.four.id
        assert a.now == 11
        model.step()
        assert a.state_id == TestAgent.four.id
        assert a.now == 13
        model.step()
        assert a.state_id == TestAgent.five.id
        assert a.now == 16
        model.step()
        assert a.state_id == TestAgent.five.id
        assert a.now == 17

    def test_agent_return_step(self):
        '''
        The same result as the previous test should be achievable by manually
        handling the agent state.
        '''
        class TestAgent(agents.Agent):
            my_state = 1
            my_count = 0

            def step(self):
                if self.my_state == 1:
                    self.my_state = 2
                    return None
                elif self.my_state == 2:
                    self.my_state = 3
                    return self.at(10)
                elif self.my_state == 3:
                    self.my_state = 4
                    self.my_count = 0
                    return self.delay(1)
                elif self.my_state == 4:
                    self.my_count += 1
                    if self.my_count == 1:
                        return self.delay(2)
                    self.my_state = 5
                    return self.delay(3)
                elif self.my_state == 5:
                    return self.delay(1)

        model = environment.Environment()
        a = model.add_agent(TestAgent)
        assert a.my_state == 1
        assert a.now == 0
        model.step()
        assert a.now == 1
        assert a.my_state == 2
        model.step()
        assert a.now == 10
        assert a.my_state == 3
        model.step()
        assert a.now == 11
        assert a.my_state == 4
        model.step()
        assert a.now == 13
        assert a.my_state == 4
        model.step()
        assert a.now == 16
        assert a.my_state == 5
        model.step()
        assert a.now == 17
        assert a.my_state == 5

    def test_agent_return_step_async(self):
        '''
        The same result as the previous test should be achievable by manually
        handling the agent state.
        '''
        class TestAgent(agents.Agent):
            my_state = 1

            async def step(self):
                self.my_state = 2
                await self.delay()
                self.my_state = 3
                await self.at(10)
                self.my_state = 4
                await self.delay(1)
                await self.delay(2)
                self.my_state = 5
                await self.delay(3)
                while True:
                    await self.delay(1)

        model = environment.Environment()
        a = model.add_agent(TestAgent)
        assert a.my_state == 1
        assert a.now == 0
        model.step()
        assert a.now == 1
        assert a.my_state == 2
        model.step()
        assert a.now == 10
        assert a.my_state == 3
        model.step()
        assert a.now == 11
        assert a.my_state == 4
        model.step()
        assert a.now == 13
        assert a.my_state == 4
        model.step()
        assert a.now == 16
        assert a.my_state == 5
        model.step()
        assert a.now == 17
        assert a.my_state == 5

    def test_receive(self):
        '''
        An agent should be able to receive a message after waiting
        '''
        model = environment.Environment()
        class TestAgent(agents.Agent):
            sent = False
            woken = 0
            def step(self):
                self.woken += 1
                return super().step()

            @agents.state(default=True)
            async def one(self):
                try:
                    self.sent = await self.received(timeout=15)
                    return self.two.at(20)
                except events.TimedOut:
                    pass
            @agents.state
            def two(self):
                return self.die()

        a = model.add_agent(TestAgent)

        class Sender(agents.Agent):
            async def step(self):
                await self.delay(10)
                a.tell(1)
                return stime.INFINITY

        b = model.add_agent(Sender)

        # Start and wait
        model.step()
        assert model.now == 10
        assert a.woken == 1
        assert not a.sent

        # Sending the message
        model.step()
        assert model.now == 10
        assert a.woken == 1
        assert not a.sent

        # The receiver callback
        model.step()
        assert model.now == 15
        assert a.woken == 2
        assert a.sent[0].payload == 1

        # The timeout
        model.step()
        assert model.now == 20
        assert a.woken == 2

        # The last state of the agent
        model.step()
        assert a.woken == 3
        assert model.now == float('inf')

    def test_receive_timeout(self):
        '''
        A timeout should be raised if no messages are received after an expiration time
        '''
        model = environment.Environment()
        timedout = False
        class TestAgent(agents.Agent):
            @agents.state(default=True)
            def one(self):
                try:
                    yield from self.received(timeout=10)
                    raise AssertionError('Should have raised an error.')
                except events.TimedOut:
                    nonlocal timedout
                    timedout = True

        a = model.add_agent(TestAgent)

        model.step()
        assert model.now == 10
        model.step()
        # Wake up the callback
        assert model.now == 10
        assert not timedout
        # The actual timeout
        model.step()
        assert model.now == 11
        assert timedout

    def test_attributes(self):
        """Attributes should be individual per agent"""

        class MyAgent(agents.Agent):
            my_attribute = 0

        model = environment.Environment()
        a = MyAgent(model=model)
        assert a.my_attribute == 0
        b = MyAgent(model=model, my_attribute=1)
        assert b.my_attribute == 1
        assert a.my_attribute == 0

@@ -29,15 +29,12 @@ class TestConfig(TestCase):
    def test_torvalds_config(self):
        sim = simulation.from_config(os.path.join(ROOT, "test_config.yml"))
        MAX_STEPS = 10
        INTERVAL = 2
        assert sim.interval == INTERVAL
        assert sim.max_steps == MAX_STEPS
        envs = sim.run()
        assert len(envs) == 1
        env = envs[0]
        assert env.interval == 2
        assert env.count_agents() == 3
        assert env.now == INTERVAL * MAX_STEPS
        assert env.now == MAX_STEPS


def make_example_test(path, cfg):
@@ -1,5 +1,4 @@
---
source_file: "../examples/torvalds_sim.py"
model: "TorvaldsEnv"
max_steps: 10
interval: 2
max_steps: 10
@@ -81,14 +81,15 @@ class Exporters(TestCase):
            model=ConstantEnv,
            name="exporter_sim",
            exporters=[
                exporters.default,
                exporters.YAML,
                exporters.SQLite,
                exporters.csv,
            ],
            exporter_params={"copy_to": output},
            parameters=dict(
                network_generator="complete_graph",
                network_params={"n": n_nodes},
                agent_class="CounterModel",
                agent_class=agents.CounterModel,
                agent_reporters={"times": "times"},
            ),
            max_time=max_time,
18 tests/test_ipython.py Normal file
@@ -0,0 +1,18 @@
from unittest import TestCase
import os
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor

ROOT = os.path.abspath(os.path.dirname(__file__))

class TestNotebooks(TestCase):
    def test_tutorial(self):
        notebook = os.path.join(ROOT, "../docs/tutorial/soil_tutorial.ipynb")
        with open(notebook) as f:
            nb = nbformat.read(f, as_version=4)
        ep = ExecutePreprocessor(timeout=60000)
        try:
            assert ep.preprocess(nb) is not None, f"Got empty notebook for {notebook}"
        except Exception:
            assert False, f"Failed executing {notebook}"

@@ -6,9 +6,7 @@ import networkx as nx
from functools import partial

from os.path import join
from soil import simulation, Environment, agents, network, serialization, utils, config, from_file
from soil.time import Delta

from soil import simulation, Environment, agents, serialization, from_file, time
from mesa import Agent as MesaAgent

ROOT = os.path.abspath(os.path.dirname(__file__))
@@ -114,7 +112,6 @@ class TestMain(TestCase):
    def test_serialize_class(self):
        ser, name = serialization.serialize(agents.BaseAgent, known_modules=[])
        assert name == "soil.agents.BaseAgent"
        assert ser == agents.BaseAgent

        ser, name = serialization.serialize(
            agents.BaseAgent,
@@ -123,11 +120,9 @@ class TestMain(TestCase):
            ],
        )
        assert name == "BaseAgent"
        assert ser == agents.BaseAgent

        ser, name = serialization.serialize(CustomAgent)
        assert name == "test_main.CustomAgent"
        assert ser == CustomAgent
        pickle.dumps(ser)

    def test_serialize_builtin_types(self):
@@ -140,9 +135,9 @@ class TestMain(TestCase):

    def test_serialize_agent_class(self):
        """A class from soil.agents should be serialized without the module part"""
        ser = agents._serialize_type(CustomAgent)
        ser = serialization.serialize(CustomAgent, known_modules=["soil.agents"])[1]
        assert ser == "test_main.CustomAgent"
        ser = agents._serialize_type(agents.BaseAgent)
        ser = serialization.serialize(agents.BaseAgent, known_modules=["soil.agents"])[1]
        assert ser == "BaseAgent"
        pickle.dumps(ser)

@@ -168,7 +163,6 @@ class TestMain(TestCase):

    def test_fsm(self):
        """Basic state change"""

        class ToggleAgent(agents.FSM):
            @agents.default_state
            @agents.state
@@ -193,17 +187,17 @@ class TestMain(TestCase):
            @agents.default_state
            @agents.state
            def ping(self):
                return self.pong, 2
                return self.pong.delay(2)

            @agents.state
            def pong(self):
                return self.ping

        a = ToggleAgent(unique_id=1, model=Environment())
        when = a.step()
        when = float(a.step())
        assert when == 2
        when = a.step()
        assert when == Delta(a.interval)
        assert when == None

    def test_load_sim(self):
        """Make sure at least one of the examples can be loaded"""
@@ -232,4 +226,87 @@ class TestMain(TestCase):
        assert len(configs) == len(a) * len(b)
        for i in a:
            for j in b:
                assert {"a": i, "b": j} in configs

    def test_agent_reporters(self):
        """An environment should be able to set its own reporters"""
        class Noop2(agents.Noop):
            pass

        e = Environment()
        e.add_agent(agents.Noop)
        e.add_agent(Noop2)
        e.add_agent_reporter("now")
        e.add_agent_reporter("base", lambda a: "base", agent_class=agents.Noop)
        e.add_agent_reporter("subclass", lambda a: "subclass", agent_class=Noop2)
        e.step()

        # Step 0 is not present because we added the reporters
        # after initialization.
        df = e.agent_df()
        assert "now" in df.columns
        assert "base" in df.columns
        assert "subclass" in df.columns
        assert df["now"][(1,0)] == 1
        assert df["now"][(1,1)] == 1
        assert df["base"][(1,0)] == "base"
        assert df["base"][(1,1)] == "base"
        assert df["subclass"][(1,0)] is None
        assert df["subclass"][(1,1)] == "subclass"

    def test_remove_agent(self):
        """An agent that is scheduled should be removed from the schedule"""
        model = Environment()
        model.add_agent(agents.Noop)
        model.step()
        model.remove_agent(model.agents[0])
        assert not model.agents
        when = model.step()
        assert when == None
        assert not model.running

    def test_remove_agent(self):
        """An agent that is scheduled should be removed from the schedule"""

        allagents = []
        class Removed(agents.BaseAgent):
            def step(self):
                nonlocal allagents
                assert self.alive
                assert self in self.model.agents
                for agent in allagents:
                    self.model.remove_agent(agent)

        model = Environment()
        a1 = model.add_agent(Removed)
        a2 = model.add_agent(Removed)
        allagents = [a1, a2]
        model.step()
        assert not model.agents

    def test_agent_df(self):
        '''The agent dataframe should have the right columns'''

        class PeterPan(agents.BaseAgent):
            steps = 0

            def step(self):
                self.steps += 1
                return self.delay(0)

        class AgentDF(Environment):
            def init(self):
                self.add_agent(PeterPan)
                self.add_agent_reporter("steps")

        e = AgentDF()
        df = e.agent_df()
        assert df["steps"][(0,0)] == 0
        e.step()
        df = e.agent_df()
        assert len(df) == 1
        assert df["steps"][(0,0)] == 1
        e.step()
        df = e.agent_df()
        assert len(df) == 1
        assert df["steps"][(0,0)] == 2
@@ -4,26 +4,6 @@ from soil import time, agents, environment


class TestMain(TestCase):
    def test_cond(self):
        """
        A condition should match a When if the condition is True
        """

        t = time.Cond(lambda t: True)
        f = time.Cond(lambda t: False)
        for i in range(10):
            w = time.When(i)
            assert w == t
            assert w is not f

    def test_cond(self):
        """
        Comparing a Cond to a Delta should always return False
        """

        c = time.Cond(lambda t: False)
        d = time.Delta(1)
        assert c is not d

    def test_cond_env(self):
        """ """
@@ -36,11 +16,12 @@ class TestMain(TestCase):

        class CondAgent(agents.BaseAgent):
            def step(self):
                nonlocal done
                nonlocal done, times_started, times_asleep, times_awakened
                times_started.append(self.now)
                while True:
                    times_asleep.append(self.now)
                    yield time.Cond(lambda agent: agent.now >= 10, delta=2)
                while self.now < 10:
                    yield self.delay(2)
                    times_awakened.append(self.now)
                    if self.now >= 10:
                        break
@@ -57,7 +38,6 @@ class TestMain(TestCase):
        assert times_started == [0]
        assert times_awakened == [10]
        assert done == [10]
        # The first time will produce the Cond.
        assert env.schedule.steps == 6
        assert len(times) == 6

@@ -65,11 +45,10 @@ class TestMain(TestCase):
            times.append(env.now)
            env.step()

        assert times == [0, 2, 4, 6, 8, 10, 11]
        assert times == [0, 2, 4, 6, 8, 10, 11, 12]
        assert env.schedule.time == 13
        assert times_started == [0, 11]
        assert times_awakened == [10]
        assert done == [10]
        # Once more to yield the cond, another one to continue
        assert env.schedule.steps == 7
        assert len(times) == 7
        assert times_started == [0, 11, 12]
        assert times_awakened == [10, 11, 12]
        assert done == [10, 11, 12]
        assert env.schedule.steps == 8
        assert len(times) == 8