Mirror of https://github.com/gsi-upm/soil (synced 2024-11-22 03:02:28 +00:00)

Commit 6f7481769e: WIP
Parent: 1a8313e4f6
@@ -36,13 +36,13 @@ def main():
     parser.add_argument('--module', '-m', type=str,
                         help='file containing the code of any custom agents.')
     parser.add_argument('--dry-run', '--dry', action='store_true',
-                        help='Do not store the results of the simulation.')
+                        help='Do not store the results of the simulation to disk, show in terminal instead.')
     parser.add_argument('--pdb', action='store_true',
                         help='Use a pdb console in case of exception.')
     parser.add_argument('--graph', '-g', action='store_true',
-                        help='Dump GEXF graph. Defaults to false.')
+                        help='Dump each trial\'s network topology as a GEXF graph. Defaults to false.')
     parser.add_argument('--csv', action='store_true',
-                        help='Dump history in CSV format. Defaults to false.')
+                        help='Dump all data collected in CSV format. Defaults to false.')
     parser.add_argument('--level', type=str,
                         help='Logging level')
     parser.add_argument('--output', '-o', type=str, default="soil_output",
@@ -26,7 +26,14 @@ class DeadAgent(Exception):

 class BaseAgent(Agent):
     """
-    A special Agent that keeps track of its state history.
+    A special type of Mesa Agent that:
+
+    * Can be used as a dictionary to access its state.
+    * Has logging built-in
+    * Can be given default arguments through a defaults class attribute,
+      which will be used on construction to initialize each agent's state
+
+    Any attribute that is not preceded by an underscore (`_`) will also be added to its state.
     """

     defaults = {}
@@ -61,6 +68,9 @@ class BaseAgent(Agent):
         for (k, v) in kwargs.items():
             setattr(self, k, v)

+        for (k, v) in getattr(self, 'defaults', {}).items():
+            if not hasattr(self, k) or getattr(self, k) is None:
+                setattr(self, k, v)
+
     # TODO: refactor to clean up mesa compatibility
     @property
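For orientation, a minimal sketch of how the defaults mechanism added above could be used (the agent class and its attributes are made up for illustration and are not part of this commit):

    from soil.agents import BaseAgent

    class WalkerAgent(BaseAgent):
        # Any attribute missing (or None) after construction is filled in
        # from this dictionary, per the new __init__ logic above.
        defaults = {
            'steps': 0,
            'mood': 'neutral',
        }

        def step(self):
            # Public attributes double as entries of the agent's state
            self.steps += 1
            self.mood = 'happy' if self.steps > 10 else 'neutral'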
@@ -79,7 +89,6 @@ class BaseAgent(Agent):
     def state(self):
         '''
         Return the agent itself, which behaves as a dictionary.
-        Changes made to `agent.state` will be reflected in the history.

         This method shouldn't be used, but is kept here for backwards compatibility.
         '''
soil/config.py (new file, 251 additions)
@@ -0,0 +1,251 @@
import yaml
import os
import sys
import networkx as nx
import collections.abc

from . import serialization, utils, basestring, agents


class Config(collections.abc.Mapping):
    """

    1) agent type can be specified by name or by class.
    2) instead of just one type, a network agents distribution can be used.
       The distribution specifies the weight (or probability) of each
       agent type in the topology. This is an example distribution: ::

            [
             {'agent_type': 'agent_type_1',
              'weight': 0.2,
              'state': {
                  'id': 0
              }
             },
             {'agent_type': 'agent_type_2',
              'weight': 0.8,
              'state': {
                  'id': 1
              }
             }
            ]

       In this example, 20% of the nodes will be marked as type
       'agent_type_1'.
    3) if no initial state is given, each node's state will be set
       to `{'id': 0}`.

    Parameters
    ---------
    name : str, optional
        name of the Simulation
    group : str, optional
        a group name can be used to link simulations
    topology : networkx.Graph instance, or Node-Link topology as a dict or string, optional
        It will be loaded with `json_graph.node_link_graph(topology)`.
    network_params : dict
        parameters used to create a topology with networkx, if no topology is given
    network_agents : dict
        definition of agents to populate the topology with
    agent_type : NetworkAgent subclass, optional
        Default type of NetworkAgent to use for nodes not specified in network_agents
    states : list, optional
        List of initial states corresponding to the nodes in the topology. Basic form is a list of integers
        whose value indicates the state
    dir_path : str, optional
        Directory path to load simulation assets (files, modules...)
    seed : str, optional
        Seed to use for the random generator
    num_trials : int, optional
        Number of independent simulation runs
    max_time : int, optional
        Maximum step/time for each simulation
    environment_params : dict, optional
        Dictionary of globally-shared environmental parameters
    environment_agents : dict, optional
        Similar to network_agents. Distribution of Agents that control the environment
    environment_class : soil.environment.Environment subclass, optional
        Class for the environment. It defaults to soil.environment.Environment
    """
    __slots__ = 'name', 'agent_type', 'group', 'network_agents', 'environment_agents', 'states', 'default_state', 'interval', 'network_params', 'seed', 'num_trials', 'max_time', 'topology', 'schedule', 'initial_time', 'environment_params', 'environment_class', 'dir_path', '_added_to_path'

    def __init__(self, name=None,
                 group=None,
                 agent_type='BaseAgent',
                 network_agents=None,
                 environment_agents=None,
                 states=None,
                 default_state=None,
                 interval=1,
                 network_params=None,
                 seed=None,
                 num_trials=1,
                 max_time=None,
                 topology=None,
                 schedule=None,
                 initial_time=0,
                 environment_params={},
                 environment_class='soil.Environment',
                 dir_path=None):

        self.network_params = network_params
        self.name = name or 'Unnamed'
        self.seed = str(seed or name)
        self.group = group or ''
        self.num_trials = num_trials
        self.max_time = max_time
        self.default_state = default_state or {}
        self.dir_path = dir_path or os.getcwd()
        self.interval = interval

        self._added_to_path = list(x for x in [os.getcwd(), self.dir_path] if x not in sys.path)
        sys.path += self._added_to_path

        self.topology = topology

        self.schedule = schedule
        self.initial_time = initial_time

        self.environment_class = environment_class
        self.environment_params = dict(environment_params)

        #TODO: Check agent distro vs fixed agents
        self.environment_agents = environment_agents or []

        self.agent_type = agent_type

        self.network_agents = network_agents or {}

        self.states = states or {}

    def validate(self):
        agents._validate_states(self.states,
                                self._topology)

    def restore_path(self):
        for added in self._added_to_path:
            sys.path.remove(added)

    def to_yaml(self):
        return yaml.dump(self.to_dict())

    def dump_yaml(self, f=None, outdir=None):
        if not f and not outdir:
            raise ValueError('specify a file or an output directory')

        if not f:
            f = os.path.join(outdir, '{}.dumped.yml'.format(self.name))

        with utils.open_or_reuse(f, 'w') as f:
            f.write(self.to_yaml())

    def to_yaml(self):
        return yaml.dump(self.to_dict())

    # TODO: See note on getstate
    def to_dict(self):
        return self.__getstate__()

    def dump_yaml(self, f=None, outdir=None):
        if not f and not outdir:
            raise ValueError('specify a file or an output directory')

        if not f:
            f = os.path.join(outdir, '{}.dumped.yml'.format(self.name))

        with utils.open_or_reuse(f, 'w') as f:
            f.write(self.to_yaml())

    def __getitem__(self, key):
        return getattr(self, key)

    def __iter__(self):
        return (k for k in self.__slots__ if k[0] != '_')

    def __len__(self):
        return len(self.__slots__)

    def dump_pickle(self, f=None, outdir=None):
        if not outdir and not f:
            raise ValueError('specify a file or an output directory')

        if not f:
            f = os.path.join(outdir,
                             '{}.simulation.pickle'.format(self.name))
        with utils.open_or_reuse(f, 'wb') as f:
            pickle.dump(self, f)

    # TODO: remove this. A config should be sendable regardless. Non-pickable objects could be computed via properties and the like
    # def __getstate__(self):
    #     state={}
    #     for k, v in self.__dict__.items():
    #         if k[0] != '_':
    #             state[k] = v
    #     state['topology'] = json_graph.node_link_data(self.topology)
    #     state['network_agents'] = agents.serialize_definition(self.network_agents,
    #                                                           known_modules = [])
    #     state['environment_agents'] = agents.serialize_definition(self.environment_agents,
    #                                                               known_modules = [])
    #     state['environment_class'] = serialization.serialize(self.environment_class,
    #                                                          known_modules=['soil.environment'])[1]  # func, name
    #     if state['load_module'] is None:
    #         del state['load_module']
    #     return state

    # # TODO: remove, same as __getstate__
    # def __setstate__(self, state):
    #     self.__dict__ = state
    #     self.load_module = getattr(self, 'load_module', None)
    #     if self.dir_path not in sys.path:
    #         sys.path += [self.dir_path, os.getcwd()]
    #     self.topology = json_graph.node_link_graph(state['topology'])
    #     self.network_agents = agents.calculate_distribution(agents._convert_agent_types(self.network_agents))
    #     self.environment_agents = agents._convert_agent_types(self.environment_agents,
    #                                                           known_modules=[self.load_module])
    #     self.environment_class = serialization.deserialize(self.environment_class,
    #                                                        known_modules=[self.load_module,
    #                                                                       'soil.environment', ])  # func, name


class CalculatedConfig(Config):
    def __init__(self, config):
        """
        Returns a configuration object that replaces some "plain" attributes (e.g., the `environment_class` string)
        with a Python object (the `soil.environment.Environment` class).
        """
        self._config = config
        values = dict(config)
        values['environment_class'] = self._environment_class()
        values['environment_agents'] = self._environment_agents()
        values['topology'] = self._topology()
        values['network_agents'] = self._network_agents()
        values['agent_type'] = serialization.deserialize(self.agent_type, known_modules=['soil.agents'])

        return values

    def _topology(self):
        topology = self._config.topology
        if topology is None:
            topology = serialization.load_network(self._config.network_params,
                                                  dir_path=self._config.dir_path)

        elif isinstance(topology, basestring) or isinstance(topology, dict):
            topology = json_graph.node_link_graph(topology)

        return nx.Graph(topology)

    def _environment_class(self):
        return serialization.deserialize(self._config.environment_class,
                                         known_modules=['soil.environment', ]) or Environment

    def _environment_agents(self):
        return agents._convert_agent_types(self._config.environment_agents)

    def _network_agents(self):
        distro = agents.calculate_distribution(self._config.network_agents,
                                               self._config.agent_type)
        return agents._convert_agent_types(distro)

    def _environment_class(self):
        return serialization.deserialize(self._config.environment_class,
                                         known_modules=['soil.environment', ])  # func, name
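To make the new class concrete, here is a hedged sketch of building a Config directly (the generator, agent type and parameter values are invented for illustration):

    from soil.config import Config

    config = Config(name='spread_example',
                    network_params={'generator': 'erdos_renyi_graph', 'n': 100, 'p': 0.1},
                    network_agents=[{'agent_type': 'CounterModel', 'weight': 1, 'state': {'id': 0}}],
                    num_trials=3,
                    max_time=100,
                    environment_params={'prob_infection': 0.05})

    # Config implements collections.abc.Mapping, so it can be read like a dict
    print(config['name'], config['num_trials'], config['max_time'])

Note that this is a WIP commit: some methods are not functional yet (for instance, `to_dict` relies on a commented-out `__getstate__`, and `dump_pickle` uses `pickle` without importing it).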
@@ -8,19 +8,17 @@ class SoilDataCollector(MDC):
         # Populate model and env reporters so they have a key per
         # So they can be shown in the web interface
         self.environment = environment
+        raise NotImplementedError()

     @property
     def model_vars(self):
-        pass
+        raise NotImplementedError()

     @model_vars.setter
     def model_vars(self, value):
-        pass
+        raise NotImplementedError()

     @property
     def agent_reporters(self):
-        self.model._history._
+        raise NotImplementedError()

-        pass
@@ -1,23 +1,20 @@
+from __future__ import annotations
 import os
 import sqlite3
-import csv
 import math
 import random
-import yaml
-import tempfile
-import logging
-import pandas as pd
 from time import time as current_time
 from copy import deepcopy
 from networkx.readwrite import json_graph

 import networkx as nx

-from tsih import History, NoHistory, Record, Key
-
 from mesa import Model

-from . import serialization, agents, analysis, utils, time
+from tsih import Record
+
+from . import serialization, agents, analysis, utils, time, config

 # These properties will be copied when pickling/unpickling the environment
 _CONFIG_PROPS = [ 'name',
@@ -49,7 +46,6 @@ class Environment(Model):
                  schedule=None,
                  initial_time=0,
                  environment_params=None,
-                 history=False,
                  dir_path=None,
                  **kwargs):

@@ -76,20 +72,11 @@ class Environment(Model):
             topology = nx.Graph()
         self.G = nx.Graph(topology)

         self.environment_params = environment_params or {}
         self.environment_params.update(kwargs)

         self._env_agents = {}
         self.interval = interval

-        if history:
-            history = History
-        else:
-            history = NoHistory
-
-        self._history = history(name=self.name,
-                                backup=True)
         self['SEED'] = seed

         if network_agents:
@@ -106,6 +93,19 @@ class Environment(Model):

         self.logger = utils.logger.getChild(self.name)

+    @staticmethod
+    def from_config(conf: config.Config, trial_id, **kwargs) -> Environment:
+        '''Create an environment for a trial of the simulation'''
+
+        conf = config.Config(conf, **kwargs)
+        conf.seed = '{}_{}'.format(conf.seed, trial_id)
+        conf.name = '{}_trial_{}'.format(conf.name, trial_id).replace('.', '-')
+        opts = conf.environment_params.copy()
+        opts.update(conf)
+        opts.update(kwargs)
+        env = serialization.deserialize(conf.environment_class)(**opts)
+        return env
+
     @property
     def now(self):
         if self.schedule:
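The intended call pattern for the new constructor appears to be along these lines (a hedged sketch with invented parameter values; since this is a WIP commit, the round trip through `config.Config(conf, **kwargs)` may not work end to end yet):

    from soil import config
    from soil.environment import Environment

    cfg = config.Config(name='example',
                        network_params={'generator': 'complete_graph', 'n': 10},
                        environment_params={'prob_infection': 0.05})

    # One environment per trial; seed and name are derived from the trial id
    env = Environment.from_config(cfg, trial_id=0)
    env.run(until=10)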
@@ -212,11 +212,14 @@ class Environment(Model):
         return self.logger.log(level, message, extra=extra)

     def step(self):
+        '''
+        Advance one step in the simulation, and update the data collection and scheduler appropriately
+        '''
         super().step()
         self.schedule.step()

     def run(self, until, *args, **kwargs):
-        self._save_state()
+        until = until or float('inf')

         while self.schedule.next_time < until:
             self.step()
@@ -252,14 +255,16 @@ class Environment(Model):

     def get(self, key, default=None):
         '''
-        Get the value of an environment attribute in a
-        given point in the simulation (history).
-        If key is an attribute name, this method returns
-        the current value.
-        To get values at other times, use a
-        :meth: `soil.history.Key` tuple.
+        Get the value of an environment attribute.
+        Return `default` if the value is not set.
         '''
-        return self[key] if key in self else default
+        return self.environment_params.get(key, default)
+
+    def __getitem__(self, key):
+        return self.environment_params.get(key)
+
+    def __setitem__(self, key, value):
+        return self.environment_params.__setitem__(key, value)

     def get_agent(self, agent_id):
         return self.G.nodes[agent_id]['agent']
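With the history removed, dictionary-style access on the environment now reads and writes `environment_params` directly. A small hedged illustration (the construction arguments are assumptions):

    from soil.environment import Environment

    env = Environment(name='demo', environment_params={'prob_infection': 0.05})

    env['prob_infection'] = 0.1                 # stored in env.environment_params
    assert env.get('prob_infection') == 0.1
    assert env.get('missing_key', 42) == 42     # default returned when the key is absent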
@@ -269,112 +274,31 @@ class Environment(Model):
             return self.agents
         return (self.G.nodes[i]['agent'] for i in nodes)

-    def dump_csv(self, f):
-        with utils.open_or_reuse(f, 'w') as f:
-            cr = csv.writer(f)
-            cr.writerow(('agent_id', 't_step', 'key', 'value'))
-            for i in self.history_to_tuples():
-                cr.writerow(i)
-
-    def dump_gexf(self, f):
-        G = self.history_to_graph()
-        # Workaround for geometric models
-        # See soil/soil#4
-        for node in G.nodes():
-            if 'pos' in G.nodes[node]:
-                G.nodes[node]['viz'] = {"position": {"x": G.nodes[node]['pos'][0], "y": G.nodes[node]['pos'][1], "z": 0.0}}
-                del (G.nodes[node]['pos'])
-
-        nx.write_gexf(G, f, version="1.2draft")
-
-    def dump(self, *args, formats=None, **kwargs):
-        if not formats:
-            return
-        functions = {
-            'csv': self.dump_csv,
-            'gexf': self.dump_gexf
-        }
-        for f in formats:
-            if f in functions:
-                functions[f](*args, **kwargs)
-            else:
-                raise ValueError('Unknown format: {}'.format(f))
-
-    def df(self):
-        return self._history[None, None, None].df()
-
-    def dump_sqlite(self, f):
-        return self._history.dump(f)
-
-    def state_to_tuples(self, now=None):
+    def _agent_to_tuples(self, agent, now=None):
         if now is None:
             now = self.now
-        for k, v in self.environment_params.items():
-            yield Record(dict_id='env',
-                         t_step=now,
-                         key=k,
-                         value=v)
-        for agent in self.agents:
-            for k, v in agent.state.items():
-                yield Record(dict_id=agent.id,
-                             t_step=now,
-                             key=k,
-                             value=v)
+        for k, v in agent.state.items():
+            yield Record(dict_id=agent.id,
+                         t_step=now,
+                         key=k,
+                         value=v)

-    def history_to_tuples(self, agent_id=None):
-        if isinstance(self._history, NoHistory):
-            tuples = self.state_to_tuples()
-        else:
-            tuples = self._history.to_tuples()
-        if agent_id is None:
-            return tuples
-        return filter(lambda x: str(x[0]) == str(agent_id), tuples)
+    def state_to_tuples(self, agent_id=None, now=None):
+        if now is None:
+            now = self.now

-    def history_to_graph(self):
-        G = nx.Graph(self.G)
-
-        for agent in self.network_agents:
-
-            attributes = {'agent': str(agent.__class__)}
-            lastattributes = {}
-            spells = []
-            lastvisible = False
-            laststep = None
-            history = sorted(list(self.history_to_tuples(agent_id=agent.id)))
-            if not history:
-                continue
-            for _, t_step, attribute, value in history:
-                if attribute == 'visible':
-                    nowvisible = value
-                    if nowvisible and not lastvisible:
-                        laststep = t_step
-                    if not nowvisible and lastvisible:
-                        spells.append((laststep, t_step))
-
-                    lastvisible = nowvisible
-                    continue
-                key = 'attr_' + attribute
-                if key not in attributes:
-                    attributes[key] = list()
-                if key not in lastattributes:
-                    lastattributes[key] = (value, t_step)
-                elif lastattributes[key][0] != value:
-                    last_value, laststep = lastattributes[key]
-                    commit_value = (last_value, laststep, t_step)
-                    if key not in attributes:
-                        attributes[key] = list()
-                    attributes[key].append(commit_value)
-                    lastattributes[key] = (value, t_step)
-            for k, v in lastattributes.items():
-                attributes[k].append((v[0], v[1], None))
-            if lastvisible:
-                spells.append((laststep, None))
-            if spells:
-                G.add_node(agent.id, spells=spells, **attributes)
-            else:
-                G.add_node(agent.id, **attributes)
-
-        return G
+        if agent_id:
+            agent = self.get_agent(agent_id)
+            yield from self._agent_to_tuples(agent, now)
+            return
+
+        for k, v in self.environment_params.items():
+            yield Record(dict_id='env',
+                         t_step=now,
+                         key=k,
+                         value=v)
+        for agent in self.agents:
+            yield from self._agent_to_tuples(agent, now)

     def __getstate__(self):
         state = {}
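For reference, a hedged sketch of consuming the reworked generator, reusing an `env` like the one in the earlier illustration. Each `Record` is a (dict_id, t_step, key, value) tuple, one per environment parameter and per agent state entry at the current step:

    for record in env.state_to_tuples():
        print(record.dict_id, record.t_step, record.key, record.value)

    # Restricting the dump to a single agent:
    rows = list(env.state_to_tuples(agent_id=0))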
@@ -382,7 +306,6 @@ class Environment(Model):
             state[prop] = self.__dict__[prop]
         state['G'] = json_graph.node_link_data(self.G)
         state['environment_agents'] = self._env_agents
-        state['history'] = self._history
         state['schedule'] = self.schedule
         return state

@@ -391,7 +314,6 @@ class Environment(Model):
             self.__dict__[prop] = state[prop]
         self._env_agents = state['environment_agents']
         self.G = json_graph.node_link_graph(state['G'])
-        self._history = state['history']
         # self._env = None
         self.schedule = state['schedule']
         self._queue = []
@@ -48,20 +48,24 @@ class Exporter:
         self.simulation = simulation
         outdir = outdir or os.path.join(os.getcwd(), 'soil_output')
         self.outdir = os.path.join(outdir,
-                                   simulation.group or '',
-                                   simulation.name)
+                                   simulation.config.group or '',
+                                   simulation.config.name)
         self.dry_run = dry_run
         self.copy_to = copy_to

-    def start(self):
+    def sim_start(self):
         '''Method to call when the simulation starts'''
         pass

-    def end(self, stats):
+    def sim_end(self, stats):
         '''Method to call when the simulation ends'''
         pass

-    def trial(self, env, stats):
+    def trial_start(self, env):
+        '''Method to call when a trial starts'''
+        pass
+
+    def trial_end(self, env, stats):
         '''Method to call when a trial ends'''
         pass

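A hedged sketch of a custom exporter written against the renamed hooks (the class and what it records are invented):

    from soil.exporters import Exporter


    class durations(Exporter):
        '''Hypothetical exporter that records when each trial ends.'''

        def sim_start(self):
            self.finished = []

        def trial_end(self, env, stats):
            self.finished.append((env.name, env.now))

        def sim_end(self, stats):
            with self.output('durations.csv') as f:
                for name, t in self.finished:
                    f.write('{},{}\n'.format(name, t))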
@@ -80,21 +84,21 @@ class Exporter:
 class default(Exporter):
     '''Default exporter. Writes sqlite results, as well as the simulation YAML'''

-    def start(self):
+    def sim_start(self):
         if not self.dry_run:
             logger.info('Dumping results to %s', self.outdir)
             self.simulation.dump_yaml(outdir=self.outdir)
         else:
             logger.info('NOT dumping results')

-    def trial(self, env, stats):
+    def trial_start(self, env, stats):
         if not self.dry_run:
             with timer('Dumping simulation {} trial {}'.format(self.simulation.name,
                                                                env.name)):
                 with self.output('{}.sqlite'.format(env.name), mode='wb') as f:
                     env.dump_sqlite(f)

-    def end(self, stats):
+    def sim_end(self, stats):
         with timer('Dumping simulation {}\'s stats'.format(self.simulation.name)):
             with self.output('{}.sqlite'.format(self.simulation.name), mode='wb') as f:
                 self.simulation.dump_sqlite(f)
@@ -102,15 +106,14 @@ class default(Exporter):


 class csv(Exporter):

     '''Export the state of each environment (and its agents) in a separate CSV file'''
-    def trial(self, env, stats):
+    def trial_end(self, env, stats):
         with timer('[CSV] Dumping simulation {} trial {} @ dir {}'.format(self.simulation.name,
                                                                           env.name,
                                                                           self.outdir)):
-            with self.output('{}.csv'.format(env.name)) as f:
-                env.dump_csv(f)
-
-            with self.output('{}.stats.csv'.format(env.name)) as f:
+            with self.output('{}.stats.{}.csv'.format(env.name, stats.name)) as f:
                 statwriter = csvlib.writer(f, delimiter='\t', quotechar='"', quoting=csvlib.QUOTE_ALL)

                 for stat in stats:
|
|||||||
|
|
||||||
|
|
||||||
class gexf(Exporter):
|
class gexf(Exporter):
|
||||||
def trial(self, env, stats):
|
def trial_end(self, env, stats):
|
||||||
if self.dry_run:
|
if self.dry_run:
|
||||||
logger.info('Not dumping GEXF in dry_run mode')
|
logger.info('Not dumping GEXF in dry_run mode')
|
||||||
return
|
return
|
||||||
@ -126,22 +129,32 @@ class gexf(Exporter):
|
|||||||
with timer('[GEXF] Dumping simulation {} trial {}'.format(self.simulation.name,
|
with timer('[GEXF] Dumping simulation {} trial {}'.format(self.simulation.name,
|
||||||
env.name)):
|
env.name)):
|
||||||
with self.output('{}.gexf'.format(env.name), mode='wb') as f:
|
with self.output('{}.gexf'.format(env.name), mode='wb') as f:
|
||||||
env.dump_gexf(f)
|
self.dump_gexf(env, f)
|
||||||
|
|
||||||
|
def dump_gexf(self, env, f):
|
||||||
|
G = env.history_to_graph()
|
||||||
|
# Workaround for geometric models
|
||||||
|
# See soil/soil#4
|
||||||
|
for node in G.nodes():
|
||||||
|
if 'pos' in G.nodes[node]:
|
||||||
|
G.nodes[node]['viz'] = {"position": {"x": G.nodes[node]['pos'][0], "y": G.nodes[node]['pos'][1], "z": 0.0}}
|
||||||
|
del (G.nodes[node]['pos'])
|
||||||
|
|
||||||
|
nx.write_gexf(G, f, version="1.2draft")
|
||||||
|
|
||||||
class dummy(Exporter):
|
class dummy(Exporter):
|
||||||
|
|
||||||
def start(self):
|
def sim_start(self):
|
||||||
with self.output('dummy', 'w') as f:
|
with self.output('dummy', 'w') as f:
|
||||||
f.write('simulation started @ {}\n'.format(current_time()))
|
f.write('simulation started @ {}\n'.format(current_time()))
|
||||||
|
|
||||||
def trial(self, env, stats):
|
def trial_end(self, env, stats):
|
||||||
with self.output('dummy', 'w') as f:
|
with self.output('dummy', 'w') as f:
|
||||||
for i in env.history_to_tuples():
|
for i in stats:
|
||||||
f.write(','.join(map(str, i)))
|
f.write(','.join(map(str, i)))
|
||||||
f.write('\n')
|
f.write('\n')
|
||||||
|
|
||||||
def sim(self, stats):
|
def sim_end(self, stats):
|
||||||
with self.output('dummy', 'a') as f:
|
with self.output('dummy', 'a') as f:
|
||||||
f.write('simulation ended @ {}\n'.format(current_time()))
|
f.write('simulation ended @ {}\n'.format(current_time()))
|
||||||
|
|
||||||
@@ -149,10 +162,57 @@ class dummy(Exporter):

 class graphdrawing(Exporter):

-    def trial(self, env, stats):
+    def trial_end(self, env, stats):
         # Outside effects
         f = plt.figure()
         nx.draw(env.G, node_size=10, width=0.2, pos=nx.spring_layout(env.G, scale=100), ax=f.add_subplot(111))
         with open('graph-{}.png'.format(env.name)) as f:
             f.savefig(f)

+
+'''
+Convert an environment into a NetworkX graph
+'''
+def env_to_graph(env, history=None):
+    G = nx.Graph(env.G)
+
+    for agent in env.network_agents:
+
+        attributes = {'agent': str(agent.__class__)}
+        lastattributes = {}
+        spells = []
+        lastvisible = False
+        laststep = None
+        if not history:
+            history = sorted(list(env.state_to_tuples()))
+        for _, t_step, attribute, value in history:
+            if attribute == 'visible':
+                nowvisible = value
+                if nowvisible and not lastvisible:
+                    laststep = t_step
+                if not nowvisible and lastvisible:
+                    spells.append((laststep, t_step))
+
+                lastvisible = nowvisible
+                continue
+            key = 'attr_' + attribute
+            if key not in attributes:
+                attributes[key] = list()
+            if key not in lastattributes:
+                lastattributes[key] = (value, t_step)
+            elif lastattributes[key][0] != value:
+                last_value, laststep = lastattributes[key]
+                commit_value = (last_value, laststep, t_step)
+                if key not in attributes:
+                    attributes[key] = list()
+                attributes[key].append(commit_value)
+                lastattributes[key] = (value, t_step)
+        for k, v in lastattributes.items():
+            attributes[k].append((v[0], v[1], None))
+        if lastvisible:
+            spells.append((laststep, None))
+        if spells:
+            G.add_node(agent.id, spells=spells, **attributes)
+        else:
+            G.add_node(agent.id, **attributes)
+
+    return G
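Since the graph-conversion logic now lives in a module-level helper rather than in `Environment`, exporting a topology by hand would look roughly like this (hedged sketch; the import location is assumed from the hunk above, and `env` is a finished soil Environment):

    import networkx as nx
    from soil.exporters import env_to_graph

    G = env_to_graph(env)
    nx.write_gexf(G, 'trial.gexf', version='1.2draft')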
@@ -2,6 +2,7 @@ import os
 import logging
 import ast
 import sys
+import re
 import importlib
 from glob import glob
 from itertools import product, chain
@@ -18,6 +19,9 @@ logger = logging.getLogger('soil')
 def load_network(network_params, dir_path=None):
     G = nx.Graph()

+    if not network_params:
+        return G
+
     if 'path' in network_params:
         path = network_params['path']
         if dir_path and not os.path.isabs(path):
@@ -169,6 +173,9 @@ def serialize(v, known_modules=[]):
     func = serializer(tname)
     return func(v), tname


+IS_CLASS = re.compile(r"<class '(.*)'>")
+
 def deserializer(type_, known_modules=[]):
     if type(type_) != str:  # Already deserialized
         return type_
@@ -179,6 +186,13 @@ def deserializer(type_, known_modules=[]):
     if hasattr(builtins, type_):  # Check if it's a builtin type
         cls = getattr(builtins, type_)
         return lambda x=None: ast.literal_eval(x) if x is not None else cls()

+    match = IS_CLASS.match(type_)
+    if match:
+        modname, tname = match.group(1).rsplit(".", 1)
+        module = importlib.import_module(modname)
+        cls = getattr(module, tname)
+        return getattr(cls, 'deserialize', cls)
+
     # Otherwise, see if we can find the module and the class
     modules = known_modules or []
     options = []
|
|||||||
|
|
||||||
if '.' in type_: # Fully qualified module
|
if '.' in type_: # Fully qualified module
|
||||||
module, type_ = type_.rsplit(".", 1)
|
module, type_ = type_.rsplit(".", 1)
|
||||||
options.append ((module, type_))
|
options.append((module, type_))
|
||||||
|
|
||||||
errors = []
|
errors = []
|
||||||
for modname, tname in options:
|
for modname, tname in options:
|
||||||
@ -213,10 +227,10 @@ def deserialize(type_, value=None, **kwargs):
|
|||||||
|
|
||||||
|
|
||||||
def deserialize_all(names, *args, known_modules=['soil'], **kwargs):
|
def deserialize_all(names, *args, known_modules=['soil'], **kwargs):
|
||||||
'''Return the set of exporters for a simulation, given the exporter names'''
|
'''Return the list of deserialized objects'''
|
||||||
exporters = []
|
objects = []
|
||||||
for name in names:
|
for name in names:
|
||||||
mod = deserialize(name, known_modules=known_modules)
|
mod = deserialize(name, known_modules=known_modules)
|
||||||
exporters.append(mod(*args, **kwargs))
|
objects.append(mod(*args, **kwargs))
|
||||||
return exporters
|
return objects
|
||||||
|
|
||||||
|
@@ -10,8 +10,6 @@ import networkx as nx
 from networkx.readwrite import json_graph
 from multiprocessing import Pool
 from functools import partial
-from tsih import History
-
 import pickle

 from . import serialization, utils, basestring, agents
@@ -20,127 +18,34 @@ from .utils import logger
 from .exporters import default
 from .stats import defaultStats

+from .config import Config
+

 #TODO: change documentation for simulation
 class Simulation:
     """
-    Similar to nsim.NetworkSimulation with three main differences:
-    1) agent type can be specified by name or by class.
-    2) instead of just one type, a network agents distribution can be used.
-       The distribution specifies the weight (or probability) of each
-       agent type in the topology. This is an example distribution: ::
-
-            [
-             {'agent_type': 'agent_type_1',
-              'weight': 0.2,
-              'state': {
-                  'id': 0
-              }
-             },
-             {'agent_type': 'agent_type_2',
-              'weight': 0.8,
-              'state': {
-                  'id': 1
-              }
-             }
-            ]
-
-       In this example, 20% of the nodes will be marked as type
-       'agent_type_1'.
-    3) if no initial state is given, each node's state will be set
-       to `{'id': 0}`.

     Parameters
     ---------
-    name : str, optional
+    config (optional): :class:`config.Config`
         name of the Simulation
-    group : str, optional
-        a group name can be used to link simulations
-    topology : networkx.Graph instance, optional
-    network_params : dict
-        parameters used to create a topology with networkx, if no topology is given
-    network_agents : dict
-        definition of agents to populate the topology with
-    agent_type : NetworkAgent subclass, optional
-        Default type of NetworkAgent to use for nodes not specified in network_agents
-    states : list, optional
-        List of initial states corresponding to the nodes in the topology. Basic form is a list of integers
-        whose value indicates the state
-    dir_path: str, optional
-        Directory path to load simulation assets (files, modules...)
-    seed : str, optional
-        Seed to use for the random generator
-    num_trials : int, optional
-        Number of independent simulation runs
-    max_time : int, optional
-        Time how long the simulation should run
-    environment_params : dict, optional
-        Dictionary of globally-shared environmental parameters
-    environment_agents: dict, optional
-        Similar to network_agents. Distribution of Agents that control the environment
-    environment_class: soil.environment.Environment subclass, optional
-        Class for the environment. It defaults to soil.environment.Environment
-    load_module : str, module name, deprecated
-        If specified, soil will load the content of this module under 'soil.agents.custom'
-    history: tsih.History subclass, optional
-        Class to use to store the history of the simulation (and environments). It defaults to tsih.History
-        If set to True, tsih.History will be used. If set to False or None, tsih.NoHistory will be used.
+    kwargs: parameters to use to initialize a new configuration, if one has not been provided.
     """

-    def __init__(self, name=None, group=None, topology=None, network_params=None,
-                 network_agents=None, agent_type=None, states=None,
-                 default_state=None, interval=1, num_trials=1,
-                 max_time=100, load_module=None, seed=None,
-                 dir_path=None, environment_agents=None,
-                 environment_params=None, environment_class=None,
-                 history=History, **kwargs):
-
-        self.load_module = load_module
-        self.network_params = network_params
-        self.name = name or 'Unnamed'
-        self.seed = str(seed or name)
-        self._id = '{}_{}'.format(self.name, strftime("%Y-%m-%d_%H.%M.%S"))
-        self.group = group or ''
-        self.num_trials = num_trials
-        self.max_time = max_time
-        self.default_state = default_state or {}
-        self.dir_path = dir_path or os.getcwd()
-        self.interval = interval
-
-        sys.path += list(x for x in [os.getcwd(), self.dir_path] if x not in sys.path)
-
-        if topology is None:
-            topology = serialization.load_network(network_params,
-                                                  dir_path=self.dir_path)
-        elif isinstance(topology, basestring) or isinstance(topology, dict):
-            topology = json_graph.node_link_graph(topology)
-        self.topology = nx.Graph(topology)
-
-        self.environment_params = environment_params or {}
-        self.environment_class = serialization.deserialize(environment_class,
-                                                           known_modules=['soil.environment', ]) or Environment
-
-        environment_agents = environment_agents or []
-        self.environment_agents = agents._convert_agent_types(environment_agents,
-                                                              known_modules=[self.load_module])
-
-        distro = agents.calculate_distribution(network_agents,
-                                               agent_type)
-        self.network_agents = agents._convert_agent_types(distro,
-                                                          known_modules=[self.load_module])
-
-        self.states = agents._validate_states(states,
-                                              self.topology)
-
-        if history == True:
-            history = History
-        elif not history:
-            history = NoHistory
-
-        self._history = history(name=self.name,
-                                backup=False)
+    def __init__(self, config=None,
+                 **kwargs):
+
+        if bool(config) == bool(kwargs):
+            raise ValueError("Specify either a configuration or the parameters to initialize a configuration")
+
+        if kwargs:
+            config = Config(**kwargs)
+
+        self.config = config
+
+    @property
+    def name(self) -> str:
+        return self.config.name

     def run_simulation(self, *args, **kwargs):
         return self.run(*args, **kwargs)
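A hedged sketch of the new construction pattern (pass either a ready-made Config or the keyword arguments used to build one, but not both; parameter values are invented):

    from soil.config import Config
    from soil.simulation import Simulation

    # Option 1: build the configuration explicitly
    cfg = Config(name='example',
                 network_params={'generator': 'complete_graph', 'n': 10},
                 num_trials=2, max_time=50,
                 environment_params={'prob_infection': 0.1})
    sim = Simulation(config=cfg)

    # Option 2: let Simulation build the Config from keyword arguments
    sim = Simulation(name='example',
                     network_params={'generator': 'complete_graph', 'n': 10},
                     num_trials=2, max_time=50,
                     environment_params={'prob_infection': 0.1})

    print(sim.name)   # delegated to sim.config.name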
@@ -153,13 +58,13 @@ class Simulation:
         if parallel and not os.environ.get('SENPY_DEBUG', None):
             p = Pool()
             func = partial(self.run_trial_exceptions, **kwargs)
-            for i in p.imap_unordered(func, range(self.num_trials)):
+            for i in p.imap_unordered(func, range(self.config.num_trials)):
                 if isinstance(i, Exception):
                     logger.error('Trial failed:\n\t%s', i.message)
                     continue
                 yield i
         else:
-            for i in range(self.num_trials):
+            for i in range(self.config.num_trials):
                 yield self.run_trial(trial_id=i,
                                      **kwargs)

@@ -183,47 +88,44 @@ class Simulation:
                                              known_modules=['soil.stats',],
                                              **stats_params)

-        with utils.timer('simulation {}'.format(self.name)):
+        with utils.timer('simulation {}'.format(self.config.name)):
             for stat in stats:
-                stat.start()
+                stat.sim_start()

             for exporter in exporters:
                 exporter.start()

             for env in self._run_sync_or_async(parallel=parallel,
                                                log_level=log_level,
                                                **kwargs):

-                collected = list(stat.trial(env) for stat in stats)
+                for exporter in exporters:
+                    exporter.trial_start(env)

-                saved = self.save_stats(collected, t_step=env.now, trial_id=env.name)
+                collected = list(stat.trial_end(env) for stat in stats)
+
+                saved = self._update_stats(collected, t_step=env.now, trial_id=env.name)

                 for exporter in exporters:
-                    exporter.trial(env, saved)
+                    exporter.trial_end(env, saved)

                 yield env


             collected = list(stat.end() for stat in stats)
-            saved = self.save_stats(collected)
+            saved = self._update_stats(collected)

             for exporter in exporters:
-                exporter.end(saved)
+                exporter.sim_end(saved)

-    def save_stats(self, collection, **kwargs):
+    def _update_stats(self, collection, **kwargs):
         stats = dict(kwargs)
         for stat in collection:
             stats.update(stat)
-        self._history.save_stats(utils.flatten_dict(stats))
         return stats

-    def get_stats(self, **kwargs):
-        return self._history.get_stats(**kwargs)
-
     def log_stats(self, stats):
         logger.info('Stats: \n{}'.format(yaml.dump(stats, default_flow_style=False)))


     def get_env(self, trial_id=0, **kwargs):
         '''Create an environment for a trial of the simulation'''
         opts = self.environment_params.copy()
@@ -246,18 +148,20 @@ class Simulation:
         env = self.environment_class(**opts)
         return env

-    def run_trial(self, trial_id=0, until=None, log_level=logging.INFO, **opts):
+    def run_trial(self, trial_id=None, until=None, log_level=logging.INFO, **opts):
         """
         Run a single trial of the simulation

         """
+        trial_id = trial_id if trial_id is not None else current_time()
         if log_level:
             logger.setLevel(log_level)
         # Set-up trial environment and graph
-        until = until or self.max_time
-        env = self.get_env(trial_id=trial_id, **opts)
+        until = until or self.config.max_time
+        env = Environment.from_config(self.config, trial_id=trial_id)
         # Set up agents on nodes
-        with utils.timer('Simulation {} trial {}'.format(self.name, trial_id)):
+        with utils.timer('Simulation {} trial {}'.format(self.config.name, trial_id)):
             env.run(until)
         return env

@@ -274,64 +178,6 @@ class Simulation:
             ex.message = ''.join(traceback.format_exception(type(ex), ex, ex.__traceback__)[:])
             return ex

-    def to_dict(self):
-        return self.__getstate__()
-
-    def to_yaml(self):
-        return yaml.dump(self.to_dict())
-
-
-    def dump_yaml(self, f=None, outdir=None):
-        if not f and not outdir:
-            raise ValueError('specify a file or an output directory')
-
-        if not f:
-            f = os.path.join(outdir, '{}.dumped.yml'.format(self.name))
-
-        with utils.open_or_reuse(f, 'w') as f:
-            f.write(self.to_yaml())
-
-    def dump_pickle(self, f=None, outdir=None):
-        if not outdir and not f:
-            raise ValueError('specify a file or an output directory')
-
-        if not f:
-            f = os.path.join(outdir,
-                             '{}.simulation.pickle'.format(self.name))
-        with utils.open_or_reuse(f, 'wb') as f:
-            pickle.dump(self, f)
-
-    def dump_sqlite(self, f):
-        return self._history.dump(f)
-
-    def __getstate__(self):
-        state={}
-        for k, v in self.__dict__.items():
-            if k[0] != '_':
-                state[k] = v
-        state['topology'] = json_graph.node_link_data(self.topology)
-        state['network_agents'] = agents.serialize_definition(self.network_agents,
-                                                              known_modules = [])
-        state['environment_agents'] = agents.serialize_definition(self.environment_agents,
-                                                                  known_modules = [])
-        state['environment_class'] = serialization.serialize(self.environment_class,
-                                                             known_modules=['soil.environment'])[1]  # func, name
-        if state['load_module'] is None:
-            del state['load_module']
-        return state
-
-    def __setstate__(self, state):
-        self.__dict__ = state
-        self.load_module = getattr(self, 'load_module', None)
-        if self.dir_path not in sys.path:
-            sys.path += [self.dir_path, os.getcwd()]
-        self.topology = json_graph.node_link_graph(state['topology'])
-        self.network_agents = agents.calculate_distribution(agents._convert_agent_types(self.network_agents))
-        self.environment_agents = agents._convert_agent_types(self.environment_agents,
-                                                              known_modules=[self.load_module])
-        self.environment_class = serialization.deserialize(self.environment_class,
-                                                           known_modules=[self.load_module, 'soil.environment', ])  # func, name


 def all_from_config(config):
     configs = list(serialization.load_config(config))
@@ -8,18 +8,23 @@ class Stats:
     if you don't plan to implement all the methods.
     '''

-    def __init__(self, simulation):
+    def __init__(self, simulation, name=None):
+        self.name = name or type(self).__name__
         self.simulation = simulation

-    def start(self):
+    def sim_start(self):
         '''Method to call when the simulation starts'''
         pass

-    def end(self):
+    def sim_end(self):
         '''Method to call when the simulation ends'''
         return {}

-    def trial(self, env):
+    def trial_start(self, env):
+        '''Method to call when a trial starts'''
+        return {}
+
+    def trial_end(self, env):
         '''Method to call when a trial ends'''
         return {}

|
|||||||
the mean value, and its deviation.
|
the mean value, and its deviation.
|
||||||
'''
|
'''
|
||||||
|
|
||||||
def start(self):
|
def sim_start(self):
|
||||||
self.means = []
|
self.means = []
|
||||||
self.counts = []
|
self.counts = []
|
||||||
|
|
||||||
def trial(self, env):
|
def trial_end(self, env):
|
||||||
df = env.df()
|
df = pd.DataFrame(env.state_to_tuples())
|
||||||
df = df.drop('SEED', axis=1)
|
df = df.drop('SEED', axis=1)
|
||||||
ix = df.index[-1]
|
ix = df.index[-1]
|
||||||
attrs = df.columns.get_level_values(0)
|
attrs = df.columns.get_level_values(0)
|
||||||
@ -60,7 +65,7 @@ class distribution(Stats):
|
|||||||
|
|
||||||
return stats
|
return stats
|
||||||
|
|
||||||
def end(self):
|
def sim_end(self):
|
||||||
dfm = pd.DataFrame(self.means, columns=['metric', 'key', 'value'])
|
dfm = pd.DataFrame(self.means, columns=['metric', 'key', 'value'])
|
||||||
dfc = pd.DataFrame(self.counts, columns=['metric', 'key', 'value', 'count'])
|
dfc = pd.DataFrame(self.counts, columns=['metric', 'key', 'value', 'count'])
|
||||||
|
|
||||||
@ -87,7 +92,7 @@ class distribution(Stats):
|
|||||||
|
|
||||||
class defaultStats(Stats):
|
class defaultStats(Stats):
|
||||||
|
|
||||||
def trial(self, env):
|
def trial_end(self, env):
|
||||||
c = Counter()
|
c = Counter()
|
||||||
c.update(a.__class__.__name__ for a in env.network_agents)
|
c.update(a.__class__.__name__ for a in env.network_agents)
|
||||||
|
|
||||||
|