From af76f54a2873d6c6d5d43407fed148a8fd1b81c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=2E=20Fernando=20S=C3=A1nchez?= Date: Mon, 16 Oct 2017 19:23:52 +0200 Subject: [PATCH] Added rabbits --- docs/quickstart.rst | 29 ++++++----- examples/custom.yml | 21 ++++++++ examples/custom_agents.py | 103 ++++++++++++++++++++++++++++++++++++++ examples/torvalds.yml | 4 +- soil/__init__.py | 16 +++++- soil/__main__.py | 4 ++ soil/agents/__init__.py | 68 ++++++++++++++++++------- soil/environment.py | 96 ++++++++++++++++++++++++----------- soil/simulation.py | 59 ++++++++++++++-------- soil/utils.py | 22 +++++++- tests/test_main.py | 10 ++-- 11 files changed, 343 insertions(+), 89 deletions(-) create mode 100644 examples/custom.yml create mode 100644 examples/custom_agents.py diff --git a/docs/quickstart.rst b/docs/quickstart.rst index acb2b85..6539ed3 100644 --- a/docs/quickstart.rst +++ b/docs/quickstart.rst @@ -13,7 +13,7 @@ Here's an example (``example.yml``). name: MyExampleSimulation max_time: 50 num_trials: 3 - timeout: 2 + interval: 2 network_params: network_type: barabasi_albert_graph n: 100 @@ -34,6 +34,12 @@ Here's an example (``example.yml``). environment_params: prob_infect: 0.075 + +This example configuration will run three trials of a simulation containing a randomly generated network. +The 100 nodes in the network will be SISaModel agents, 10% of them will start in the content state, 10% in the discontent state, and the remaining 80% in the neutral state. +All agents will have access to the environment, which only contains one variable, ``prob_infected``. +The state of the agents will be updated every 2 seconds (``interval``). + Now run the simulation with the command line tool: .. code:: bash @@ -41,7 +47,7 @@ Now run the simulation with the command line tool: soil example.yml Once the simulation finishes, its results will be stored in a folder named ``MyExampleSimulation``. -Four types of objects are saved by default: a pickle of the simulation, a ``YAML`` representation of the simulation (to re-launch it), for every trial, a csv file with the content of the state of every network node and the environment parameters at every step of the simulation as well as the network in gephi format (``gexf``). +Four types of objects are saved by default: a pickle of the simulation; a ``YAML`` representation of the simulation (which can be used to re-launch it); and for every trial, a csv file with the content of the state of every network node and the environment parameters at every step of the simulation, as well as the network in gephi format (``gexf``). .. code:: @@ -54,12 +60,6 @@ Four types of objects are saved by default: a pickle of the simulation, a ``YAML │   └── Sim_prob_0_trial_0.gexf -This example configuration will run three trials of a simulation containing a randomly generated network. -The 100 nodes in the network will be SISaModel agents, 10% of them will start in the content state, 10% in the discontent state, and the remaining 80% in the neutral state. -All agents will have access to the environment, which only contains one variable, ``prob_infected``. -The state of the agents will be updated every 2 seconds (``timeout``). - - Network ======= @@ -94,7 +94,7 @@ For example, the following configuration is equivalent to :code:`nx.complete_gra Environment ============ The environment is the place where the shared state of the simulation is stored. -For instance, the probability of certain events. +For instance, the probability of disease outbreak. 
 The configuration file may specify the initial value of the environment parameters:
 
 .. code:: yaml
 
@@ -103,14 +103,17 @@ The configuration file may specify the initial value of the environment paramete
     daily_probability_of_earthquake: 0.001
     number_of_earthquakes: 0
 
+Any agent has unrestricted access to the environment.
+However, for the sake of simplicity, we recommend limiting environment updates to environment agents.
+
 Agents
 ======
 Agents are a way of modelling behavior.
 Agents can be characterized with two variables: an agent type (``agent_type``) and its state.
-Only one agent is executed at a time (generally, every ``timeout`` seconds), and it has access to its state and the environment parameters.
+Only one agent is executed at a time (generally, every ``interval`` seconds), and it has access to its state and the environment parameters.
 Through the environment, it can access the network topology and the state of other agents.
 
-There are three three types of agents according to how they are added to the simulation: network agents, environment agent, and other agents.
+There are two types of agents according to how they are added to the simulation: network agents and environment agents.
 
 Network Agents
 ##############
@@ -118,13 +121,13 @@ Network agents are attached to a node in the topology.
 The configuration file allows you to specify how agents will be mapped to topology nodes.
 
 The simplest way is to specify a single type of agent.
-Hence, every node in the network will have an associated agent of that type.
+Hence, every node in the network will be associated with an agent of that type.
 
 .. code:: yaml
 
     agent_type: SISaModel
 
-It is also possible to add more than one type of agent to the simulation, and to control the ratio of each type (``weight``).
+It is also possible to add more than one type of agent to the simulation, and to control the ratio of each type (using the ``weight`` property).
 For instance, with following configuration, it is five times more likely for a node to be assigned a CounterModel type than a SISaModel type.
 
 ..
code:: yaml diff --git a/examples/custom.yml b/examples/custom.yml new file mode 100644 index 0000000..403ad8e --- /dev/null +++ b/examples/custom.yml @@ -0,0 +1,21 @@ +--- +load_module: custom_agents +name: custom_agent_example +max_time: 2500 +interval: 1 +seed: MySimulationSeed +agent_type: RabbitModel +environment_agents: + - agent_type: RandomAccident +default_state: + mating_prob: 1 +topology: + nodes: + - id: 1 + state: + gender: female + - id: 0 + state: + gender: male + directed: true + links: [] diff --git a/examples/custom_agents.py b/examples/custom_agents.py new file mode 100644 index 0000000..dcf8e2b --- /dev/null +++ b/examples/custom_agents.py @@ -0,0 +1,103 @@ +import logging +from soil.agents import NetworkAgent, FSM, state, default_state, BaseAgent +from enum import Enum +from random import random, choice +from itertools import islice + +logger = logging.getLogger(__name__) + +class Genders(Enum): + male = 'male' + female = 'female' + + +class RabbitModel(NetworkAgent, FSM): + + defaults = { + 'age': 0, + 'gender': Genders.male.value, + 'mating_prob': 0.001, + 'offspring': 0, + } + + sexual_maturity = 4*30 + life_expectancy = 365 * 3 + gestation = 33 + pregnancy = -1 + max_females = 10 + + @default_state + @state + def newborn(self): + self['age'] += 1 + + if self['age'] >= self.sexual_maturity: + return self.fertile + + @state + def fertile(self): + self['age'] += 1 + if self['age'] > self.life_expectancy: + return self.dead + + if self['gender'] == Genders.female.value: + return + + # Males try to mate + females = self.get_agents(state_id=self.fertile.id, gender=Genders.female.value, limit_neighbors=False) + for f in islice(females, self.max_females): + r = random() + if r < self['mating_prob']: + self.impregnate(f) + break # Take a break + + def impregnate(self, whom): + if self['gender'] == Genders.female.value: + raise NotImplementedError('Females cannot impregnate') + whom['pregnancy'] = 0 + whom['mate'] = self.id + whom.set_state(whom.pregnant) + logger.debug('{} impregnating: {}. {}'.format(self.id, whom.id, whom.state)) + + @state + def pregnant(self): + self['age'] += 1 + if self['age'] > self.life_expectancy: + return self.dead + + self['pregnancy'] += 1 + logger.debug('Pregnancy: {}'.format(self['pregnancy'])) + if self['pregnancy'] >= self.gestation: + + state = {} + state['gender'] = choice(list(Genders)).value + child = self.env.add_node(self.__class__, state) + self.env.add_edge(self.id, child.id) + self.env.add_edge(self['mate'], child.id) + # self.add_edge() + logger.info("A rabbit has been born: {}. 
Total: {}".format(child.id, len(self.global_topology.nodes))) + self['offspring'] += 1 + self.env.get_agent(self['mate'])['offspring'] += 1 + del self['mate'] + self['pregnancy'] = -1 + return self.fertile + + @state + def dead(self): + logger.info('Agent {} is dying'.format(self.id)) + if 'pregnancy' in self and self['pregnancy'] > -1: + logger.info('A mother has died carrying a baby!: {}!'.format(self.state)) + self.die() + return + + +class RandomAccident(BaseAgent): + + def step(self): + logger.debug('Killing some rabbits!') + prob_death = self.env.get('prob_death', -1) + for i in self.env.network_agents: + r = random() + if r < prob_death: + logger.info('I killed a rabbit: {}'.format(i.id)) + i.set_state(i.dead) diff --git a/examples/torvalds.yml b/examples/torvalds.yml index d346c99..e338163 100644 --- a/examples/torvalds.yml +++ b/examples/torvalds.yml @@ -1,6 +1,6 @@ --- name: torvalds_example -max_time: 1 +max_time: 10 interval: 2 agent_type: CounterModel default_state: @@ -11,4 +11,4 @@ states: Torvalds: skill_level: 'God' balkian: - skill_level: 'developer' \ No newline at end of file + skill_level: 'developer' diff --git a/soil/__init__.py b/soil/__init__.py index 7a14b1c..f916204 100644 --- a/soil/__init__.py +++ b/soil/__init__.py @@ -1,6 +1,8 @@ import importlib import sys import os +import pdb +import logging __version__ = "0.9.7" @@ -29,6 +31,8 @@ def main(): help='file containing the code of any custom agents.') parser.add_argument('--dry-run', '--dry', action='store_true', help='Do not store the results of the simulation.') + parser.add_argument('--pdb', action='store_true', + help='Use a pdb console in case of exception.') parser.add_argument('--output', '-o', type=str, help='folder to write results to. It defaults to the current directory.') @@ -38,8 +42,16 @@ def main(): sys.path.append(os.getcwd()) importlib.import_module(args.module) - print('Loading config file: {}'.format(args.file, args.output)) - simulation.run_from_config(args.file, dump=(not args.dry_run), results_dir=args.output) + logging.basicConfig(level=logging.INFO) + logger = logging.getLogger(__name__) + logger.info('Loading config file: {}'.format(args.file, args.output)) + try: + simulation.run_from_config(args.file, dump=(not args.dry_run), results_dir=args.output) + except Exception as ex: + if args.pdb: + pdb.post_mortem() + else: + raise if __name__ == '__main__': diff --git a/soil/__main__.py b/soil/__main__.py index 5504027..7a5ff94 100644 --- a/soil/__main__.py +++ b/soil/__main__.py @@ -1,8 +1,12 @@ import importlib import sys +import os import argparse +import logging from . import simulation +logger = logging.getLogger(__name__) + def main(): diff --git a/soil/agents/__init__.py b/soil/agents/__init__.py index b951058..0460870 100644 --- a/soil/agents/__init__.py +++ b/soil/agents/__init__.py @@ -8,6 +8,7 @@ import nxsim from collections import OrderedDict from copy import deepcopy +from functools import partial import json from functools import wraps @@ -27,28 +28,33 @@ class BaseAgent(nxsim.BaseAgent, metaclass=MetaAgent): A special simpy BaseAgent that keeps track of its state history. 
""" - def __init__(self, *args, **kwargs): - self._history = OrderedDict() + defaults = {} + + def __init__(self, **kwargs): self._neighbors = None - super().__init__(*args, **kwargs) + self.alive = True + state = deepcopy(self.defaults) + state.update(kwargs.pop('state', {})) + kwargs['state'] = state + super().__init__(**kwargs) def __getitem__(self, key): if isinstance(key, tuple): k, t_step = key - if k is not None: - if t_step is not None: - return self._history[t_step][k] - else: - return {tt: tv.get(k, None) for tt, tv in self._history.items()} - else: - return self._history[t_step] - return self.state[key] + return self.env[t_step, self.id, k] + return self.state.get(key, None) + + def __delitem__(self, key): + del self.state[key] + + def __contains__(self, key): + return key in self.state def __setitem__(self, key, value): self.state[key] = value - def save_state(self): - self._history[self.now] = deepcopy(self.state) + def get(self, key, default=None): + return self[key] if key in self else default @property def now(self): @@ -59,19 +65,21 @@ class BaseAgent(nxsim.BaseAgent, metaclass=MetaAgent): return None def run(self): - while True: + while self.alive: res = self.step() yield res or self.env.timeout(self.env.interval) + def die(self, remove=False): + self.alive = False + if remove: + super().die() + def step(self): pass def to_json(self): return json.dumps(self._history) - -class NetworkAgent(BaseAgent, nxsim.BaseNetworkAgent): - def count_agents(self, state_id=None, limit_neighbors=False): if limit_neighbors: agents = self.global_topology.neighbors(self.id) @@ -84,6 +92,25 @@ class NetworkAgent(BaseAgent, nxsim.BaseNetworkAgent): count += 1 return count + def get_agents(self, state_id=None, limit_neighbors=False, **kwargs): + if limit_neighbors: + agents = super().get_agents(state_id, limit_neighbors) + else: + agents = filter(lambda x: state_id is None or x.state.get('id', None) == state_id, + self.env.agents) + + def matches_all(agent): + state = agent.state + for k, v in kwargs.items(): + if state.get(k, None) != v: + return False + return True + + return filter(matches_all, agents) + + +class NetworkAgent(BaseAgent, nxsim.BaseNetworkAgent): + def count_neighboring_agents(self, state_id=None): return self.count_agents(state_id, limit_neighbors=True) @@ -155,6 +182,13 @@ class FSM(BaseAgent, metaclass=MetaFSM): raise Exception('{} is not a valid id for {}'.format(next_state, self)) self.states[next_state](self) + def set_state(self, state): + if hasattr(state, 'id'): + state = state.id + if state not in self.states: + raise ValueError('{} is not a valid state'.format(state)) + self.state['id'] = state + from .BassModel import * from .BigMarketModel import * diff --git a/soil/environment.py b/soil/environment.py index 377e084..51b89ec 100644 --- a/soil/environment.py +++ b/soil/environment.py @@ -1,12 +1,16 @@ import os +import time import csv import weakref -from random import random +import random from copy import deepcopy +from functools import partial import networkx as nx import nxsim +from . 
import utils + class SoilEnvironment(nxsim.NetworkEnvironment): @@ -16,6 +20,8 @@ class SoilEnvironment(nxsim.NetworkEnvironment): states=None, default_state=None, interval=1, + seed=None, + dump=False, *args, **kwargs): self.name = name or 'UnnamedEnvironment' self.states = deepcopy(states) or {} @@ -25,8 +31,11 @@ class SoilEnvironment(nxsim.NetworkEnvironment): self._history = {} self.interval = interval self.logger = None + self.dump = dump # Add environment agents first, so their events get # executed before network agents + self['SEED'] = seed or time.time() + random.seed(self['SEED']) self.environment_agents = environment_agents or [] self.network_agents = network_agents or [] self.process(self.save_state()) @@ -65,26 +74,32 @@ class SoilEnvironment(nxsim.NetworkEnvironment): for ix in self.G.nodes(): i = ix node = self.G.node[i] - v = random() - found = False - for d in network_agents: - threshold = d['threshold'] - if v >= threshold[0] and v < threshold[1]: - agent = d['agent_type'] - state = None - if 'state' in d: - state = deepcopy(d['state']) - else: - try: - state = self.states[i] - except (IndexError, KeyError): - state = deepcopy(self.default_state) - node['agent'] = agent(environment=self, - agent_id=i, - state=state) - found = True - break - assert found + agent, state = utils.agent_from_distribution(network_agents) + self.set_agent(i, agent_type=agent, state=state) + + def set_agent(self, agent_id, agent_type, state=None): + node = self.G.nodes[agent_id] + defstate = deepcopy(self.default_state) + defstate.update(self.states.get(agent_id, {})) + if state: + defstate.update(state) + state = defstate + state.update(node.get('state', {})) + a = agent_type(environment=self, + agent_id=agent_id, + state=state) + node['agent'] = a + return a + + def add_node(self, agent_type, state=None): + agent_id = int(len(self.G.nodes())) + self.G.add_node(agent_id) + a = self.set_agent(agent_id, agent_type, state) + a['visible'] = True + return a + + def add_edge(self, agent1, agent2, attrs=None): + return self.G.add_edge(agent1, agent2) def run(self, *args, **kwargs): self._save_state() @@ -92,9 +107,12 @@ class SoilEnvironment(nxsim.NetworkEnvironment): self._save_state() def _save_state(self): + # for agent in self.agents: + # agent.save_state() + nowd = self._history[self.now] = {} + nowd['env'] = deepcopy(self.environment_params) for agent in self.agents: - agent.save_state() - self._history[self.now] = deepcopy(self.environment_params) + nowd[agent.id] = deepcopy(agent.state) def save_state(self): while True: @@ -107,11 +125,32 @@ class SoilEnvironment(nxsim.NetworkEnvironment): self._save_state() def __getitem__(self, key): + if isinstance(key, tuple): + t_step, agent_id, k = key + + def key_or_dict(d, k, nfunc): + if k is None: + if d is None: + return {} + return {k: nfunc(v) for k, v in d.items()} + if k in d: + return nfunc(d[k]) + return {} + + f1 = partial(key_or_dict, k=k, nfunc=lambda x: x) + f2 = partial(key_or_dict, k=agent_id, nfunc=f1) + return key_or_dict(self._history, t_step, f2) return self.environment_params[key] def __setitem__(self, key, value): self.environment_params[key] = value + def __contains__(self, key): + return key in self.environment_params + + def get(self, key, default=None): + return self[key] if key in self else default + def get_path(self, dir_path=None): dir_path = dir_path or self.sim().dir_path if not os.path.exists(dir_path): @@ -141,13 +180,10 @@ class SoilEnvironment(nxsim.NetworkEnvironment): nx.write_gexf(G, graph_path, version="1.2draft") 
def history_to_tuples(self): - for tstep, state in self._history.items(): - for attribute, value in state.items(): - yield ('env', tstep, attribute, value) - for agent in self.agents: - for tstep, state in agent._history.items(): + for tstep, states in self._history.items(): + for a_id, state in states.items(): for attribute, value in state.items(): - yield (agent.id, tstep, attribute, value) + yield (a_id, tstep, attribute, value) def history_to_graph(self): G = nx.Graph(self.G) @@ -159,7 +195,7 @@ class SoilEnvironment(nxsim.NetworkEnvironment): spells = [] lastvisible = False laststep = None - for t_step, state in reversed(list(agent._history.items())): + for t_step, state in reversed(list(self[None, agent.id, None].items())): for attribute, value in state.items(): if attribute == 'visible': nowvisible = state[attribute] diff --git a/soil/simulation.py b/soil/simulation.py index 838cd65..2b06ebe 100644 --- a/soil/simulation.py +++ b/soil/simulation.py @@ -1,14 +1,14 @@ import weakref import os -import csv import time +import imp +import sys import yaml +import logging import networkx as nx from networkx.readwrite import json_graph from copy import deepcopy -from random import random -from matplotlib import pyplot as plt import pickle @@ -16,6 +16,7 @@ from nxsim import NetworkSimulation from . import agents, utils, environment, basestring +logger = logging.getLogger(__name__) class SoilSimulation(NetworkSimulation): """ @@ -47,9 +48,9 @@ class SoilSimulation(NetworkSimulation): """ def __init__(self, name=None, topology=None, network_params=None, network_agents=None, agent_type=None, states=None, - default_state=None, interval=1, - dir_path=None, num_trials=3, max_time=100, - agent_module=None, + default_state=None, interval=1, dump=False, + dir_path=None, num_trials=1, max_time=100, + agent_module=None, load_module=None, seed=None, environment_agents=None, environment_params=None): if topology is None: @@ -58,6 +59,8 @@ class SoilSimulation(NetworkSimulation): elif isinstance(topology, basestring) or isinstance(topology, dict): topology = json_graph.node_link_graph(topology) + + self.load_module = load_module self.topology = nx.Graph(topology) self.network_params = network_params self.name = name or 'UnnamedSimulation' @@ -66,8 +69,15 @@ class SoilSimulation(NetworkSimulation): self.default_state = default_state or {} self.dir_path = dir_path or os.getcwd() self.interval = interval + self.seed = seed + self.dump = dump self.environment_params = environment_params or {} + if load_module: + path = sys.path + [self.dir_path] + f, fp, desc = imp.find_module(load_module, path) + imp.load_module('soil.agents.custom', f, fp, desc) + environment_agents = environment_agents or [] self.environment_agents = self._convert_agent_types(environment_agents) @@ -132,12 +142,23 @@ class SoilSimulation(NetworkSimulation): def run(self): return list(self.run_simulation_gen()) - def run_simulation_gen(self): + def run_simulation_gen(self, *args, **kwargs): with utils.timer('simulation'): for i in range(self.num_trials): - yield self.run_trial(i) + res = self.run_trial(i) + if self.dump: + res.dump_gexf(self.dir_path) + res.dump_csv(self.dir_path) + yield res - def run_trial(self, trial_id=0): + if self.dump: + logger.info('Dumping results to {}'.format(self.dir_path)) + self.dump_pickle(self.dir_path) + self.dump_yaml(self.dir_path) + else: + logger.info('NOT dumping results') + + def run_trial(self, trial_id=0, dump=False, dir_path=None): """Run a single trial of the simulation Parameters @@ -145,11 
+166,13 @@ class SoilSimulation(NetworkSimulation): trial_id : int """ # Set-up trial environment and graph - print('Trial: {}'.format(trial_id)) + logger.info('Trial: {}'.format(trial_id)) env_name = '{}_trial_{}'.format(self.name, trial_id) env = environment.SoilEnvironment(name=env_name, topology=self.topology.copy(), + seed=self.seed, initial_time=0, + dump=self.dump, interval=self.interval, network_agents=self.network_agents, states=self.states, @@ -159,7 +182,7 @@ class SoilSimulation(NetworkSimulation): env.sim = weakref.ref(self) # Set up agents on nodes - print('\tRunning') + logger.info('\tRunning') with utils.timer('trial'): env.run(until=self.max_time) return env @@ -221,7 +244,7 @@ def run_from_config(*configs, dump=True, results_dir=None, timestamp=False): for config_def in configs: for config, cpath in utils.load_config(config_def): name = config.get('name', 'unnamed') - print("Using config(s): {name}".format(name=name)) + logger.info("Using config(s): {name}".format(name=name)) sim = SoilSimulation(**config) if timestamp: @@ -229,13 +252,7 @@ def run_from_config(*configs, dump=True, results_dir=None, timestamp=False): time.strftime("%Y-%m-%d_%H:%M:%S")) else: sim_folder = sim.name - dir_path = os.path.join(results_dir, - sim_folder) + sim.dir_path = os.path.join(results_dir, sim_folder) + sim.dump = dump + logger.info('Dumping results to {} : {}'.format(sim.dir_path, dump)) results = sim.run_simulation() - - if dump: - sim.dump_pickle(dir_path) - sim.dump_yaml(dir_path) - for env in results: - env.dump_gexf(dir_path) - env.dump_csv(dir_path) diff --git a/soil/utils.py b/soil/utils.py index 86bba4b..be1f7a2 100644 --- a/soil/utils.py +++ b/soil/utils.py @@ -1,12 +1,17 @@ import os import yaml +import logging from time import time from glob import glob +from random import random +from copy import deepcopy import networkx as nx from contextlib import contextmanager +logger = logging.getLogger(__name__) + def load_network(network_params, dir_path=None): path = network_params.get('path', None) @@ -51,7 +56,7 @@ def load_config(config): @contextmanager -def timer(name='task', pre="", function=print, to_object=None): +def timer(name='task', pre="", function=logger.info, to_object=None): start = time() yield start end = time() @@ -59,3 +64,18 @@ def timer(name='task', pre="", function=print, to_object=None): if to_object: to_object.start = start to_object.end = end + + +def agent_from_distribution(distribution, value=-1): + """Find the agent """ + if value < 0: + value = random() + for d in distribution: + threshold = d['threshold'] + if value >= threshold[0] and value < threshold[1]: + state = None + if 'state' in d: + state = deepcopy(d['state']) + return d['agent_type'], state + + raise Exception('Distribution for value {} not found in: {}'.format(value, distribution)) diff --git a/tests/test_main.py b/tests/test_main.py index d107d8c..d315f3f 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -111,7 +111,7 @@ class TestMain(TestCase): env = s.run_simulation()[0] for agent in env.network_agents: last = 0 - assert len(agent._history) == 11 + assert len(agent[None, None]) == 11 for step, total in agent['total', None].items(): if step > 0: assert total == last + 2 @@ -177,6 +177,7 @@ class TestMain(TestCase): recovered = yaml.load(serial) with utils.timer('deleting'): del recovered['topology'] + del recovered['load_module'] assert config == recovered def test_configuration_changes(self): @@ -190,6 +191,7 @@ class TestMain(TestCase): s.run_simulation() nconfig = s.to_dict() 
del nconfig['topology'] + del nconfig['load_module'] assert config == nconfig def test_examples(self): @@ -205,9 +207,11 @@ def make_example_test(path, config): s = simulation.from_config(config) envs = s.run_simulation() for env in envs: - n = config['network_params'].get('n', None) - if n is not None: + try: + n = config['network_params']['n'] assert len(env.get_agents()) == n + except KeyError: + pass os.chdir(root) return wrapped
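The pieces introduced above can be exercised together from a short script. The following is a minimal usage sketch, not part of the patch itself: it assumes it is executed from the ``examples/`` directory added here, so that ``load_module: custom_agents`` in ``custom.yml`` can be resolved, and it relies on the tuple-based history access (``env[t_step, agent_id, attribute]``) introduced in ``soil/environment.py``.

.. code:: python

    import logging

    from soil import simulation, utils

    logging.basicConfig(level=logging.INFO)

    # Load the rabbit example added by this patch and build the simulation
    # by hand, so nothing is dumped to disk (dump defaults to False).
    config, _ = next(utils.load_config('custom.yml'))
    sim = simulation.SoilSimulation(**config)

    # Run a single trial; run_trial returns the SoilEnvironment for that trial.
    env = sim.run_trial(0)

    # The environment now records the state of every agent at every step.
    # The new tuple-based __getitem__ exposes it as (t_step, agent_id, attribute).
    rabbit = next(iter(env.network_agents))
    ages = env[None, rabbit.id, 'age']   # {t_step: age} for a single agent

    print('Seed used:', env['SEED'])
    print('Final number of nodes:', len(env.G.nodes()))
    print('Age of rabbit {} over time: {}'.format(rabbit.id, ages))

The same example can also be run from the command line with ``soil custom.yml`` (adding ``--dry-run`` to skip dumping results).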