Mirror of https://github.com/gsi-upm/soil (synced 2024-11-13 06:52:28 +00:00)
fix timeout in FSM. Improve logs
commit 65f6aa72f3 (parent 09e14c6e84)
CHANGELOG.md (new file, 19 lines)
@@ -0,0 +1,19 @@
# Changelog

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [Unreleased]

### Changed

### Added

### Fixed

## [0.13.7]

### Changed

* History now defaults to not backing up! This makes it more intuitive to load an existing history for examination, at the risk of accidentally overwriting it. In practice that should not happen, because a History is only created automatically by the Environment, which explicitly passes `backup=True`.

### Added

* Agent names are assigned based on their agent type.
* Agent logging uses the agent name.
* FSM agents can now return a timeout in addition to a new state, e.g. `return self.idle, self.env.timeout(2)` will execute the *idle* state again in 2 *units of time* (`t_step = now + 2`).
* Example of using timeouts in FSM (custom_timeouts).
* `network_agents` entries may include an `ids` entry. If set, it should be a list of node ids that should be assigned that agent type. This complements the previous behavior of setting agent types with `weights`.
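To make the new FSM return value concrete, here is a minimal sketch (not part of the commit; the `Pinger` class, its `ping` state and the two-step delay are made up for illustration, following the pattern documented in the changelog entry above):

from soil.agents import FSM, state, default_state


class Pinger(FSM):
    '''Illustrative agent: re-runs its only state every two units of time.'''

    @default_state
    @state
    def ping(self):
        self.log('ping at {}'.format(self.now))
        # Returning (state, timeout) schedules the *ping* state again
        # at t_step = now + 2 instead of waiting the default interval.
        return self.ping, self.env.timeout(2)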
examples/custom_timeouts/custom_timeouts.py (new file, 36 lines)
@@ -0,0 +1,36 @@
from soil.agents import FSM, state, default_state


class Fibonacci(FSM):
    '''Agent that only executes in t_steps that are Fibonacci numbers'''

    defaults = {
        'prev': 1
    }

    @default_state
    @state
    def counting(self):
        self.log('Stopping at {}'.format(self.now))
        # Keep the previous term and sleep for it, so successive wake-ups
        # land on consecutive Fibonacci numbers (1, 2, 3, 5, 8, ...).
        prev, self['prev'] = self['prev'], max([self.now, self['prev']])
        return None, self.env.timeout(prev)


class Odds(FSM):
    '''Agent that only executes in odd t_steps'''
    @default_state
    @state
    def odds(self):
        self.log('Stopping at {}'.format(self.now))
        # Sleep 1 step if the current step is even, 2 if it is odd,
        # so the next wake-up always falls on an odd t_step.
        return None, self.env.timeout(1+self.now%2)


if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.INFO)
    from soil import Simulation
    s = Simulation(network_agents=[{'ids': [0], 'agent_type': Fibonacci},
                                   {'ids': [1], 'agent_type': Odds}],
                   dry_run=True,
                   network_params={"generator": "complete_graph", "n": 2},
                   max_time=100,
                   )
    s.run()
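The `__main__` block above relies on the new `ids` entry to pin the Fibonacci agent to node 0 and the Odds agent to node 1. For contrast, a purely weight-based distribution (a sketch, not part of the commit) would assign agent types to nodes at random rather than to fixed node ids:

network_agents = [{'weight': 1, 'agent_type': Fibonacci},
                  {'weight': 1, 'agent_type': Odds}]

With only two nodes, that could easily give both nodes the same agent type, which is why the example uses `ids` instead.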
@@ -1 +1 @@
-0.13.6
+0.13.7
@@ -14,6 +14,7 @@ except NameError:
from . import agents
from .simulation import *
from .environment import Environment
from .history import History
from . import utils
from . import analysis
@@ -25,13 +25,13 @@ class BaseAgent(nxsim.BaseAgent):
    defaults = {}

    def __init__(self, environment, agent_id, state=None,
-                name='network_process', interval=None, **state_params):
+                name=None, interval=None, **state_params):
        # Check for REQUIRED arguments
        assert environment is not None, TypeError('__init__ missing 1 required keyword argument: \'environment\'. '
                                                  'Cannot be NoneType.')
        # Initialize agent parameters
        self.id = agent_id
-       self.name = name
+       self.name = name or '{}[{}]'.format(type(self).__name__, self.id)
        self.state_params = state_params

        # Register agent to environment
@@ -46,8 +46,8 @@ class BaseAgent(nxsim.BaseAgent):

        if not hasattr(self, 'level'):
            self.level = logging.DEBUG
-       self.logger = logging.getLogger('{}-Agent-{}'.format(self.env.name,
-                                                             self.id))
+       self.logger = logging.getLogger('{}.{}'.format(self.env.name,
+                                                       self.id))
        self.logger.setLevel(self.level)

        # initialize every time an instance of the agent is created
@@ -174,7 +174,7 @@ class BaseAgent(nxsim.BaseAgent):

    def log(self, message, *args, level=logging.INFO, **kwargs):
        message = message + " ".join(str(i) for i in args)
-       message = "\t@{:>5}:\t{}".format(self.now, message)
+       message = "\t{:10}@{:>5}:\t{}".format(self.name, self.now, message)
        for k, v in kwargs:
            message += " {k}={v} ".format(k, v)
        extra = {}
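Together with the default agent names assigned in `__init__` above, every log record is now prefixed by the agent that produced it. As an illustration (using an `Odds` agent with id 1 from the example file, evaluated in a Python shell):

>>> "\t{:10}@{:>5}:\t{}".format('Odds[1]', 7, 'Stopping at 7')
'\tOdds[1]   @    7:\tStopping at 7'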
@@ -280,7 +280,7 @@ class FSM(BaseAgent, metaclass=MetaFSM):
            raise Exception('{} has no valid state id or default state'.format(self))
        if next_state not in self.states:
            raise Exception('{} is not a valid id for {}'.format(next_state, self))
-       self.states[next_state](self)
+       return self.states[next_state](self)

    def set_state(self, state):
        if hasattr(state, 'id'):
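This one-word change is the timeout fix named in the commit message: `step()` used to call the chosen state and drop its return value, so a state that returned `(next_state, timeout)` (as described in the changelog) had no effect on scheduling. By returning the state's result, the tuple presumably reaches the code that drives the agent, which can then honour the requested delay instead of falling back to the fixed interval.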
@@ -306,6 +306,9 @@ def prob(prob=1):
    return r < prob


+STATIC_THRESHOLD = (-1, -1)


def calculate_distribution(network_agents=None,
                           agent_type=None):
    '''
@@ -343,6 +346,9 @@ def calculate_distribution(network_agents=None,
    total = sum(x.get('weight', 1) for x in network_agents)
    acc = 0
    for v in network_agents:
+       if 'ids' in v:
+           v['threshold'] = STATIC_THRESHOLD
+           continue
        upper = acc + (v.get('weight', 1)/total)
        v['threshold'] = [acc, upper]
        acc = upper
@@ -403,17 +409,20 @@ def _convert_agent_types(ind, to_string=False, **kwargs):
    return deserialize_distribution(ind, **kwargs)


-def _agent_from_distribution(distribution, value=-1):
+def _agent_from_distribution(distribution, value=-1, agent_id=None):
    """Used in the initialization of agents given an agent distribution."""
    if value < 0:
        value = random.random()
-   for d in distribution:
+   for d in sorted(distribution, key=lambda x: x['threshold']):
        threshold = d['threshold']
-       if value >= threshold[0] and value < threshold[1]:
-           state = {}
-           if 'state' in d:
-               state = deepcopy(d['state'])
-           return d['agent_type'], state
+       # Check if the definition matches by id (first) or by threshold
+       if not ((agent_id is not None and threshold == STATIC_THRESHOLD and agent_id in d['ids']) or \
+               (value >= threshold[0] and value < threshold[1])):
+           continue
+       state = {}
+       if 'state' in d:
+           state = deepcopy(d['state'])
+       return d['agent_type'], state

    raise Exception('Distribution for value {} not found in: {}'.format(value, distribution))
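Taken together, the last three hunks implement the new `ids` entry from the changelog: `calculate_distribution` gives weight-based entries a `[lower, upper)` threshold over [0, 1], while entries with `ids` get `STATIC_THRESHOLD = (-1, -1)`, which a random draw can never hit; `_agent_from_distribution` then matches a node either by its id or by where the random value falls. A worked example (hypothetical entries; the agent type names are placeholders):

# Two weighted entries and no 'ids' entries: weights 1 and 3 out of a
# total of 4 yield thresholds [0, 0.25] and [0.25, 1.0], i.e. roughly a
# quarter and three quarters of the nodes.
network_agents = [{'agent_type': TypeA, 'weight': 1},
                  {'agent_type': TypeB, 'weight': 3}]
# An entry such as {'agent_type': TypeC, 'ids': [0, 3]} would instead be
# assigned STATIC_THRESHOLD and would only ever match nodes 0 and 3 by id.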
@@ -4,9 +4,11 @@ import time
import csv
import random
import simpy
+import yaml
import tempfile
import pandas as pd
from copy import deepcopy
+from collections import Counter
from networkx.readwrite import json_graph

import networkx as nx
@@ -60,7 +62,8 @@ class Environment(nxsim.NetworkEnvironment):
        if not dry_run:
            self.get_path()
        self._history = history.History(name=self.name if not dry_run else None,
-                                        dir_path=self.dir_path)
+                                        dir_path=self.dir_path,
+                                        backup=True)
        # Add environment agents first, so their events get
        # executed before network agents
        self.environment_agents = environment_agents or []
@@ -111,7 +114,7 @@ class Environment(nxsim.NetworkEnvironment):

        agent_type = None
        if 'agent_type' in self.states.get(agent_id, {}):
-           agent_type = self.states[agent_id]
+           agent_type = self.states[agent_id]['agent_type']
        elif 'agent_type' in node:
            agent_type = node['agent_type']
        elif 'agent_type' in self.default_state:
@@ -119,8 +122,8 @@ class Environment(nxsim.NetworkEnvironment):

        if agent_type:
            agent_type = agents.deserialize_type(agent_type)
-       else:
-           agent_type, state = agents._agent_from_distribution(agent_distribution)
+       elif agent_distribution:
+           agent_type, state = agents._agent_from_distribution(agent_distribution, agent_id=agent_id)
        return self.set_agent(agent_id, agent_type, state)

    def set_agent(self, agent_id, agent_type, state=None):
@@ -130,10 +133,12 @@ class Environment(nxsim.NetworkEnvironment):
        defstate.update(node.get('state', {}))
        if state:
            defstate.update(state)
-       state = defstate
-       a = agent_type(environment=self,
-                      agent_id=agent_id,
-                      state=state)
+       a = None
+       if agent_type:
+           state = defstate
+           a = agent_type(environment=self,
+                          agent_id=agent_id,
+                          state=state)
        node['agent'] = a
        return a
@@ -153,8 +158,10 @@

    def run(self, *args, **kwargs):
        self._save_state()
+       self.log_stats()
        super().run(*args, **kwargs)
        self._history.flush_cache()
+       self.log_stats()

    def _save_state(self, now=None):
        # for agent in self.agents:
@@ -327,6 +334,25 @@

        return G

+   def stats(self):
+       stats = {}
+       stats['network'] = {}
+       stats['network']['n_nodes'] = self.G.number_of_nodes()
+       stats['network']['n_edges'] = self.G.number_of_edges()
+       c = Counter()
+       c.update(a.__class__.__name__ for a in self.network_agents)
+       stats['agents'] = {}
+       stats['agents']['model_count'] = dict(c)
+       c2 = Counter()
+       c2.update(a['id'] for a in self.network_agents)
+       stats['agents']['state_count'] = dict(c2)
+       stats['params'] = self.environment_params
+       return stats
+
+   def log_stats(self):
+       stats = self.stats()
+       utils.logger.info('Environment stats: \n{}'.format(yaml.dump(stats, default_flow_style=False)))
+
    def __getstate__(self):
        state = {}
        for prop in _CONFIG_PROPS:
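With these methods, `Environment.run()` (see the earlier hunk) logs a summary before and after the simulation. For the two-node custom_timeouts example above, the message would look roughly like this (values and state ids are illustrative, not captured output):

Environment stats:
agents:
  model_count:
    Fibonacci: 1
    Odds: 1
  state_count:
    counting: 1
    odds: 1
network:
  n_edges: 1
  n_nodes: 2
params: {}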
@@ -3,6 +3,10 @@ import os
import pandas as pd
import sqlite3
import copy
+import logging
+
+logger = logging.getLogger(__name__)

from collections import UserDict, namedtuple

from . import utils
@@ -13,7 +17,7 @@ class History:
    Store and retrieve values from a sqlite database.
    """

-   def __init__(self, db_path=None, name=None, dir_path=None, backup=True):
+   def __init__(self, db_path=None, name=None, dir_path=None, backup=False):
        if db_path is None and name:
            db_path = os.path.join(dir_path or os.getcwd(),
                                   '{}.db.sqlite'.format(name))
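The new `backup=False` default is what the changelog entry refers to: an existing database can now be opened for inspection with no extra arguments and without the file being rotated away. A usage sketch (the path and record key are made up; the indexing style follows the test shown at the end of this diff):

from soil import history

# Open a previously saved run; with backup=False (now the default) the
# existing file is reused instead of being backed up and replaced by an
# empty one.
h = history.History(db_path='soil_output/sim/sim.db.sqlite')
print(h['a_1', 0, 'id'])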
@@ -28,6 +32,7 @@
        self.db = db_path

        with self.db:
+           logger.debug('Creating database {}'.format(self.db_path))
            self.db.execute('''CREATE TABLE IF NOT EXISTS history (agent_id text, t_step int, key text, value text text)''')
            self.db.execute('''CREATE TABLE IF NOT EXISTS value_types (key text, value_type text)''')
            self.db.execute('''CREATE UNIQUE INDEX IF NOT EXISTS idx_history ON history (agent_id, t_step, key);''')
@@ -46,6 +51,7 @@
    def db(self, db_path=None):
        db_path = db_path or self.db_path
        if isinstance(db_path, str):
+           logger.debug('Connecting to database {}'.format(db_path))
            self._db = sqlite3.connect(db_path)
        else:
            self._db = db_path
@@ -110,6 +116,7 @@
        Use a cache to save state changes to avoid opening a session for every change.
        The cache will be flushed at the end of the simulation, and when history is accessed.
        '''
+       logger.debug('Flushing cache {}'.format(self.db_path))
        with self.db:
            for rec in self._tups:
                self.db.execute("replace into history(agent_id, t_step, key, value) values (?, ?, ?, ?)", (rec.agent_id, rec.t_step, rec.key, rec.value))
@@ -120,18 +120,18 @@ class TestHistory(TestCase):
        assert os.path.exists(db_path)

        # Recover the data
-       recovered = history.History(db_path=db_path, backup=False)
+       recovered = history.History(db_path=db_path)
        assert recovered['a_1', 0, 'id'] == 'v'
        assert recovered['a_1', 4, 'id'] == 'e'

-       # Using the same name should create a backup copy
+       # Using backup=True should create a backup copy, and initialize an empty history
        newhistory = history.History(db_path=db_path, backup=True)
        backuppaths = glob(db_path + '.backup*.sqlite')
        assert len(backuppaths) == 1
        backuppath = backuppaths[0]
        assert newhistory.db_path == h.db_path
        assert os.path.exists(backuppath)
-       assert not len(newhistory[None, None, None])
+       assert len(newhistory[None, None, None]) == 0

    def test_history_tuples(self):
        """