Mirror of https://github.com/gsi-upm/soil (synced 2025-10-20 18:28:28 +00:00)

Compare commits: 0.13.0 ... a3ea434f23 (14 commits)

Commits (SHA1):
a3ea434f23
65f6aa72f3
09e14c6e84
8593ac999d
90338c3549
1d532dacfe
a1f8d8c9c5
de326eb331
04b4380c61
d70a0c865c
625c28e4ee
9749f4ca14
3526fa29d7
53604c1e66
@@ -1,21 +1,28 @@
image: python:3.7

steps:
- build
stages:
- test
- build

build:
stage: build
image:
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
tags:
- docker
script:
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json
- /kaniko/executor --context $CI_PROJECT_DIR --dockerfile $CI_PROJECT_DIR/Dockerfile --destination $CI_REGISTRY_IMAGE:$CI_COMMIT_TAG
# The skip-tls-verify flag is there because our registry certificate is self signed
- /kaniko/executor --context $CI_PROJECT_DIR --skip-tls-verify --dockerfile $CI_PROJECT_DIR/Dockerfile --destination $CI_REGISTRY_IMAGE:$CI_COMMIT_TAG
only:
- tags

test:
except:
- tags # Avoid running tests for tags, because they are already run for the branch
tags:
- docker
image: python:3.7
stage: test
script:
python setup.py test
- python setup.py test
CHANGELOG.md (new file, 27 lines)
@@ -0,0 +1,27 @@
# Changelog
All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [0.13.8]
### Changed
* Moved TerroristNetworkModel to examples
### Added
* `get_agents` and `count_agents` methods now accept lists as inputs. They can be used to retrieve agents from node ids
* `subgraph` in BaseAgent
* `agents.select` method, to filter out agents
* `skip_test` property in yaml definitions, to force skipping some examples
* `agents.Geo`, with a search function based on position
* `BaseAgent.ego_search` to get nodes from the ego network of a node
* `BaseAgent.degree` and `BaseAgent.betweenness`
### Fixed

## [0.13.7]
### Changed
* History now defaults to not backing up! This makes it more intuitive to load the history for examination, at the expense of rewriting something. That should not happen because History is only created in the Environment, and that has `backup=True`.
### Added
* Agent names are assigned based on their agent types
* Agent logging uses the agent name.
* FSM agents can now return a timeout in addition to a new state. e.g. `return self.idle, self.env.timeout(2)` will execute the *different_state* in 2 *units of time* (`t_step=now+2`).
* Example of using timeouts in FSM (custom_timeouts)
* `network_agents` entries may include an `ids` entry. If set, it should be a list of node ids that should be assigned that agent type. This complements the previous behavior of setting agent type with `weights`.
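The 0.13.8 entries above only name the new agent-query helpers. The sketch below shows how they are meant to fit together; it mirrors the patterns used in the test suite further down (`test_subgraph`), and the environment name is purely illustrative.

```python
import networkx as nx
from soil import agents
from soil.environment import Environment

# Tiny topology with a NetworkAgent on every node.
G = nx.Graph()
G.add_edge(0, 1)
G.add_edge(1, 2)
distro = agents.calculate_distribution(agent_type=agents.NetworkAgent)
env = Environment(name='changelog-demo', topology=G, network_agents=distro)

# get_agents/count_agents now accept lists of node ids.
first_two = env.get_agents([0, 1])

# agents.select filters any collection of agents by type, state id or state values.
network_only = agents.select(env.agents, agent_type=agents.NetworkAgent)

a = first_two[0]
# New BaseAgent/NetworkAgent helpers: subgraph and ego_search
# (degree and betweenness work the same way, caching centralities per step).
local = a.subgraph(limit_neighbors=True)
nearby = a.ego_search(steps=2)
print(len(local), list(nearby), len(network_only))
```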
@@ -2,3 +2,6 @@ include requirements.txt
include test-requirements.txt
include README.rst
graft soil
global-exclude __pycache__
global-exclude soil_output
global-exclude *.py[co]
@@ -16,15 +16,14 @@ The configuration includes things such as number of agents to use and their type

Soil includes several agent classes in the ``soil.agents`` module, and we will use them in this quickstart.
If you are interested in developing your own agents classes, see :doc:`soil_tutorial`.
The configuration is the following:

Configuration
=============
To get you started, we will use this configuration (:download:`download the file <quickstart.yml>` directly):

.. literalinclude:: quickstart.yml
   :language: yaml

Configuration
=============

You may :download:`download the file <quickstart.yml>` directly.
The agent type used, SISa, is a very simple model.
It only has three states (neutral, content and discontent),
Its parameters are the probabilities to change from one state to another, either spontaneously or because of contagion from neighboring agents.
@@ -79,16 +78,16 @@ Change some of the parameters, such as the number of agents, the probability of
Soil also includes a web server that allows you to upload your simulations, change parameters, and visualize the results, including a timeline of the network.
To make it work, you have to install soil like this:

```
pip install soil[web]
```
.. code::

   pip install soil[web]

Once installed, the soil web UI can be run in two ways:

```
soil-web
.. code::

OR
   soil-web

python -m soil.web
```
   # OR

   python -m soil.web
@@ -26,7 +26,7 @@ But before that, let's import the soil module and networkx.
%autoreload 2

%pylab inline
# To display plots in the notebooed_
# To display plots in the notebook_


.. parsed-literal::

examples/custom_generator/custom_generator.yml (new file, 17 lines)
@@ -0,0 +1,17 @@
---
name: custom-generator
description: Using a custom generator for the network
num_trials: 3
dry_run: True
max_time: 100
interval: 1
network_params:
    generator: mymodule.mygenerator
    # These are custom parameters
    n: 10
    n_edges: 5
network_agents:
    - agent_type: CounterModel
      weight: 1
      state:
        id: 0
examples/custom_generator/mymodule.py (new file, 27 lines)
@@ -0,0 +1,27 @@
from networkx import Graph
import networkx as nx
from random import choice

def mygenerator(n=5, n_edges=5):
    '''
    Just a simple generator that creates a network with n nodes and
    n_edges edges. Edges are assigned randomly, only avoiding self loops.
    '''
    G = nx.Graph()

    for i in range(n):
        G.add_node(i)

    for i in range(n_edges):
        nodes = list(G.nodes)
        n_in = choice(nodes)
        nodes.remove(n_in)  # Avoid loops
        n_out = choice(nodes)
        G.add_edge(n_in, n_out)
    return G
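Because this example is driven by the YAML file above, it may help to see the equivalent programmatic call. The sketch below is not part of the example; it assumes `mymodule.py` is importable from the working directory and simply reuses the parameter values from `custom_generator.yml`.

```python
from soil import Simulation
from soil.agents import CounterModel
from mymodule import mygenerator  # the generator defined above

# Roughly equivalent to running custom_generator.yml, but passing the
# generator callable directly instead of its dotted path.
s = Simulation(name='custom-generator',
               network_params={'generator': mygenerator, 'n': 10, 'n_edges': 5},
               network_agents=[{'agent_type': CounterModel,
                                'weight': 1,
                                'state': {'id': 0}}],
               num_trials=3,
               max_time=100,
               interval=1,
               dry_run=True)
envs = s.run()
```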
examples/custom_timeouts/custom_timeouts.py (new file, 36 lines)
@@ -0,0 +1,36 @@
from soil.agents import FSM, state, default_state


class Fibonacci(FSM):
    '''Agent that only executes in t_steps that are Fibonacci numbers'''

    defaults = {
        'prev': 1
    }

    @default_state
    @state
    def counting(self):
        self.log('Stopping at {}'.format(self.now))
        prev, self['prev'] = self['prev'], max([self.now, self['prev']])
        return None, self.env.timeout(prev)


class Odds(FSM):
    '''Agent that only executes in odd t_steps'''
    @default_state
    @state
    def odds(self):
        self.log('Stopping at {}'.format(self.now))
        return None, self.env.timeout(1+self.now%2)


if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.INFO)
    from soil import Simulation
    s = Simulation(network_agents=[{'ids': [0], 'agent_type': Fibonacci},
                                   {'ids': [1], 'agent_type': Odds}],
                   dry_run=True,
                   network_params={"generator": "complete_graph", "n": 2},
                   max_time=100,
                   )
    s.run()
@@ -6,7 +6,7 @@ environment_params:
prob_neighbor_spread: 0.0
prob_tv_spread: 0.01
interval: 1
max_time: 30
max_time: 300
name: Sim_all_dumb
network_agents:
- agent_type: DumbViewer
@@ -30,7 +30,7 @@ environment_params:
prob_neighbor_spread: 0.0
prob_tv_spread: 0.01
interval: 1
max_time: 30
max_time: 300
name: Sim_half_herd
network_agents:
- agent_type: DumbViewer
@@ -62,7 +62,7 @@ environment_params:
prob_neighbor_spread: 0.0
prob_tv_spread: 0.01
interval: 1
max_time: 30
max_time: 300
name: Sim_all_herd
network_agents:
- agent_type: HerdViewer
@@ -89,7 +89,7 @@ environment_params:
prob_tv_spread: 0.01
prob_neighbor_cure: 0.1
interval: 1
max_time: 30
max_time: 300
name: Sim_wise_herd
network_agents:
- agent_type: HerdViewer
@@ -115,7 +115,7 @@ environment_params:
prob_tv_spread: 0.01
prob_neighbor_cure: 0.1
interval: 1
max_time: 30
max_time: 300
name: Sim_all_wise
network_agents:
- agent_type: WiseViewer
examples/programmatic/.gitignore (vendored, new file, 1 line)
@@ -0,0 +1 @@
Programmatic*
examples/programmatic/programmatic.py (new file, 38 lines)
@@ -0,0 +1,38 @@
'''
Example of a fully programmatic simulation, without definition files.
'''
from soil import Simulation, agents
from networkx import Graph
import logging


def mygenerator():
    # Add only a node
    G = Graph()
    G.add_node(1)
    return G


class MyAgent(agents.FSM):

    @agents.default_state
    @agents.state
    def neutral(self):
        self.info('I am running')


s = Simulation(name='Programmatic',
               network_params={'generator': mygenerator},
               num_trials=1,
               max_time=100,
               agent_type=MyAgent,
               dry_run=True)


logging.basicConfig(level=logging.INFO)
envs = s.run()

s.dump_yaml()

for env in envs:
    env.dump_csv()
examples/terrorism/TerroristNetworkModel.py (new file, 208 lines)
@@ -0,0 +1,208 @@
import random
import networkx as nx
from soil.agents import Geo, NetworkAgent, FSM, state, default_state
from soil import Environment


class TerroristSpreadModel(FSM, Geo):
    """
    Settings:
        information_spread_intensity

        terrorist_additional_influence

        min_vulnerability (optional else zero)

        max_vulnerability

        prob_interaction
    """

    def __init__(self, environment=None, agent_id=0, state=()):
        super().__init__(environment=environment, agent_id=agent_id, state=state)

        self.information_spread_intensity = environment.environment_params['information_spread_intensity']
        self.terrorist_additional_influence = environment.environment_params['terrorist_additional_influence']
        self.prob_interaction = environment.environment_params['prob_interaction']

        if self['id'] == self.civilian.id:  # Civilian
            self.mean_belief = random.uniform(0.00, 0.5)
        elif self['id'] == self.terrorist.id:  # Terrorist
            self.mean_belief = random.uniform(0.8, 1.00)
        elif self['id'] == self.leader.id:  # Leader
            self.mean_belief = 1.00
        else:
            raise Exception('Invalid state id: {}'.format(self['id']))

        if 'min_vulnerability' in environment.environment_params:
            self.vulnerability = random.uniform( environment.environment_params['min_vulnerability'], environment.environment_params['max_vulnerability'] )
        else :
            self.vulnerability = random.uniform( 0, environment.environment_params['max_vulnerability'] )

    @state
    def civilian(self):
        neighbours = list(self.get_neighboring_agents(agent_type=TerroristSpreadModel))
        if len(neighbours) > 0:
            # Only interact with some of the neighbors
            interactions = list(n for n in neighbours if random.random() <= self.prob_interaction)
            influence = sum( self.degree(i) for i in interactions )
            mean_belief = sum( i.mean_belief * self.degree(i) / influence for i in interactions )
            mean_belief = mean_belief * self.information_spread_intensity + self.mean_belief * ( 1 - self.information_spread_intensity )
            self.mean_belief = mean_belief * self.vulnerability + self.mean_belief * ( 1 - self.vulnerability )

        if self.mean_belief >= 0.8:
            return self.terrorist

    @state
    def leader(self):
        self.mean_belief = self.mean_belief ** ( 1 - self.terrorist_additional_influence )
        for neighbour in self.get_neighboring_agents(state_id=[self.terrorist.id, self.leader.id]):
            if self.betweenness(neighbour) > self.betweenness(self):
                return self.terrorist

    @state
    def terrorist(self):
        neighbours = self.get_agents(state_id=[self.terrorist.id, self.leader.id],
                                     agent_type=TerroristSpreadModel,
                                     limit_neighbors=True)
        if len(neighbours) > 0:
            influence = sum( self.degree(n) for n in neighbours )
            mean_belief = sum( n.mean_belief * self.degree(n) / influence for n in neighbours )
            mean_belief = mean_belief * self.vulnerability + self.mean_belief * ( 1 - self.vulnerability )
            self.mean_belief = self.mean_belief ** ( 1 - self.terrorist_additional_influence )

        # Check if there are any leaders in the group
        leaders = list(filter(lambda x: x.state.id == self.leader.id, neighbours))
        if not leaders:
            # Check if this is the potential leader
            # Stop once it's found. Otherwise, set self as leader
            for neighbour in neighbours:
                if self.betweenness(self) < self.betweenness(neighbour):
                    return
            return self.leader


class TrainingAreaModel(FSM, Geo):
    """
    Settings:
        training_influence

        min_vulnerability

    Requires TerroristSpreadModel.
    """

    def __init__(self, environment=None, agent_id=0, state=()):
        super().__init__(environment=environment, agent_id=agent_id, state=state)
        self.training_influence = environment.environment_params['training_influence']
        if 'min_vulnerability' in environment.environment_params:
            self.min_vulnerability = environment.environment_params['min_vulnerability']
        else: self.min_vulnerability = 0

    @default_state
    @state
    def terrorist(self):
        for neighbour in self.get_neighboring_agents(agent_type=TerroristSpreadModel):
            if neighbour.vulnerability > self.min_vulnerability:
                neighbour.vulnerability = neighbour.vulnerability ** ( 1 - self.training_influence )


class HavenModel(FSM, Geo):
    """
    Settings:
        haven_influence

        min_vulnerability

        max_vulnerability

    Requires TerroristSpreadModel.
    """

    def __init__(self, environment=None, agent_id=0, state=()):
        super().__init__(environment=environment, agent_id=agent_id, state=state)
        self.haven_influence = environment.environment_params['haven_influence']
        if 'min_vulnerability' in environment.environment_params:
            self.min_vulnerability = environment.environment_params['min_vulnerability']
        else: self.min_vulnerability = 0
        self.max_vulnerability = environment.environment_params['max_vulnerability']

    def get_occupants(self, **kwargs):
        return self.get_neighboring_agents(agent_type=TerroristSpreadModel, **kwargs)

    @state
    def civilian(self):
        civilians = self.get_occupants(state_id=self.civilian.id)
        if not civilians:
            return self.terrorist

        for neighbour in self.get_occupants():
            if neighbour.vulnerability > self.min_vulnerability:
                neighbour.vulnerability = neighbour.vulnerability * ( 1 - self.haven_influence )
        return self.civilian

    @state
    def terrorist(self):
        for neighbour in self.get_occupants():
            if neighbour.vulnerability < self.max_vulnerability:
                neighbour.vulnerability = neighbour.vulnerability ** ( 1 - self.haven_influence )
        return self.terrorist


class TerroristNetworkModel(TerroristSpreadModel):
    """
    Settings:
        sphere_influence

        vision_range

        weight_social_distance

        weight_link_distance
    """

    def __init__(self, environment=None, agent_id=0, state=()):
        super().__init__(environment=environment, agent_id=agent_id, state=state)

        self.vision_range = environment.environment_params['vision_range']
        self.sphere_influence = environment.environment_params['sphere_influence']
        self.weight_social_distance = environment.environment_params['weight_social_distance']
        self.weight_link_distance = environment.environment_params['weight_link_distance']

    @state
    def terrorist(self):
        self.update_relationships()
        return super().terrorist()

    @state
    def leader(self):
        self.update_relationships()
        return super().leader()

    def update_relationships(self):
        if self.count_neighboring_agents(state_id=self.civilian.id) == 0:
            close_ups = set(self.geo_search(radius=self.vision_range, agent_type=TerroristNetworkModel))
            step_neighbours = set(self.ego_search(self.sphere_influence, agent_type=TerroristNetworkModel, center=False))
            neighbours = set(agent.id for agent in self.get_neighboring_agents(agent_type=TerroristNetworkModel))
            search = (close_ups | step_neighbours) - neighbours
            for agent in self.get_agents(search):
                social_distance = 1 / self.shortest_path_length(agent.id)
                spatial_proximity = ( 1 - self.get_distance(agent.id) )
                prob_new_interaction = self.weight_social_distance * social_distance + self.weight_link_distance * spatial_proximity
                if agent['id'] == agent.civilian.id and random.random() < prob_new_interaction:
                    self.add_edge(agent)
                    break

    def get_distance(self, target):
        source_x, source_y = nx.get_node_attributes(self.global_topology, 'pos')[self.id]
        target_x, target_y = nx.get_node_attributes(self.global_topology, 'pos')[target]
        dx = abs( source_x - target_x )
        dy = abs( source_y - target_y )
        return ( dx ** 2 + dy ** 2 ) ** ( 1 / 2 )

    def shortest_path_length(self, target):
        try:
            return nx.shortest_path_length(self.global_topology, self.id, target)
        except nx.NetworkXNoPath:
            return float('inf')
@@ -60,3 +60,4 @@ visualization_params:
background_image: 'map_4800x2860.jpg'
background_opacity: '0.9'
background_filter_color: 'blue'
skip_test: true # This simulation takes too long for automated tests.
@@ -5,3 +5,4 @@ numpy
matplotlib
pyyaml
pandas
scipy
@@ -1 +1 @@
0.13.0
0.13.8
@@ -11,11 +11,10 @@ try:
except NameError:
basestring = str

logging.basicConfig()

from . import agents
from .simulation import *
from .environment import Environment
from .history import History
from . import utils
from . import analysis

@@ -23,6 +22,9 @@ def main():
import argparse
from . import simulation

logging.basicConfig(level=logging.INFO)
logging.info('Running SOIL version: {}'.format(__version__))

parser = argparse.ArgumentParser(description='Run a SOIL simulation')
parser.add_argument('file', type=str,
nargs="?",
@@ -62,7 +64,7 @@ def main():
simulation.run_from_config(args.file,
dry_run=args.dry_run,
dump=dump,
parallel=(not args.synchronous and not args.pdb),
parallel=(not args.synchronous),
results_dir=args.output)
except Exception:
if args.pdb:
@@ -22,11 +22,17 @@ class AggregatedCounter(BaseAgent):
in each step and adds it to its state.
"""

defaults = {
'times': 0,
'neighbors': 0,
'total': 0
}

def step(self):
# Outside effects
total = len(list(self.get_all_agents()))
self['times'] += 1
neighbors = len(list(self.get_neighboring_agents()))
self['times'] = self.get('times', 0) + 1
self['neighbors'] = self.get('neighbors', 0) + neighbors
self['total'] = total = self.get('total', 0) + total
self['neighbors'] += neighbors
total = len(list(self.get_all_agents()))
self['total'] += total
self.debug('Running for step: {}. Total: {}'.format(self.now, total))
@@ -10,6 +10,7 @@ import logging
from collections import OrderedDict
from copy import deepcopy
from functools import partial
from scipy.spatial import cKDTree as KDTree
import json

from functools import wraps
@@ -17,6 +18,12 @@ from functools import wraps
from .. import utils, history


def as_node(agent):
if isinstance(agent, BaseAgent):
return agent.id
return agent


class BaseAgent(nxsim.BaseAgent):
"""
A special simpy BaseAgent that keeps track of its state history.
@@ -24,20 +31,16 @@ class BaseAgent(nxsim.BaseAgent):

defaults = {}

def __init__(self, environment, agent_id=None, state=None,
name='network_process', interval=None, **state_params):
def __init__(self, environment, agent_id, state=None,
name=None, interval=None, **state_params):
# Check for REQUIRED arguments
assert environment is not None, TypeError('__init__ missing 1 required keyword argument: \'environment\'. '
'Cannot be NoneType.')
# Initialize agent parameters
self.id = agent_id
self.name = name
self.name = name or '{}[{}]'.format(type(self).__name__, self.id)
self.state_params = state_params

# Global parameters
self.global_topology = environment.G
self.environment_params = environment.environment_params

# Register agent to environment
self.env = environment

@@ -50,8 +53,7 @@ class BaseAgent(nxsim.BaseAgent):

if not hasattr(self, 'level'):
self.level = logging.DEBUG
self.logger = logging.getLogger('{}-Agent-{}'.format(self.env.name,
self.id))
self.logger = logging.getLogger(self.env.name)
self.logger.setLevel(self.level)

# initialize every time an instance of the agent is created
@@ -73,6 +75,18 @@ class BaseAgent(nxsim.BaseAgent):
for k, v in value.items():
self[k] = v

@property
def global_topology(self):
return self.env.G

@property
def environment_params(self):
return self.env.environment_params

@environment_params.setter
def environment_params(self, value):
self.env.environment_params = value

def __getitem__(self, key):
if isinstance(key, tuple):
key, t_step = key
@@ -126,50 +140,25 @@ class BaseAgent(nxsim.BaseAgent):
def step(self):
pass

def to_json(self):
return json.dumps(self.state)
def count_agents(self, **kwargs):
return len(list(self.get_agents(**kwargs)))

def count_agents(self, state_id=None, limit_neighbors=False):
def count_neighboring_agents(self, state_id=None, **kwargs):
return len(super().get_neighboring_agents(state_id=state_id, **kwargs))

def get_neighboring_agents(self, state_id=None, **kwargs):
return self.get_agents(limit_neighbors=True, state_id=state_id, **kwargs)

def get_agents(self, agents=None, limit_neighbors=False, **kwargs):
if limit_neighbors:
agents = self.global_topology.neighbors(self.id)
agents = super().get_agents(limit_neighbors=limit_neighbors)
else:
agents = self.global_topology.nodes()
count = 0
for agent in agents:
if state_id and state_id != self.global_topology.node[agent]['agent']['id']:
continue
count += 1
return count

def count_neighboring_agents(self, state_id=None):
return len(super().get_agents(state_id, limit_neighbors=True))

def get_agents(self, state_id=None, agent_type=None, limit_neighbors=False, iterator=False, **kwargs):
agents = self.env.agents
if limit_neighbors:
agents = super().get_agents(state_id, limit_neighbors)

def matches_all(agent):
if state_id is not None:
if agent.state.get('id', None) != state_id:
return False
if agent_type is not None:
if type(agent) != agent_type:
return False
state = agent.state
for k, v in kwargs.items():
if state.get(k, None) != v:
return False
return True

f = filter(matches_all, agents)
if iterator:
return f
return list(f)
agents = self.env.get_agents(agents)
return select(agents, **kwargs)

def log(self, message, *args, level=logging.INFO, **kwargs):
message = message + " ".join(str(i) for i in args)
message = "\t@{:>5}:\t{}".format(self.now, message)
message = "\t{:10}@{:>5}:\t{}".format(self.name, self.now, message)
for k, v in kwargs:
message += " {k}={v} ".format(k, v)
extra = {}
@@ -183,12 +172,72 @@ class BaseAgent(nxsim.BaseAgent):
def info(self, *args, **kwargs):
return self.log(*args, level=logging.INFO, **kwargs)

def __getstate__(self):
'''
Serializing an agent will lose all its running information (you cannot
serialize an iterator), but it keeps the state and link to the environment,
so it can be used for inspection and dumping to a file
'''
state = {}
state['id'] = self.id
state['environment'] = self.env
state['_state'] = self._state
return state

def state(func):
def __setstate__(self, state):
'''
Get back a serialized agent and try to re-compose it
'''
self.id = state['id']
self._state = state['_state']
self.env = state['environment']

def add_edge(self, node1, node2, **attrs):
node1 = as_node(node1)
node2 = as_node(node2)

for n in [node1, node2]:
if n not in self.global_topology.nodes(data=False):
raise ValueError('"{}" not in the graph'.format(n))
return self.global_topology.add_edge(node1, node2, **attrs)

def subgraph(self, center=True, **kwargs):
include = [self] if center else []
return self.global_topology.subgraph(n.id for n in self.get_agents(**kwargs)+include)


class NetworkAgent(BaseAgent):

def add_edge(self, other, **kwargs):
return super(NetworkAgent, self).add_edge(node1=self.id, node2=other, **kwargs)

def ego_search(self, steps=1, center=False, node=None, **kwargs):
'''Get a list of nodes in the ego network of *node* of radius *steps*'''
node = as_node(node if node is not None else self)
G = self.subgraph(**kwargs)
return nx.ego_graph(G, node, center=center, radius=steps).nodes()

def degree(self, node, force=False):
node = as_node(node)
if force or (not hasattr(self.env, '_degree')) or getattr(self.env, '_last_step', 0) < self.now:
self.env._degree = nx.degree_centrality(self.global_topology)
self.env._last_step = self.now
return self.env._degree[node]

def betweenness(self, node, force=False):
node = as_node(node)
if force or (not hasattr(self.env, '_betweenness')) or getattr(self.env, '_last_step', 0) < self.now:
self.env._betweenness = nx.betweenness_centrality(self.global_topology)
self.env._last_step = self.now
return self.env._betweenness[node]


def state(name=None):
def decorator(func, name=None):
'''
A state function should return either a state id, or a tuple (state_id, when)
The default value for state_id is the current state id.
The default value for when is the interval defined in the nevironment.
The default value for when is the interval defined in the environment.
'''

@wraps(func)
@@ -205,10 +254,15 @@ def state(func):
self.set_state(next_state)
return when

func_wrapper.id = func.__name__
func_wrapper.id = name or func.__name__
func_wrapper.is_default = False
return func_wrapper

if callable(name):
return decorator(name)
else:
return partial(decorator, name=name)


def default_state(func):
func.is_default = True
@@ -255,7 +309,7 @@ class FSM(BaseAgent, metaclass=MetaFSM):
raise Exception('{} has no valid state id or default state'.format(self))
if next_state not in self.states:
raise Exception('{} is not a valid id for {}'.format(next_state, self))
self.states[next_state](self)
return self.states[next_state](self)

def set_state(self, state):
if hasattr(state, 'id'):
@@ -281,6 +335,9 @@ def prob(prob=1):
return r < prob


STATIC_THRESHOLD = (-1, -1)


def calculate_distribution(network_agents=None,
agent_type=None):
'''
@@ -312,12 +369,15 @@ def calculate_distribution(network_agents=None,
elif agent_type:
network_agents = [{'agent_type': agent_type}]
else:
return []
raise ValueError('Specify a distribution or a default agent type')

# Calculate the thresholds
total = sum(x.get('weight', 1) for x in network_agents)
acc = 0
for v in network_agents:
if 'ids' in v:
v['threshold'] = STATIC_THRESHOLD
continue
upper = acc + (v.get('weight', 1)/total)
v['threshold'] = [acc, upper]
acc = upper
@@ -336,7 +396,7 @@ def serialize_distribution(network_agents, known_modules=[]):
When serializing an agent distribution, remove the thresholds, in order
to avoid cluttering the YAML definition file.
'''
d = deepcopy(network_agents)
d = deepcopy(list(network_agents))
for v in d:
if 'threshold' in v:
del v['threshold']
@@ -378,13 +438,16 @@ def _convert_agent_types(ind, to_string=False, **kwargs):
return deserialize_distribution(ind, **kwargs)


def _agent_from_distribution(distribution, value=-1):
def _agent_from_distribution(distribution, value=-1, agent_id=None):
"""Used in the initialization of agents given an agent distribution."""
if value < 0:
value = random.random()
for d in distribution:
for d in sorted(distribution, key=lambda x: x['threshold']):
threshold = d['threshold']
if value >= threshold[0] and value < threshold[1]:
# Check if the definition matches by id (first) or by threshold
if not ((agent_id is not None and threshold == STATIC_THRESHOLD and agent_id in d['ids']) or \
(value >= threshold[0] and value < threshold[1])):
continue
state = {}
if 'state' in d:
state = deepcopy(d['state'])
@@ -393,6 +456,58 @@ def _agent_from_distribution(distribution, value=-1):
raise Exception('Distribution for value {} not found in: {}'.format(value, distribution))


class Geo(NetworkAgent):
'''In this type of network, nodes have a "pos" attribute.'''

def geo_search(self, radius, node=None, center=False, **kwargs):
'''Get a list of nodes whose coordinates are closer than *radius* to *node*.'''
node = as_node(node if node is not None else self)

G = self.subgraph(**kwargs)

pos = nx.get_node_attributes(G, 'pos')
if not pos:
return []
nodes, coords = list(zip(*pos.items()))
kdtree = KDTree(coords)  # Cannot provide generator.
indices = kdtree.query_ball_point(pos[node], radius)
return [nodes[i] for i in indices if center or (nodes[i] != node)]


def select(agents, state_id=None, agent_type=None, ignore=None, iterator=False, **kwargs):

if state_id is not None:
try:
state_id = tuple(state_id)
except TypeError:
state_id = tuple([state_id])
if agent_type is not None:
try:
agent_type = tuple(agent_type)
except TypeError:
agent_type = tuple([agent_type])

def matches_all(agent):
if state_id is not None:
if agent.state.get('id', None) not in state_id:
return False
if agent_type is not None:
if not isinstance(agent, agent_type):
return False
state = agent.state
for k, v in kwargs.items():
if state.get(k, None) != v:
return False
return True

f = filter(matches_all, agents)
if ignore:
f = filter(lambda x: x not in ignore, f)
if iterator:
return f
return list(f)


from .BassModel import *
from .BigMarketModel import *
from .IndependentCascadeModel import *
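The refactored `state` decorator above accepts both the bare form (`@state`) and an explicit state id (`@state('name')`), and FSM states may now return a `(state, timeout)` tuple. A minimal, illustrative agent (not part of the library) combining both forms:

```python
from soil.agents import FSM, state, default_state


class Switcher(FSM):
    '''Illustrative agent: alternates between two states.'''

    @default_state
    @state                 # state id defaults to the function name: 'resting'
    def resting(self):
        self.debug('resting at t={}'.format(self.now))
        return self.busy, self.env.timeout(2)   # move to 'busy' in 2 time units

    @state('busy')         # explicit state id, supported by the new decorator signature
    def busy(self):
        self.debug('busy at t={}'.format(self.now))
        return self.resting
```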
@@ -4,9 +4,11 @@ import time
import csv
import random
import simpy
import yaml
import tempfile
import pandas as pd
from copy import deepcopy
from collections import Counter
from networkx.readwrite import json_graph

import networkx as nx
@@ -14,6 +16,14 @@ import nxsim

from . import utils, agents, analysis, history

# These properties will be copied when pickling/unpickling the environment
_CONFIG_PROPS = [ 'name',
'states',
'default_state',
'interval',
'dry_run',
'dir_path',
]

class Environment(nxsim.NetworkEnvironment):
"""
@@ -52,7 +62,8 @@ class Environment(nxsim.NetworkEnvironment):
if not dry_run:
self.get_path()
self._history = history.History(name=self.name if not dry_run else None,
dir_path=self.dir_path)
dir_path=self.dir_path,
backup=True)
# Add environment agents first, so their events get
# executed before network agents
self.environment_agents = environment_agents or []
@@ -91,8 +102,7 @@ class Environment(nxsim.NetworkEnvironment):

@network_agents.setter
def network_agents(self, network_agents):
if not network_agents:
return
self._network_agents = network_agents
for ix in self.G.nodes():
self.init_agent(ix, agent_distribution=network_agents)

@@ -103,7 +113,7 @@ class Environment(nxsim.NetworkEnvironment):

agent_type = None
if 'agent_type' in self.states.get(agent_id, {}):
agent_type = self.states[agent_id]
agent_type = self.states[agent_id]['agent_type']
elif 'agent_type' in node:
agent_type = node['agent_type']
elif 'agent_type' in self.default_state:
@@ -111,8 +121,11 @@ class Environment(nxsim.NetworkEnvironment):

if agent_type:
agent_type = agents.deserialize_type(agent_type)
elif agent_distribution:
agent_type, state = agents._agent_from_distribution(agent_distribution, agent_id=agent_id)
else:
agent_type, state = agents._agent_from_distribution(agent_distribution)
utils.logger.debug('Skipping node {}'.format(agent_id))
return
return self.set_agent(agent_id, agent_type, state)

def set_agent(self, agent_id, agent_type, state=None):
@@ -122,6 +135,8 @@ class Environment(nxsim.NetworkEnvironment):
defstate.update(node.get('state', {}))
if state:
defstate.update(state)
a = None
if agent_type:
state = defstate
a = agent_type(environment=self,
agent_id=agent_id,
@@ -136,17 +151,20 @@ class Environment(nxsim.NetworkEnvironment):
a['visible'] = True
return a

def add_edge(self, agent1, agent2, attrs=None):
def add_edge(self, agent1, agent2, start=None, **attrs):
if hasattr(agent1, 'id'):
agent1 = agent1.id
if hasattr(agent2, 'id'):
agent2 = agent2.id
return self.G.add_edge(agent1, agent2)
start = start or self.now
return self.G.add_edge(agent1, agent2, **attrs)

def run(self, *args, **kwargs):
self._save_state()
self.log_stats()
super().run(*args, **kwargs)
self._history.flush_cache()
self.log_stats()

def _save_state(self, now=None):
# for agent in self.agents:
@@ -216,8 +234,10 @@ class Environment(nxsim.NetworkEnvironment):
def get_agent(self, agent_id):
return self.G.node[agent_id]['agent']

def get_agents(self):
def get_agents(self, nodes=None):
if nodes is None:
return list(self.agents)
return [self.G.node[i]['agent'] for i in nodes]

def dump_csv(self, dir_path=None):
csv_name = os.path.join(self.get_path(dir_path),
@@ -319,20 +339,40 @@ class Environment(nxsim.NetworkEnvironment):

return G

def stats(self):
stats = {}
stats['network'] = {}
stats['network']['n_nodes'] = self.G.number_of_nodes()
stats['network']['n_edges'] = self.G.number_of_edges()
c = Counter()
c.update(a.__class__.__name__ for a in self.network_agents)
stats['agents'] = {}
stats['agents']['model_count'] = dict(c)
c2 = Counter()
c2.update(a['id'] for a in self.network_agents)
stats['agents']['state_count'] = dict(c2)
stats['params'] = self.environment_params
return stats

def log_stats(self):
stats = self.stats()
utils.logger.info('Environment stats: \n{}'.format(yaml.dump(stats, default_flow_style=False)))

def __getstate__(self):
state = self.__dict__.copy()
state = {}
for prop in _CONFIG_PROPS:
state[prop] = self.__dict__[prop]
state['G'] = json_graph.node_link_data(self.G)
state['network_agents'] = agents._serialize_distribution(self.network_agents)
state['environment_agents'] = agents._convert_agent_types(self.environment_agents,
to_string=True)
state['environment_agents'] = self._env_agents
state['history'] = self._history
return state

def __setstate__(self, state):
self.__dict__ = state
for prop in _CONFIG_PROPS:
self.__dict__[prop] = state[prop]
self._env_agents = state['environment_agents']
self.G = json_graph.node_link_graph(state['G'])
self.network_agents = self.calculate_distribution(self._convert_agent_types(self.network_agents))
self.environment_agents = self._convert_agent_types(self.environment_agents)
return state
self._history = state['history']


SoilEnvironment = Environment
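A quick sketch of the Environment additions above (`get_agents` with node ids, the more flexible `add_edge`, and the new stats logging). The environment here is built the same way as in the tests further down and is purely illustrative:

```python
import networkx as nx
from soil import agents
from soil.environment import Environment

G = nx.complete_graph(3)
distro = agents.calculate_distribution(agent_type=agents.NetworkAgent)
env = Environment(name='env-demo', topology=G, network_agents=distro)

a0, a2 = env.get_agents([0, 2])   # get_agents() now also accepts node ids
env.add_edge(a0, a2)              # add_edge() takes agents or node ids, plus edge attributes
env.log_stats()                   # dumps node/edge counts and agent counts through the logger
```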
@@ -3,6 +3,10 @@ import os
import pandas as pd
import sqlite3
import copy
import logging

logger = logging.getLogger(__name__)

from collections import UserDict, namedtuple

from . import utils
@@ -13,7 +17,7 @@ class History:
Store and retrieve values from a sqlite database.
"""

def __init__(self, db_path=None, name=None, dir_path=None, backup=True):
def __init__(self, db_path=None, name=None, dir_path=None, backup=False):
if db_path is None and name:
db_path = os.path.join(dir_path or os.getcwd(),
'{}.db.sqlite'.format(name))
@@ -28,6 +32,7 @@ class History:
self.db = db_path

with self.db:
logger.debug('Creating database {}'.format(self.db_path))
self.db.execute('''CREATE TABLE IF NOT EXISTS history (agent_id text, t_step int, key text, value text text)''')
self.db.execute('''CREATE TABLE IF NOT EXISTS value_types (key text, value_type text)''')
self.db.execute('''CREATE UNIQUE INDEX IF NOT EXISTS idx_history ON history (agent_id, t_step, key);''')
@@ -38,7 +43,7 @@ class History:
def db(self):
try:
self._db.cursor()
except sqlite3.ProgrammingError:
except (sqlite3.ProgrammingError, AttributeError):
self.db = None  # Reset the database
return self._db

@@ -46,6 +51,7 @@ class History:
def db(self, db_path=None):
db_path = db_path or self.db_path
if isinstance(db_path, str):
logger.debug('Connecting to database {}'.format(db_path))
self._db = sqlite3.connect(db_path)
else:
self._db = db_path
@@ -110,6 +116,7 @@ class History:
Use a cache to save state changes to avoid opening a session for every change.
The cache will be flushed at the end of the simulation, and when history is accessed.
'''
logger.debug('Flushing cache {}'.format(self.db_path))
with self.db:
for rec in self._tups:
self.db.execute("replace into history(agent_id, t_step, key, value) values (?, ?, ?, ?)", (rec.agent_id, rec.t_step, rec.key, rec.value))
@@ -208,6 +215,16 @@ class History:
df_p = df_p.reindex(t_steps, method='ffill')
return df_p.ffill()

def __getstate__(self):
state = dict(**self.__dict__)
del state['_db']
del state['_dtypes']
return state

def __setstate__(self, state):
self.__dict__ = state
self._dtypes = {}


class Records():
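The `backup` default flips from True to False here (see also the 0.13.7 changelog entry and the updated history test below). A small sketch of the intended behaviour, using a throwaway database path:

```python
import os
import tempfile
from soil import history

db_path = os.path.join(tempfile.mkdtemp(), 'demo.db.sqlite')

h = history.History(db_path=db_path)       # backup now defaults to False
h.save_record(agent_id='a_1', t_step=0, key='id', value='v')
h.flush_cache()

# Re-opening the same file keeps the stored records instead of silently rewriting them...
recovered = history.History(db_path=db_path)
assert recovered['a_1', 0, 'id'] == 'v'

# ...while backup=True moves the old database aside and starts an empty one.
fresh = history.History(db_path=db_path, backup=True)
assert len(fresh[None, None, None]) == 0
```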
@@ -88,14 +88,8 @@ class Simulation(NetworkSimulation):
environment_agents=None, environment_params=None,
environment_class=None, **kwargs):

if topology is None:
topology = utils.load_network(network_params,
dir_path=dir_path)
elif isinstance(topology, basestring) or isinstance(topology, dict):
topology = json_graph.node_link_graph(topology)

self.seed = str(seed) or str(time.time())
self.load_module = load_module
self.topology = nx.Graph(topology)
self.network_params = network_params
self.name = name or 'UnnamedSimulation'
self.num_trials = num_trials
@@ -103,12 +97,19 @@ class Simulation(NetworkSimulation):
self.default_state = default_state or {}
self.dir_path = dir_path or os.getcwd()
self.interval = interval
self.seed = str(seed) or str(time.time())
self.dump = dump
self.dry_run = dry_run

sys.path += [self.dir_path, os.getcwd()]

if topology is None:
topology = utils.load_network(network_params,
dir_path=self.dir_path)
elif isinstance(topology, basestring) or isinstance(topology, dict):
topology = json_graph.node_link_graph(topology)
self.topology = nx.Graph(topology)


self.environment_params = environment_params or {}
self.environment_class = utils.deserialize(environment_class,
known_modules=['soil.environment', ]) or Environment
@@ -201,7 +202,7 @@ class Simulation(NetworkSimulation):
return self.run_trial(*args, **kwargs)
except Exception as ex:
c = ex.__cause__
c.message = ''.join(traceback.format_tb(c.__traceback__)[3:])
c.message = ''.join(traceback.format_exception(type(c), c, c.__traceback__)[:])
return c

def to_dict(self):
@@ -1,5 +1,6 @@
import os
import ast
import sys
import yaml
import logging
import importlib
@@ -36,9 +37,14 @@ def load_network(network_params, dir_path=None):
return method(path, **kwargs)

net_args = network_params.copy()
net_type = net_args.pop('generator')
net_gen = net_args.pop('generator')

if dir_path not in sys.path:
sys.path.append(dir_path)

method = deserializer(net_gen,
known_modules=['networkx.generators',])

method = getattr(nx.generators, net_type)
return method(**net_args)

@@ -114,6 +120,8 @@ def serialize(v, known_modules=[]):
return func(v), tname

def deserializer(type_, known_modules=[]):
if type(type_) != str:  # Already deserialized
return type_
if type_ == 'str':
return lambda x='': x
if type_ == 'None':
@@ -139,7 +147,7 @@ def deserializer(type_, known_modules=[]):
module = importlib.import_module(modname)
cls = getattr(module, tname)
return getattr(cls, 'deserialize', cls)
except (ImportError, AttributeError) as ex:
except (ModuleNotFoundError, AttributeError) as ex:
errors.append((modname, tname, ex))
raise Exception('Could not find type {}. Tried: {}'.format(type_, errors))
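`load_network` now resolves the generator through `deserializer(..., known_modules=['networkx.generators'])` and adds `dir_path` to `sys.path`, so both bare networkx generator names and dotted paths to user modules should work. A rough sketch (the custom path assumes the custom_generator example shipped above):

```python
from soil.utils import load_network

# Bare name, resolved inside networkx.generators
G1 = load_network({'generator': 'complete_graph', 'n': 4})

# Dotted path to a user-supplied generator, importable thanks to dir_path
G2 = load_network({'generator': 'mymodule.mygenerator', 'n': 10, 'n_edges': 5},
                  dir_path='examples/custom_generator')
```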
@@ -1,255 +0,0 @@
import random
import networkx as nx
from soil.agents import BaseAgent, FSM, state, default_state
from scipy.spatial import cKDTree as KDTree

global betweenness_centrality_global
global degree_centrality_global

betweenness_centrality_global = None
degree_centrality_global = None

class TerroristSpreadModel(FSM):
    """
    Settings:
        information_spread_intensity

        terrorist_additional_influence

        min_vulnerability (optional else zero)

        max_vulnerability

        prob_interaction
    """

    def __init__(self, environment=None, agent_id=0, state=()):
        super().__init__(environment=environment, agent_id=agent_id, state=state)

        global betweenness_centrality_global
        global degree_centrality_global

        if betweenness_centrality_global == None:
            betweenness_centrality_global = nx.betweenness_centrality(self.global_topology)
        if degree_centrality_global == None:
            degree_centrality_global = nx.degree_centrality(self.global_topology)

        self.information_spread_intensity = environment.environment_params['information_spread_intensity']
        self.terrorist_additional_influence = environment.environment_params['terrorist_additional_influence']
        self.prob_interaction = environment.environment_params['prob_interaction']

        if self['id'] == self.civilian.id:  # Civilian
            self.initial_belief = random.uniform(0.00, 0.5)
        elif self['id'] == self.terrorist.id:  # Terrorist
            self.initial_belief = random.uniform(0.8, 1.00)
        elif self['id'] == self.leader.id:  # Leader
            self.initial_belief = 1.00
        else:
            raise Exception('Invalid state id: {}'.format(self['id']))

        if 'min_vulnerability' in environment.environment_params:
            self.vulnerability = random.uniform( environment.environment_params['min_vulnerability'], environment.environment_params['max_vulnerability'] )
        else :
            self.vulnerability = random.uniform( 0, environment.environment_params['max_vulnerability'] )

        self.mean_belief = self.initial_belief
        self.betweenness_centrality = betweenness_centrality_global[self.id]
        self.degree_centrality = degree_centrality_global[self.id]

        # self.state['radicalism'] = self.mean_belief

    def count_neighboring_agents(self, state_id=None):
        if isinstance(state_id, list):
            return len(self.get_neighboring_agents(state_id))
        else:
            return len(super().get_agents(state_id, limit_neighbors=True))

    def get_neighboring_agents(self, state_id=None):
        if isinstance(state_id, list):
            _list = []
            for i in state_id:
                _list += super().get_agents(i, limit_neighbors=True)
            return [ neighbour for neighbour in _list if isinstance(neighbour, TerroristSpreadModel) ]
        else:
            _list = super().get_agents(state_id, limit_neighbors=True)
            return [ neighbour for neighbour in _list if isinstance(neighbour, TerroristSpreadModel) ]

    @state
    def civilian(self):
        if self.count_neighboring_agents() > 0:
            neighbours = []
            for neighbour in self.get_neighboring_agents():
                if random.random() < self.prob_interaction:
                    neighbours.append(neighbour)
            influence = sum( neighbour.degree_centrality for neighbour in neighbours )
            mean_belief = sum( neighbour.mean_belief * neighbour.degree_centrality / influence for neighbour in neighbours )
            self.initial_belief = self.mean_belief
            mean_belief = mean_belief * self.information_spread_intensity + self.initial_belief * ( 1 - self.information_spread_intensity )
            self.mean_belief = mean_belief * self.vulnerability + self.initial_belief * ( 1 - self.vulnerability )

        if self.mean_belief >= 0.8:
            return self.terrorist

    @state
    def leader(self):
        self.mean_belief = self.mean_belief ** ( 1 - self.terrorist_additional_influence )
        if self.count_neighboring_agents(state_id=[self.terrorist.id, self.leader.id]) > 0:
            for neighbour in self.get_neighboring_agents(state_id=[self.terrorist.id, self.leader.id]):
                if neighbour.betweenness_centrality > self.betweenness_centrality:
                    return self.terrorist

    @state
    def terrorist(self):
        if self.count_neighboring_agents(state_id=[self.terrorist.id, self.leader.id]) > 0:
            neighbours = self.get_neighboring_agents(state_id=[self.terrorist.id, self.leader.id])
            influence = sum( neighbour.degree_centrality for neighbour in neighbours )
            mean_belief = sum( neighbour.mean_belief * neighbour.degree_centrality / influence for neighbour in neighbours )
            self.initial_belief = self.mean_belief
            self.mean_belief = mean_belief * self.vulnerability + self.initial_belief * ( 1 - self.vulnerability )
            self.mean_belief = self.mean_belief ** ( 1 - self.terrorist_additional_influence )

        if self.count_neighboring_agents(state_id=self.leader.id) == 0 and self.count_neighboring_agents(state_id=self.terrorist.id) > 0:
            max_betweenness_centrality = self
            for neighbour in self.get_neighboring_agents(state_id=self.terrorist.id):
                if neighbour.betweenness_centrality > max_betweenness_centrality.betweenness_centrality:
                    max_betweenness_centrality = neighbour
            if max_betweenness_centrality == self:
                return self.leader

    def add_edge(self, G, source, target):
        G.add_edge(source.id, target.id, start=self.env._now)

    def link_search(self, G, node, radius):
        pos = nx.get_node_attributes(G, 'pos')
        nodes, coords = list(zip(*pos.items()))
        kdtree = KDTree(coords)  # Cannot provide generator.
        edge_indexes = kdtree.query_pairs(radius, 2)
        _list = [ edge[int(not edge.index(node))] for edge in edge_indexes if node in edge ]
        return [ G.nodes()[index]['agent'] for index in _list ]

    def social_search(self, G, node, steps):
        nodes = list(nx.ego_graph(G, node, radius=steps).nodes())
        nodes.remove(node)
        return [ G.nodes()[index]['agent'] for index in nodes ]


class TrainingAreaModel(FSM):
    """
    Settings:
        training_influence

        min_vulnerability

    Requires TerroristSpreadModel.
    """

    def __init__(self, environment=None, agent_id=0, state=()):
        super().__init__(environment=environment, agent_id=agent_id, state=state)
        self.training_influence = environment.environment_params['training_influence']
        if 'min_vulnerability' in environment.environment_params:
            self.min_vulnerability = environment.environment_params['min_vulnerability']
        else: self.min_vulnerability = 0

    @default_state
    @state
    def terrorist(self):
        for neighbour in self.get_neighboring_agents():
            if isinstance(neighbour, TerroristSpreadModel) and neighbour.vulnerability > self.min_vulnerability:
                neighbour.vulnerability = neighbour.vulnerability ** ( 1 - self.training_influence )


class HavenModel(FSM):
    """
    Settings:
        haven_influence

        min_vulnerability

        max_vulnerability

    Requires TerroristSpreadModel.
    """

    def __init__(self, environment=None, agent_id=0, state=()):
        super().__init__(environment=environment, agent_id=agent_id, state=state)
        self.haven_influence = environment.environment_params['haven_influence']
        if 'min_vulnerability' in environment.environment_params:
            self.min_vulnerability = environment.environment_params['min_vulnerability']
        else: self.min_vulnerability = 0
        self.max_vulnerability = environment.environment_params['max_vulnerability']

    @state
    def civilian(self):
        for neighbour_agent in self.get_neighboring_agents():
            if isinstance(neighbour_agent, TerroristSpreadModel) and neighbour_agent['id'] == neighbour_agent.civilian.id:
                for neighbour in self.get_neighboring_agents():
                    if isinstance(neighbour, TerroristSpreadModel) and neighbour.vulnerability > self.min_vulnerability:
                        neighbour.vulnerability = neighbour.vulnerability * ( 1 - self.haven_influence )
                return self.civilian
        return self.terrorist

    @state
    def terrorist(self):
        for neighbour in self.get_neighboring_agents():
            if isinstance(neighbour, TerroristSpreadModel) and neighbour.vulnerability < self.max_vulnerability:
                neighbour.vulnerability = neighbour.vulnerability ** ( 1 - self.haven_influence )
        return self.terrorist


class TerroristNetworkModel(TerroristSpreadModel):
    """
    Settings:
        sphere_influence

        vision_range

        weight_social_distance

        weight_link_distance
    """

    def __init__(self, environment=None, agent_id=0, state=()):
        super().__init__(environment=environment, agent_id=agent_id, state=state)

        self.vision_range = environment.environment_params['vision_range']
        self.sphere_influence = environment.environment_params['sphere_influence']
        self.weight_social_distance = environment.environment_params['weight_social_distance']
        self.weight_link_distance = environment.environment_params['weight_link_distance']

    @state
    def terrorist(self):
        self.update_relationships()
        return super().terrorist()

    @state
    def leader(self):
        self.update_relationships()
        return super().leader()

    def update_relationships(self):
        if self.count_neighboring_agents(state_id=self.civilian.id) == 0:
            close_ups = self.link_search(self.global_topology, self.id, self.vision_range)
            step_neighbours = self.social_search(self.global_topology, self.id, self.sphere_influence)
            search = list(set(close_ups).union(step_neighbours))
            neighbours = self.get_neighboring_agents()
            search = [item for item in search if not item in neighbours and isinstance(item, TerroristNetworkModel)]
            for agent in search:
                social_distance = 1 / self.shortest_path_length(self.global_topology, self.id, agent.id)
                spatial_proximity = ( 1 - self.get_distance(self.global_topology, self.id, agent.id) )
                prob_new_interaction = self.weight_social_distance * social_distance + self.weight_link_distance * spatial_proximity
                if agent['id'] == agent.civilian.id and random.random() < prob_new_interaction:
                    self.add_edge(self.global_topology, self, agent)
                    break

    def get_distance(self, G, source, target):
        source_x, source_y = nx.get_node_attributes(G, 'pos')[source]
        target_x, target_y = nx.get_node_attributes(G, 'pos')[target]
        dx = abs( source_x - target_x )
        dy = abs( source_y - target_y )
        return ( dx ** 2 + dy ** 2 ) ** ( 1 / 2 )

    def shortest_path_length(self, G, source, target):
        try:
            return nx.shortest_path_length(G, source, target)
        except nx.NetworkXNoPath:
            return float('inf')
@@ -19,7 +19,7 @@ from xml.etree.ElementTree import tostring
from tornado.concurrent import run_on_executor
from concurrent.futures import ThreadPoolExecutor

from ..simulation import SoilSimulation
from ..simulation import Simulation
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

@@ -168,7 +168,7 @@ class SocketHandler(tornado.websocket.WebSocketHandler):

@run_on_executor
def nonblocking(self, config):
simulation = SoilSimulation(**config)
simulation = Simulation(**config)
return simulation.run()

@tornado.gen.coroutine
@@ -7,6 +7,8 @@ from soil import utils, simulation
ROOT = os.path.abspath(os.path.dirname(__file__))
EXAMPLES = join(ROOT, '..', 'examples')

FORCE_TESTS = os.environ.get('FORCE_TESTS', '')


class TestExamples(TestCase):
pass
@@ -19,7 +21,10 @@ def make_example_test(path, config):
s = simulation.from_config(config)
iterations = s.max_time * s.num_trials
if iterations > 1000:
self.skipTest('This example would probably take too long')
s.max_time = 100
s.num_trials = 1
if config.get('skip_test', False) and not FORCE_TESTS:
self.skipTest('Example ignored.')
envs = s.run_simulation(dry_run=True)
assert envs
for env in envs:
@@ -120,18 +120,18 @@ class TestHistory(TestCase):
assert os.path.exists(db_path)

# Recover the data
recovered = history.History(db_path=db_path, backup=False)
recovered = history.History(db_path=db_path)
assert recovered['a_1', 0, 'id'] == 'v'
assert recovered['a_1', 4, 'id'] == 'e'

# Using the same name should create a backup copy
# Using backup=True should create a backup copy, and initialize an empty history
newhistory = history.History(db_path=db_path, backup=True)
backuppaths = glob(db_path + '.backup*.sqlite')
assert len(backuppaths) == 1
backuppath = backuppaths[0]
assert newhistory.db_path == h.db_path
assert os.path.exists(backuppath)
assert not len(newhistory[None, None, None])
assert len(newhistory[None, None, None]) == 0

def test_history_tuples(self):
"""
@@ -2,6 +2,7 @@ from unittest import TestCase

import os
import yaml
import pickle
import networkx as nx
from functools import partial

@@ -248,12 +249,10 @@ class TestMain(TestCase):
assert name == 'soil.agents.BaseAgent'
assert ser == agents.BaseAgent

class CustomAgent(agents.BaseAgent):
pass

ser, name = utils.serialize(CustomAgent)
assert name == 'test_main.CustomAgent'
assert ser == CustomAgent
pickle.dumps(ser)

def test_serialize_builtin_types(self):

@@ -269,6 +268,7 @@ class TestMain(TestCase):
assert ser == 'test_main.CustomAgent'
ser = agents.serialize_type(agents.BaseAgent)
assert ser == 'BaseAgent'
pickle.dumps(ser)

def test_deserialize_agent_distribution(self):
agent_distro = [
@@ -284,6 +284,7 @@ class TestMain(TestCase):
converted = agents.deserialize_distribution(agent_distro)
assert converted[0]['agent_type'] == agents.CounterModel
assert converted[1]['agent_type'] == CustomAgent
pickle.dumps(converted)

def test_serialize_agent_distribution(self):
agent_distro = [
@@ -299,9 +300,39 @@ class TestMain(TestCase):
converted = agents.serialize_distribution(agent_distro)
assert converted[0]['agent_type'] == 'CounterModel'
assert converted[1]['agent_type'] == 'test_main.CustomAgent'
pickle.dumps(converted)

def test_pickle_agent_environment(self):
env = Environment(name='Test')
a = agents.BaseAgent(environment=env, agent_id=25)

a['key'] = 'test'

pickled = pickle.dumps(a)
recovered = pickle.loads(pickled)

assert recovered.env.name == 'Test'
assert recovered['key'] == 'test'
assert recovered['key', 0] == 'test'

def test_history(self):
'''Test storing in and retrieving from history (sqlite)'''
h = history.History()
h.save_record(agent_id=0, t_step=0, key="test", value="hello")
assert h[0, 0, "test"] == "hello"

def test_subgraph(self):
'''An agent should be able to subgraph the global topology'''
G = nx.Graph()
G.add_node(3)
G.add_edge(1, 2)
distro = agents.calculate_distribution(agent_type=agents.NetworkAgent)
env = Environment(name='Test', topology=G, network_agents=distro)
lst = list(env.network_agents)

a2 = env.get_agent(2)
a3 = env.get_agent(3)
assert len(a2.subgraph(limit_neighbors=True)) == 2
assert len(a3.subgraph(limit_neighbors=True)) == 1
assert len(a3.subgraph(limit_neighbors=True, center=False)) == 0
assert len(a3.subgraph(agent_type=agents.NetworkAgent)) == 3