
Generative Relations: MCDA

In this workshop, we will learn how to enable agents to utilize MCDA (Multi-Criteria Decision Analysis) in their spatial behaviors.
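The agent's voxel evaluation at the heart of this workshop is a fuzzy weighted product: every environmental value (normalized to [0, 1]) is raised to the power of the agent's preference for that layer, and the results are multiplied. The sketch below uses purely illustrative values (not workshop data) to show the aggregation that the simulation loop in section 1.2 performs for each free neighbouring voxel.

import numpy as np

# illustrative voxel values per information layer (all in [0, 1])
voxel_values = {"sun_access": 0.8, "sky_view": 0.5, "noise_sensitivity": 0.9}
# illustrative agent preferences (weights) for the same layers
agent_prefs = {"sun_access": 1.0, "sky_view": 0.2, "noise_sensitivity": 0.0}

# fuzzy weighted product: a weight of 0 makes a layer irrelevant (factor 1),
# a weight of 1 passes the layer value through unchanged
value = 1.0
for key, vox_val in voxel_values.items():
    value *= np.power(vox_val, agent_prefs[key])
print(value)  # 0.8 * 0.5**0.2 * 0.9**0 ≈ 0.70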

0. Initialization

0.1. Load required libraries

import os
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import cm
import topogenesis as tg
import pyvista as pv
import trimesh as tm
import numpy as np
import networkx as nx
import pandas as pd
import scipy as sp

np.random.seed(0)
# extra import function
def lattice_from_csv(file_path):
    # read metadata
    meta_df = pd.read_csv(file_path, nrows=3)

    shape = np.array(meta_df['shape'])
    unit = np.array(meta_df['unit'])
    minbound = np.array(meta_df['minbound'])

    # read lattice
    lattice_df = pd.read_csv(file_path, skiprows=5)

    # create the buffer
    buffer = np.array(lattice_df['value']).reshape(shape)

    # create the lattice
    l = tg.to_lattice(buffer, minbound=minbound, unit=unit)

    return l

0.2. Define the Neighborhood (Stencil)

# creating neighborhood definition
stencil = tg.create_stencil("von_neumann", 1, 1)
# setting the center to zero
stencil.set_index([0,0,0], 0)
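As an optional sanity check (assuming, as topogenesis does, that stencils behave like NumPy arrays), you can inspect the stencil to confirm that only the six face-adjacent neighbours remain marked after zeroing the centre:

# the von Neumann stencil of range 1 is a 3x3x3 array; after setting the
# centre to zero, only the six face neighbours carry a 1
print(np.array(stencil))
print("neighbour count:", int(np.array(stencil).sum()))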

0.3. Load the envelope lattice as the availability lattice

# loading the lattice from csv
lattice_path = os.path.relpath("data/lattice_complete_3_6.csv")
avail_lattice = lattice_from_csv(lattice_path)
init_avail_lattice = tg.to_lattice(np.copy(avail_lattice), avail_lattice)

0.4. Load Agents Information

# loading program (agents information) from CSV
prgm_path = os.path.relpath("data/matrix.csv")
agn_info = np.genfromtxt(prgm_path, delimiter=',')[1:, 1:]
# extract agent ids
agn_ids = agn_info[:, 0]
# extract agent preferences
agn_prefs = agn_info[:, 1:27]
# extract agent space area
agn_space_area = agn_info[:, 27]
# extract the initial location
agn_initial_loc = agn_info[:, 28].astype(int)

# load the same program as a dataframe to access the columns by name
agn_data = pd.read_csv(prgm_path)
agn_data.loc[1]["sun_access"]
1.0
agn_data.loc[0]
space_name                       Atrium
space_id                              0
Atrium                                1
Courtyard                             1
Starter_housing                       0
Assisted_living                       0
Student_housing                       0
Supermarket                         0.2
Cafe_restaurant_pub                 0.2
Cinema                              0.2
Arcade                              0.2
Workshops                           0.2
Community_center                      0
Co_cooking_center                     0
Library                             0.2
Gym                                   0
Laundry_room                          0
Ent_access_courtyard                  0
Ent_access_trashroom                  0
Ent_access_parking                    0
Ent_access_supermarket                0
Ent_access_library                    0
Ent_access_cinema_cafe_arcade         0
Ent_access_atrium                     1
sun_access                            1
sky_view                              0
silent_level_active                 0.6
noise_sensitivity                     0
space_area                          188
initial_loc                          -1
0 floor                               1
1 floor                               0
Name: 0, dtype: object

0.5. Initialize environment information layers: Sun Access Lattice, Sky View Lattice, and Noise Lattice

# loading the sun access lattice from csv
sun_acc_path = os.path.relpath("data/solar_access_complete_3_6.csv")
sun_acc_lattice = lattice_from_csv(sun_acc_path)

# loading the sky view lattice from csv
skyview_acc_path = os.path.relpath("data/roof_access_3_6.csv")
skyview_acc_lattice = lattice_from_csv(skyview_acc_path)

# loading the external noise lattice from csv
external_noise_acc_path = os.path.relpath("data/sound_3_6.csv")
external_noise_acc_lattice = lattice_from_csv(external_noise_acc_path)

# collect the environment information layers (lattices) in one dictionary
# the keys should match the column names of the program matrix
env_info_dict = {"sun_access": sun_acc_lattice,
                 "sky_view": skyview_acc_lattice,
                 "noise_sensitivity": external_noise_acc_lattice,
                }
env_info = list(env_info_dict.values())
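Because the fuzzy aggregation in section 1.2 raises each voxel value to the power of the agent's preference, all information layers are assumed to be normalized to the [0, 1] range. A quick optional check over the loaded layers:

# optional check: report the value range of every loaded information layer
for layer_key, layer_lattice in env_info_dict.items():
    print(layer_key, float(np.min(layer_lattice)), float(np.max(layer_lattice)))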

1. ABM Simulation

1.1. Initialize the Agents

# initialize the occupation lattice
occ_lattice = avail_lattice * 0 - 1

# Finding the index of the available voxels in avail_lattice
avail_flat = avail_lattice.flatten()
avail_index = np.array(np.where(avail_lattice == 1)).T

# count the number of spaces (rows) and initialize an agent for each space
agn_num = len(agn_info) 
# choose the initial location of agents randomly
select_id = np.random.choice(len(avail_index), agn_num, replace=False)
agn_origins = avail_index[select_id]

# adding the origins to the agents locations
agn_locs = []

# for each agent origin ... 
for a_id, a_origin, a_init_loc in zip(agn_ids, agn_origins, agn_initial_loc):
    if a_init_loc == -1:
        final_a_origin = a_origin
    else:
        final_a_origin = np.unravel_index(a_init_loc, avail_lattice.shape)
    # add the origin to the list of agent locations
    agn_locs.append([final_a_origin])

    # set the origin in the availability lattice as 0 (UNavailable)
    avail_lattice[tuple(final_a_origin)] = 0

    # set the origin in occupation lattice as the agent id (a_id)
    occ_lattice[tuple(final_a_origin)] = int(a_id) # this is now based on the id of the agent in the program
def floor_level_lattice(target_floor, avail_lattice):
    # initialize the floor lattice
    floor_lattice = avail_lattice * 0.0
    # cut a single column out
    single_column = floor_lattice[0,0,:]
    # specify a range array based on the number of voxels in the column
    range_column = np.arange(single_column.size)
    # compute the distances based on the range column
    distance_column = (1 / (np.abs(range_column - target_floor) + 1)**2).reshape(1, 1, avail_lattice.shape[2])
    # compute the floor lattice
    floor_lattice += distance_column
    # return the floor lattice
    return floor_lattice

# define the ground floor lattice
ground_floor_lattice = floor_level_lattice(0, avail_lattice)
# define the first floor lattice
first_floor_lattice = floor_level_lattice(1, avail_lattice)

# add to dictionary
env_info_dict["0 floor"] = ground_floor_lattice
env_info_dict["1 floor"] = first_floor_lattice
# visualizing the info lattices
p = pv.Plotter(notebook=True)

info_val_list = list(env_info_dict.values())
info_key_list = env_info_dict.keys()
for i, k in enumerate(info_key_list):
    print(i, k)

base_lattice = info_val_list[0]

# Create the spatial reference
grid = pv.UniformGrid()

# Set the grid dimensions: shape, because we want to inject our values on the POINT data
grid.dimensions = base_lattice.shape
# The bottom left corner of the data set
grid.origin = base_lattice.minbound
# These are the cell sizes along each axis
grid.spacing = base_lattice.unit

def create_mesh(value):
    f = int(value)
    lattice = info_val_list[f]

    # Add the data values to the cell data
    grid.point_arrays["info"] = lattice.flatten(order="F")  # Flatten the Lattice

    # adding the volume
    opacity = np.array([0,0.6,0.6,0.6,0.6,0.6,0.6])*1.5
    p.add_volume(grid, cmap="coolwarm", name='sphere', clim=[0.0, 1.0],opacity=opacity, shade=True)

    return

p.add_slider_widget(create_mesh, [0, len(info_val_list)-1], title='Time', value=0, event_type="always", style="classic", pointa=(0.1, 0.1), pointb=(0.9, 0.1))
p.show(use_ipyvtk=True)
0 sun_access
1 sky_view
2 noise_sensitivity
3 0 floor
4 1 floor

[(246.73679328932968, 145.93679328932967, 225.13679328932966),
 (48.60000000000001, -52.2, 27.0),
 (0.0, 0.0, 1.0)]
def dynamic_noise_lattice(agn_locs, avail_lattice):

    # define the noise range
    noise_range = [10.0, 60.0]

    # initialize noise sources
    noise_src_points = []
    noise_src_levels = []

    # iterate over agents
    for a_id in range(len(agn_locs)):
        # extract agent locations
        a_locs = agn_locs[a_id]
        # retrieve the silent level of the agent
        a_noise_level_mapped = 1 - agn_data.loc[a_id]["silent_level_active"]
        # mapping the [0,1] values to noise level (db)
        a_noise_level = a_noise_level_mapped * (noise_range[1] - noise_range[0]) + noise_range[0]

        # for each agent location
        for a_loc in a_locs:
            # append the noise source information
            noise_src_points.append(a_loc)
            noise_src_levels.append(a_noise_level)

    # convert to numpy array
    noise_src_points = np.array(noise_src_points)

    # create full lattice
    full_lattice = avail_lattice * 0 + 1

    # extract the coordinates of the centroids of all voxels
    vox_centroids = full_lattice.centroids

    # extract voxel indices of all voxels
    vox_indices = np.array(np.where(full_lattice==1)).T

    # initializing the sum lattice of noise
    sum_noise_lats = avail_lattice * 0.0

    # for each source of noise
    for src_point, src_level in zip(noise_src_points,noise_src_levels):
        # initialize the occupation lattice
        dist_latice = avail_lattice * 0.0

        for cen, ind in zip(vox_centroids, vox_indices):
            # compute the euclidian distance
            dist_latice[tuple(ind)] = sp.spatial.distance.euclidean(cen, src_point)

        # computing the noise lattice from dist lattice
        noise_latice = src_level - 20 * np.log10(dist_latice) - 8

        # summing
        sum_noise_lats += np.power(10, noise_latice / 10.0)

    # computing the final aggregation
    agg_noise_lats = 10 * np.log10(sum_noise_lats)

    # normalizing the noise values
    normalized_silence_lattice = 1 - (agg_noise_lats - np.min(agg_noise_lats)) / (np.max(agg_noise_lats) - np.min(agg_noise_lats))

    return normalized_silence_lattice
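The function above follows a simple point-source model: each source level decays with distance as src_level - 20 * log10(d) - 8 dB, and the contributions of multiple sources are summed in the linear (power) domain before converting back to decibels. A small worked example with made-up numbers (not workshop data):

# two hypothetical sources of 52 dB and 45 dB heard at 4 m and 2 m
src_levels = np.array([52.0, 45.0])
distances = np.array([4.0, 2.0])
received = src_levels - 20 * np.log10(distances) - 8           # per-source level at the voxel
combined = 10 * np.log10(np.sum(np.power(10, received / 10)))  # logarithmic summation
print(received, combined)  # roughly 32.0 dB, 31.0 dB and 34.5 dB combined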

1.2. Running the Simulation

dynamic_info = {"noise_sensitivity": dynamic_noise_lattice}
# make a deep copy of occupation lattice
cur_occ_lattice = tg.to_lattice(np.copy(occ_lattice), occ_lattice)
# initializing the list of frames
frames = [cur_occ_lattice]

# setting the time variable to 0
t = 0
n_frames = max(agn_space_area)
# Simulation Loop
# main feedback loop of the simulation (for each time step ...)
while t<n_frames:
    # update the info lattices
    # iterate over dynamic lattices
    for info_key, info_function in dynamic_info.items():
        env_info_dict[info_key] = info_function(agn_locs, avail_lattice)
        # e.g. env_info_dict["noise_sensitivity"] = dynamic_noise_lattice(agn_locs, avail_lattice)

    # Agent Loop
    # for each agent ... 
    for a_id in range(agn_num):
        # retrieve the list of the locations of the current agent
        a_locs = agn_locs[a_id]
        # Make sure there are no more voxels than specified (area check)
        if len(a_locs) < agn_data.loc[a_id]["space_area"]:
            # initialize the list of free neighbours
            free_neighs = []
            # Location loop
            # for each location of the agent
            for loc in a_locs:
                # retrieve the list of neighbours of the agent based on the stencil
                neighs = avail_lattice.find_neighbours_masked(stencil, loc = loc)

                # for each neighbour ... 
                for n in neighs:
                    # compute 3D index of neighbour
                    neigh_3d_id = np.unravel_index(n, avail_lattice.shape)
                    # if the neighbour is available... 
                    if avail_lattice[neigh_3d_id]:
                        # add the neighbour to the list of free neighbours
                        free_neighs.append(neigh_3d_id)

            # check if found any free neighbour
            if len(free_neighs)>0:   
                # convert free neighbours to a numpy array
                free_neighs = np.array(free_neighs)

                # evaluating the free neighbours based on the environment layers
                neigh_vals = []
                # retrieve the agent preferences as a dictionary (column name -> weight)
                a_pref_dict = agn_data.loc[a_id].to_dict()
                # Neighbour Evaluation Loop
                for neigh in free_neighs:
                    neigh_value = 1.0
                    # for every lattice in the environment informations
                    for info_key, info_lattice in env_info_dict.items():
                        # Here we utilise fuzzy logic to compare different layers
                        # of environmental information and evaluate the voxel for the agent.
                        # This method is introduced and generalised in Pirouz Nourian's
                        # dissertation: section 5.7.3, pp. 201-208, eq. 57. Refer to that
                        # section for the full mathematical details.
                        vox_val = info_lattice[tuple(neigh)]
                        agn_vox_val = np.power(vox_val, a_pref_dict[info_key])
                        neigh_value *= agn_vox_val
                    # add the neighbour value to the list of values
                    neigh_vals.append(neigh_value)

                # convert to numpy array
                neigh_vals = np.array(neigh_vals)
                # select the neighbour with highest value 
                selected_int = np.argmax(neigh_vals) 
                # find the 3D integer index of the selected neighbour
                selected_neigh_3d_id = tuple(free_neighs[selected_int].T)
                # find the location of the newly selected neighbour
                selected_neigh_loc = np.array(selected_neigh_3d_id).flatten()

                # add the newly selected neighbour location to agent locations
                agn_locs[a_id].append(selected_neigh_loc)
                # set the newly selected neighbour as UNavailable (0) in the availability lattice
                avail_lattice[selected_neigh_3d_id] = 0
                # set the newly selected neighbour as OCCUPIED by current agent 
                # (voxels keep -1 while unoccupied, so we write the agent id instead)
                occ_lattice[selected_neigh_3d_id] = a_id

    # constructing the new lattice
    new_occ_lattice = tg.to_lattice(np.copy(occ_lattice), occ_lattice)
    # adding the new lattice to the list of frames
    frames.append(new_occ_lattice)
    # adding one to the time counter
    t += 1

1.3. Visualizing the simulation

p = pv.Plotter(notebook=True)

base_lattice = frames[0]

# Set the grid dimensions: shape + 1 because we want to inject our values on the CELL data
grid = pv.UniformGrid()
grid.dimensions = np.array(base_lattice.shape) + 1
# The bottom left corner of the data set
grid.origin = base_lattice.minbound - base_lattice.unit * 0.5
# These are the cell sizes along each axis
grid.spacing = base_lattice.unit 

# adding the boundingbox wireframe
p.add_mesh(grid.outline(), color="grey", label="Domain")

# adding the availability lattice
init_avail_lattice.fast_vis(p)

# adding axes
p.add_axes()
p.show_bounds(grid="back", location="back", color="#aaaaaa")

# make a dictionary for the annotations
space_list = {
    0: "Atrium",
    1: "Courtyard",
    2: "Starter housing",
    3: "Assisted living",
    4: "Student housing",
    5: "Supermarket",
    6: "Cafe restaurant pub",
    7: "Cinema",
    8: "Arcade",
    9: "Workshops",
    10: "Community center",
    11: "Co cooking center",
    12: "Library",
    13: "Gym",
    14: "Laundry room",
    15: "Green_roof",
    16: "Entrance courtyard",
    17: "Entrance trashroom",
    18: "Entrance parking",
    19: "Entrance supermarket",
    20: "Entrance library",
    21: "Entrance cinema cafe arcade",
    22: "Entrance atrium",
}
# make a dictionary for the scalar bar arguments
sargs = dict(
    shadow = True,
    n_labels = 0,
    italic = False,
    fmt ="%.0f",
    font_family="arial",
    height = 0.6,
    vertical = True,
    position_x = 1.05,
    position_y = 1)


def create_mesh(value):
    f = int(value)
    lattice = frames[f]

    # Add the data values to the cell data
    grid.cell_arrays["Agents"] = lattice.flatten(order="F").astype(int)  # Flatten the array!
    # filtering the voxels
    threshed = grid.threshold([-0.1, agn_num - 0.9])
    # adding the voxels
    p.add_mesh(threshed, name='sphere', show_edges=True, opacity=1.0, show_scalar_bar=True, annotations = space_list, scalar_bar_args=sargs, cmap="tab20b")


    return


p.add_slider_widget(create_mesh, [0, n_frames], title='Time', value=0, event_type="always", style="classic", pointa=(0.1, 0.2), pointb=(0.9, 0.2))
p.show(use_ipyvtk=True)
[(253.02459757597026, 152.2245973852354, 231.42459588319835),
 (48.60000133514404, -52.19999885559082, 26.99999964237213),
 (0.0, 0.0, 1.0)]

1.4. Saving lattice frames in CSV

for i, lattice in enumerate(frames):
    csv_path = os.path.relpath("data/abm/abm_f_"+ f'{i:03}' + '.csv')
    lattice.to_csv(csv_path)
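If needed, a saved frame can be read back with the lattice_from_csv helper from section 0.1 (assuming the CSV layout written by to_csv matches what that helper expects):

# reload the first saved frame as a quick sanity check
frame_0 = lattice_from_csv(os.path.relpath("data/abm/abm_f_000.csv"))
print(frame_0.shape)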

Credits

__author__ = "Shervin Azadi and Pirouz Nourian"
__license__ = "MIT"
__version__ = "1.0"
__url__ = "https://github.com/shervinazadi/spatial_computing_workshops"
__summary__ = "Spatial Computing Design Studio Workshop on MCDA and Path Finding for Generative Spatial Relations"









Last update: January 25, 2021