nastasiasnk committed 6f24628
Parent(s): eb1dd2f
Update app.py
app.py
CHANGED
@@ -110,40 +110,49 @@ def test(input_json):
 
 
     # ------------------------- Accessing input data from Grasshopper ------------------------- #
-
 
-
-
-
-
-    if df_lu_filtered is None or df_lu_filtered.empty:
+    from config import useGrasshopperData
+
+    if useGrasshopperData == True:
+        matrix = inputs['input']["matrix"]
         landuses = inputs['input']["landuse_areas"]
-        df_landuses = pd.DataFrame(landuses).T
-        df_landuses = df_landuses.round(0).astype(int)
-    else:
-        df_landuses = df_lu_filtered
-        df_landuses = df_landuses.round(0).astype(int)
 
+        dfLanduses = pd.DataFrame(landuses).T
+        dfLanduses = dfLanduses.round(0).astype(int)
+
+        dfMatrix = pd.DataFrame(matrix).T
+        dfMatrix = dfMatrix.round(0).astype(int)
+    else:
+        dfLanduses = df_lu_filtered.copy()
+        dfLanduses = dfLanduses.round(0).astype(int)
+
+        dfMatrix = df_dm.copy()
+        dfMatrix = dfMatrix.round(0).astype(int)
 
-
-    #df_landuses = df_landuses.round(0).astype(int)
-
+
     attributeMapperDict_gh = inputs['input']["attributeMapperDict"]
     landuseMapperDict_gh = inputs['input']["landuseMapperDict"]
-
-    alpha = inputs['input']["alpha"]
-    alpha = float(alpha)
-    threshold = inputs['input']["threshold"]
-    threshold = float(threshold)
-
-    df_matrix = pd.DataFrame(matrix).T
-    df_matrix = df_matrix.round(0).astype(int)
 
+    if not inputs['input']["alpha"]:
+        from imports_utils import alpha
+    else:
+        alpha = inputs['input']["alpha"]
+        alpha = float(alpha)
+
+    if not inputs['input']["threshold"]:
+        from imports_utils import threshold
+    else:
+        threshold = inputs['input']["threshold"]
+        threshold = float(threshold)
+
+
+
+
 
 
     from imports_utils import splitDictByStrFragmentInColumnName
-
+
+    """
     # List containing the substrings to check against
     tranportModes = ["DRT", "GMT", "HSR"]
 
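This hunk makes the data source switchable: a useGrasshopperData flag imported from config decides whether the land-use areas and travel matrix come from the incoming payload or from the DataFrames already loaded on the server (df_lu_filtered, df_dm), and alpha / threshold now fall back to defaults from imports_utils when the payload leaves them empty. A minimal standalone sketch of that selection logic; the prepare_inputs helper and the placeholder default values are illustrative only and not part of app.py:

import pandas as pd

DEFAULT_ALPHA = 1.0        # placeholder; the real default lives in imports_utils.alpha
DEFAULT_THRESHOLD = 1.0    # placeholder; the real default lives in imports_utils.threshold

def prepare_inputs(payload, df_lu_filtered, df_dm, use_grasshopper_data):
    """Return (dfLanduses, dfMatrix, alpha, threshold) from either data source."""
    if use_grasshopper_data:
        # Grasshopper sends land-use areas and the travel matrix inside the payload
        dfLanduses = pd.DataFrame(payload["landuse_areas"]).T.round(0).astype(int)
        dfMatrix = pd.DataFrame(payload["matrix"]).T.round(0).astype(int)
    else:
        # otherwise reuse the data already loaded on the server
        dfLanduses = df_lu_filtered.copy().round(0).astype(int)
        dfMatrix = df_dm.copy().round(0).astype(int)

    # empty or missing values in the payload fall back to module-level defaults
    alpha = float(payload["alpha"]) if payload.get("alpha") else DEFAULT_ALPHA
    threshold = float(payload["threshold"]) if payload.get("threshold") else DEFAULT_THRESHOLD
    return dfLanduses, dfMatrix, alpha, threshold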
@@ -157,48 +166,22 @@ def test(input_json):
     df_art_matrix = df_art_matrix.round(0).astype(int)
     df_gmt_matrix = pd.DataFrame(gmt_dict).T
     df_gmt_matrix = df_art_matrix.round(0).astype(int)
-
+
+    """
 
     # create a mask based on the matrix size and ids, crop activity nodes to the mask
-    mask_connected =
+    mask_connected = dfMatrix.index.tolist()
 
-    valid_indexes = [idx for idx in mask_connected if idx in
+    valid_indexes = [idx for idx in mask_connected if idx in dfLanduses.index]
     # Identify and report missing indexes
     missing_indexes = set(mask_connected) - set(valid_indexes)
     if missing_indexes:
         print(f"Error: The following indexes were not found in the DataFrame: {missing_indexes}, length: {len(missing_indexes)}")
 
     # Apply the filtered mask
-
-
-    """
-    # find a set of unique domains, to which subdomains are aggregated
-    temp = []
-    for key, values in livabilityMapperDict.items():
-        domain = livabilityMapperDict[key]['domain']
-        for item in domain:
-            if ',' in item:
-                domain_list = item.split(',')
-                livabilityMapperDict[key]['domain'] = domain_list
-                for domain in domain_list:
-                    temp.append(domain)
-            else:
-                if item != 0:
-                    temp.append(item)
-
-    domainsUnique = list(set(temp))
+    dfLanduses_filtered = dfLanduses.loc[valid_indexes]
 
-
-    # find a list of unique subdomains, to which land uses are aggregated
-    temp = []
-    for key, values in landuseMapperDict.items():
-        subdomain = str(landuseMapperDict[key]["subdomain livability"])
-        if subdomain != 0:
-            temp.append(subdomain)
-
-    subdomainsUnique = list(set(temp))
 
-    """
     from imports_utils import findUniqueDomains
     from imports_utils import findUniqueSubdomains
 
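The inline aggregation deleted in this hunk (together with the transport-matrix block now wrapped in a triple-quoted string) is replaced by the findUniqueDomains and findUniqueSubdomains helpers imported from imports_utils. A sketch of equivalent helpers, reconstructed from the removed block; the actual implementations in imports_utils may differ:

def findUniqueDomains(livabilityMapperDict):
    # collect every livability domain referenced by the mapper,
    # splitting comma-separated entries into individual domain names
    temp = []
    for key in livabilityMapperDict:
        domain = livabilityMapperDict[key]['domain']
        for item in domain:
            if ',' in item:
                domain_list = item.split(',')
                livabilityMapperDict[key]['domain'] = domain_list
                temp.extend(domain_list)
            elif item:  # the removed code compared against int 0; keep non-empty entries
                temp.append(item)
    return list(set(temp))

def findUniqueSubdomains(landuseMapperDict):
    # collect the livability subdomain assigned to each land use
    temp = []
    for key in landuseMapperDict:
        subdomain = str(landuseMapperDict[key]["subdomain livability"])
        if subdomain and subdomain != "0":  # skip unassigned land uses
            temp.append(subdomain)
    return list(set(temp))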
@@ -212,35 +195,30 @@ def test(input_json):
 
     domainsUnique = findUniqueDomains(livabilityMapperDict)
     subdomainsUnique = findUniqueSubdomains(landuseMapperDict)
+
+    LivabilitySubdomainsWeights = landusesToSubdomains(dfMatrix,df_lu_filtered,landuseMapperDict,subdomainsUnique)
 
-
-
-    LivabilitySubdomainsWeights = landusesToSubdomains(df_dm,df_lu_filtered,landuseMapperDict,subdomainsUnique)
-
-
-    WorkplacesNumber = FindWorkplacesNumber(df_dm,livabilityMapperDict,LivabilitySubdomainsWeights,subdomainsUnique)
+    WorkplacesNumber = FindWorkplacesNumber(dfMatrix,livabilityMapperDict,LivabilitySubdomainsWeights,subdomainsUnique)
 
     # prepare an input weights dataframe for the parameter LivabilitySubdomainsInputs
     LivabilitySubdomainsInputs =pd.concat([LivabilitySubdomainsWeights, WorkplacesNumber], axis=1)
 
-    subdomainsAccessibility = computeAccessibility(
-    artAccessibility = computeAccessibility_pointOfInterest(df_art_matrix,'ART',alpha,threshold)
-    gmtAccessibility = computeAccessibility_pointOfInterest(df_gmt_matrix,'GMT+HSR',alpha,threshold)
+    subdomainsAccessibility = computeAccessibility(dfMatrix,LivabilitySubdomainsInputs,alpha,threshold)
+    #artAccessibility = computeAccessibility_pointOfInterest(df_art_matrix,'ART',alpha,threshold)
+    #gmtAccessibility = computeAccessibility_pointOfInterest(df_gmt_matrix,'GMT+HSR',alpha,threshold)
 
-    AccessibilityInputs = pd.concat([subdomainsAccessibility, artAccessibility,gmtAccessibility], axis=1)
+    #AccessibilityInputs = pd.concat([subdomainsAccessibility, artAccessibility,gmtAccessibility], axis=1)
 
 
     if 'jobs' not in subdomainsAccessibility.columns:
         print("Error: Column 'jobs' does not exist in the subdomainsAccessibility.")
 
-    livability = accessibilityToLivability(
+    livability = accessibilityToLivability(dfMatrix,subdomainsAccessibility,livabilityMapperDict,domainsUnique)
 
 
     livability_dictionary = livability.to_dict('index')
     LivabilitySubdomainsInputs_dictionary = LivabilitySubdomainsInputs.to_dict('index')
     subdomainsAccessibility_dictionary = AccessibilityInputs.to_dict('index')
-    artmatrix = df_art_matrix.to_dict('index')
-
     LivabilitySubdomainsWeights_dictionary = LivabilitySubdomainsWeights.to_dict('index')
 
 
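landusesToSubdomains, FindWorkplacesNumber, computeAccessibility and accessibilityToLivability come from the project's own modules and their bodies are not part of this diff; the change here is that they now receive dfMatrix and that alpha and threshold are threaded into the accessibility call, while the ART/GMT point-of-interest branch is commented out. As a purely illustrative aside, a generic distance-decay sketch of how an alpha/threshold pair is commonly applied to a travel matrix and a weights table; this is an assumption about the pattern, not the repo's computeAccessibility:

import numpy as np
import pandas as pd

def accessibility_sketch(dfMatrix, dfWeights, alpha, threshold):
    # dfMatrix: origins x destinations travel times;
    # dfWeights: per-destination weights (e.g. subdomain areas or job counts), one column per indicator
    travel = dfMatrix.to_numpy(dtype=float)
    decay = np.exp(-alpha * travel) * (travel <= threshold)   # discount by distance, cut off beyond threshold
    scores = decay @ dfWeights.loc[dfMatrix.columns].to_numpy(dtype=float)
    return pd.DataFrame(scores, index=dfMatrix.index, columns=dfWeights.columns)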