I would need to check one thing with the community, though: we need to store the new shortest path calculated at each iteration of the BFW assignment. I see that this information is stored in the .feather files, and I am trying to convert it into an interpretable .csv file. To do so, I have adapted part of the code shared by Kai Tang (in another thread); however, in that thread it is mentioned that there is a mistake somewhere in his code, without pinpointing exactly where. I think the mistake is in the assignment part, not in the .feather-parsing part, but I need to be sure.
I would therefore like to check with you whether the following lines of code work as intended, or whether I am instead introducing a mistake without realizing it:
# Root folder where the assignment dumps its path files.
paths_ = r'C:\Users\Aequilibrae\Sioux\path_files'

# The assignment writes the node/link correspondence feathers inside a
# randomly-named processor-id sub-folder (previously hard-coded as
# 'f61bc3183e4b4cab82b7f5e6d5513b3d').  Discover that folder instead of
# hard-coding the hash, then move the two mapping files up to the root.
# NOTE(review): the 'zz' prefix presumably keeps them out of the way of
# parse_path_file's os.listdir(...)[0] lookup -- confirm listdir ordering
# is what you expect on your system.
_processor_dir = next(d for d in os.listdir(paths_)
                      if os.path.isdir(os.path.join(paths_, d)))
shutil.move(os.path.join(paths_, _processor_dir, 'correspondence_cc_c.feather'),
            os.path.join(paths_, 'zzcorrespondence_cc_c.feather'))
shutil.move(os.path.join(paths_, _processor_dir, 'nodes_to_indices_cc_c.feather'),
            os.path.join(paths_, 'zznodes_to_indices_cc_c.feather'))
def parse_path_file(path_fldr=None, mode_name=None, cen_array=None):
    """Collect the per-iteration, per-origin path feathers into one DataFrame.

    Parameters
    ----------
    path_fldr : str
        The ``path_files`` folder; its first sub-folder (the processor id)
        contains one ``iterN`` directory per assignment iteration.
    mode_name : str
        Class label written into the ``class_type`` column.
    cen_array : sequence
        One entry per origin centroid; only its length is used to enumerate
        the ``o{i}.feather`` files.

    Returns
    -------
    pandas.DataFrame
        Columns ``['origins', 'destinations', 'link_seq']``, one row per
        origin-destination path per iteration.
    """
    # The processor-id folder is assumed to be the first directory entry.
    processor_id = os.listdir(path_fldr)[0]
    # Number of iterN sub-folders == number of assignment iterations.
    iter_len = len(os.listdir(os.path.join(path_fldr, processor_id)))

    map_node_path = r'C:\Users\Aequilibrae\Sioux\path_files\zznodes_to_indices_cc_c.feather'
    map_link_path = r'C:\Users\Aequilibrae\Sioux\path_files\zzcorrespondence_cc_c.feather'
    map_link_df = pd.read_feather(map_link_path)
    map_node_df = pd.read_feather(map_node_path)

    # NOTE(review): this maps node_index -> positional row index.  Below it
    # is applied to origin/destination *indices*, which would require the
    # inverse mapping ({row index: node id}) to recover network node ids.
    # This is the most likely location of the mistake mentioned in the
    # original thread -- confirm against the feather schema before trusting
    # the output node ids.
    node_new_origin_dict = {n: o for n, o in zip(map_node_df['node_index'], map_node_df.index)}

    map_link_df['__compressed_id__'] = map_link_df['__compressed_id__'].astype(int)
    # compressed link id -> (from-node, to-node), both remapped as above.
    compressed_link_ft_dict = {int(l): (node_new_origin_dict[int(f)], node_new_origin_dict[int(t)])
                               for l, f, t in zip(map_link_df['__compressed_id__'],
                                                  map_link_df['a_node'],
                                                  map_link_df['b_node'])}

    # Collect frames and concat once at the end: the private
    # DataFrame._append API was removed in pandas 2.x, and repeated append
    # is quadratic anyway.
    frames = []
    for t in range(iter_len):
        curr_iter = os.path.join(path_fldr, processor_id, f'iter{t + 1}')
        for i in range(len(cen_array)):
            path_file_path = os.path.join(curr_iter, 'path_cc_c', f'o{i}.feather')
            index_file_path = os.path.join(curr_iter, 'path_cc_c', f'o{i}_indexdata.feather')
            feather_path_df = pd.read_feather(path_file_path)
            feather_index_df = pd.read_feather(index_file_path)
            path_df = generate_path_from_feather(feather_path_df=feather_path_df,
                                                 feather_index_df=feather_index_df,
                                                 compressed_link_ft_dict=compressed_link_ft_dict)
            path_df['origins'] = i
            path_df['class_type'] = mode_name
            path_df['origins'] = path_df['origins'].apply(lambda x: node_new_origin_dict[x])
            path_df['destinations'] = path_df['destinations'].apply(lambda x: node_new_origin_dict[x])
            frames.append(path_df)

    if not frames:
        return pd.DataFrame(columns=['origins', 'destinations', 'link_seq'])
    path_res_df = pd.concat(frames)
    return path_res_df[['origins', 'destinations', 'link_seq']]
def generate_path_from_feather(feather_path_df=None, feather_index_df=None, compressed_link_ft_dict=None):
    """Rebuild per-destination link sequences from one origin's path feather.

    Parameters
    ----------
    feather_path_df : pandas.DataFrame
        Column ``data`` holds compressed link ids of the path tree, stored
        destination-to-origin; rows are addressed by a RangeIndex.
    feather_index_df : pandas.DataFrame
        Column ``data`` holds, per destination, the (exclusive) end offset of
        that destination's slice inside ``feather_path_df``.  Mutated in place.
    compressed_link_ft_dict : dict
        compressed link id -> (from-node, to-node) tuple.

    Returns
    -------
    pandas.DataFrame
        Columns ``['destinations', 'link_seq']``; ``link_seq`` is the list of
        (from, to) tuples in origin-to-destination order ([] when the slice
        is empty, i.e. the destination offset equals the previous one).
    """
    # Translate each compressed link id into its (from, to) node pair.
    feather_path_df['ft'] = feather_path_df['data'].apply(lambda x: compressed_link_ft_dict[x])

    # Each row's slice runs from the previous destination's end offset
    # ('from', 0 for the first row) up to its own end offset ('to').
    feather_index_df = feather_index_df.reset_index(drop=False).rename(columns={'index': 'destinations', 'data': 'to'})
    feather_index_df['from'] = feather_index_df['to'].shift(1).fillna(0).astype(int)

    # zip-based loop instead of apply(axis=1) with positional Series
    # indexing (x[0]/x[1]), which is deprecated in pandas >= 2.1.
    # .loc is label-inclusive, hence the `t - 1`; reversed because the
    # feather stores the path destination-first.
    feather_index_df['link_seq'] = [
        feather_path_df.loc[f: t - 1, 'ft'].to_list()[::-1] if t != f else []
        for f, t in zip(feather_index_df['from'], feather_index_df['to'])
    ]
    return feather_index_df[['destinations', 'link_seq']]
# Sioux Falls has 24 centroids; cen_array is only used for its length
# (one o{i}.feather file per origin).
path_df = parse_path_file(path_fldr=paths_,
                          mode_name='tc_car',
                          cen_array=list(range(24)))
# NOTE(review): to_csv without index=False also writes the RangeIndex as a
# first unnamed column -- kept as-is to preserve the original CSV layout.
path_df.to_csv("paths.csv")