"""Driver script: build a file index for thesis dataset A and export it to CSV.

Uses helpers from the local `convert` module (wildcard import provides
`complement_pairs`, `generate_df_tuples`, `DataProcessor`, and
`generate_damage_files_index`).  The commented-out sections are the author's
toggles for processing dataset B and for the older index-based workflow —
left in place deliberately so dataset B can be re-enabled by uncommenting.
"""

from convert import *  # noqa: F401,F403 — project helpers; names listed in module docstring
from joblib import dump, load  # NOTE(review): only used by the commented-out cache line below

# Older workflow for dataset B, kept for reference:
# b = generate_damage_files_index(
#     num_damage=6,
#     file_index_start=1,
#     col=5,
#     base_path="D:/thesis/data/dataset_B",
#     prefix="zzzBD",
#     # undamage_file="zzzBU.TXT"
# )

# Example: groups placing the undamaged-state file (df0) at the beginning.
# NOTE(review): neither special_groups_A nor special_groups_B is currently
# passed to generate_df_tuples (the complement list is used instead) —
# confirm whether these are still needed or were superseded.
special_groups_A = [
    {'df_name': 'zzzAU.TXT', 'position': 0, 'size': 5}  # add at beginning
]
special_groups_B = [
    {'df_name': 'zzzBU.TXT', 'position': 0, 'size': 5}  # add at beginning
]

# Build the (damage-case, complement) tuples for cases 1..30 of dataset A.
a_complement = [
    comp
    for n in range(1, 31)
    for comp in complement_pairs(n)
]
a = generate_df_tuples(special_groups=a_complement, prefix="zzzAD")

# Same pipeline for dataset B — uncomment to regenerate:
# b_complement = [
#     comp
#     for n in range(1, 31)
#     for comp in complement_pairs(n)
# ]
# b = generate_df_tuples(special_groups=b_complement, prefix="zzzBD")

# Older index-based workflow for dataset A, kept for reference:
# a = generate_damage_files_index(
#     num_damage=6,
#     file_index_start=1,
#     col=5,
#     base_path="D:/thesis/data/dataset_A",
#     prefix="zzzAD",
#     # undamage_file="zzzBU.TXT"
# )

# Load dataset A (with timestamps) and export the raw tables to CSV.
data_A = DataProcessor(file_index=a, base_path="D:/thesis/data/dataset_A", include_time=True)
# data_A.create_vector_column(overwrite=True)
# # data_A.create_limited_sensor_vector_column(overwrite=True)
data_A.export_to_csv("D:/thesis/data/converted/raw")

# Dataset B equivalent — uncomment to export:
# data_B = DataProcessor(file_index=b, base_path="D:/thesis/data/dataset_B", include_time=True)
# data_B.create_vector_column(overwrite=True)
# # data_B.create_limited_sensor_vector_column(overwrite=True)
# data_B.export_to_csv("D:/thesis/data/converted/raw_B")

# Debug helpers — restore a cached result and drop into the debugger:
# a = load("D:/cache.joblib")
# breakpoint()