[EXP] Alternative Undamaged Case Data #100
@@ -25,6 +25,15 @@ class DamageFilesIndices(TypedDict):
|
||||
damage_index: int
|
||||
files: List[str]
|
||||
|
||||
def complement_pairs(n, prefix="zzzAD", extension="TXT"):
    """Yield the four complement (filename, [first_col, last_col]) tuples for ``<prefix><n>.<extension>``.

    Each file number ``n`` maps to an "original" index ``orig_a`` in 1..5
    (cycling with ``n``).  For every OTHER index ``a`` in 1..5, one tuple
    ``(filename, [a, a + 25])`` is yielded — pairing the first-column index
    ``a`` with its offset-by-25 last-column index.  Since exactly one of the
    five indices is skipped, exactly four tuples are produced per call.

    Args:
        n: 1-based file number; used both in the filename and to derive
            ``orig_a``.
        prefix: filename prefix (default ``"zzzAD"``; parameterized to
            resolve the old "shouldn't be hardcoded" TODO — the default
            preserves the previous behavior).
        extension: filename extension without the dot (default ``"TXT"``).

    Yields:
        ``(filename, [a, a + 25])`` for each of the four non-original
        ``a`` values, in increasing order of ``a``.
    """
    filename = f"{prefix}{n}.{extension}"
    orig_a = (n - 1) % 5 + 1  # original index, cycles 1..5 as n increases
    for a in range(1, 6):  # candidate indices a = 1..5
        if a != orig_a:  # skip the file's own original index
            # Generator (yield) so callers can lazily chain many files.
            yield (filename, [a, a + 25])
|
||||
|
||||
def generate_df_tuples(total_dfs=30, group_size=5, prefix="zzzAD", extension="TXT", first_col_start=1, last_col_offset=25,
|
||||
special_groups=None, group=True):
|
||||
@@ -68,22 +77,10 @@ def generate_df_tuples(total_dfs=30, group_size=5, prefix="zzzAD", extension="TX
|
||||
|
||||
# Add special groups at specified positions (other than beginning)
|
||||
if special_groups:
|
||||
for group in special_groups:
|
||||
position = group.get('position', 0) # default value is 0 if not specified
|
||||
df_name = group['df_name']
|
||||
size = group.get('size', group_size)
|
||||
|
||||
# Create the special group tuples
|
||||
special_tuples = []
|
||||
for i in range(size):
|
||||
first_col = first_col_start + i
|
||||
last_col = first_col + last_col_offset
|
||||
special_tuples.append((df_name, [first_col, last_col]))
|
||||
|
||||
tuples.insert(position, special_tuples)
|
||||
result.insert(0, special_groups)
|
||||
|
||||
|
||||
return tuples
|
||||
return result
|
||||
|
||||
|
||||
# file_path = os.path.join(base_path, f"zzz{prefix}D{file_index}.TXT")
|
||||
|
||||
@@ -19,8 +19,15 @@ special_groups_B = [
|
||||
]
|
||||
|
||||
# --- Build the (filename, column-pair) indices for the two datasets ---

# Dataset B: tuples generated from the predefined special groups.
b = generate_df_tuples(special_groups=special_groups_B, prefix="zzzBD")

# Dataset A: the special groups are the complement pairs of every file
# 1..30 — presumably the alternative "undamaged case" data; confirm
# against the experiment description.
a_complement = [pair for n in range(1, 31) for pair in complement_pairs(n)]
a = generate_df_tuples(special_groups=a_complement, prefix="zzzAD")

# --- Load each dataset and export it to raw CSV ---

data_A = DataProcessor(file_index=a, base_path="D:/thesis/data/dataset_A", include_time=True)
data_A.export_to_csv("D:/thesis/data/converted/raw")

data_B = DataProcessor(file_index=b, base_path="D:/thesis/data/dataset_B", include_time=True)
data_B.export_to_csv("D:/thesis/data/converted/raw_B")
||||
|
||||
Reference in New Issue
Block a user