fix(src): update damage base path and adjust STFT processing parameters

This commit is contained in:
nuluh
2025-10-16 12:16:38 +07:00
parent 511014d37d
commit e347b63e6e
2 changed files with 17 additions and 11 deletions

View File

@@ -4,7 +4,7 @@ import re
import sys import sys
import numpy as np import numpy as np
from colorama import Fore, Style, init from colorama import Fore, Style, init
from typing import TypedDict, Dict, List from typing import TypedDict, Tuple, List
from joblib import load from joblib import load
from pprint import pprint from pprint import pprint
@@ -35,8 +35,8 @@ def complement_pairs(n, prefix, extension):
if a != orig_a: # skip original a if a != orig_a: # skip original a
yield (filename, [a, a + 25]) # use yield instead of return to return a generator of tuples yield (filename, [a, a + 25]) # use yield instead of return to return a generator of tuples
def generate_df_tuples(prefix: str, total_dfs: int=30, extension: str="TXT", first_col_start: int=1, last_col_offset: int=25, def generate_df_tuples(prefix: str, extension: str="TXT", first_col_start: int=1, last_col_offset: int=25,
group_size: int=5, special_groups: list=None, group: bool=True): group_size: int=5, special_groups: list=None, group: bool=True, undamage_file: str=None) -> List[Tuple[str, List[int]]]:
""" """
Generate a structured list of tuples containing DataFrame references and column indices. Generate a structured list of tuples containing DataFrame references and column indices.
@@ -78,6 +78,12 @@ def generate_df_tuples(prefix: str, total_dfs: int=30, extension: str="TXT", fir
# Add special groups at specified positions (other than beginning) # Add special groups at specified positions (other than beginning)
if special_groups: if special_groups:
result.insert(0, special_groups) result.insert(0, special_groups)
if undamage_file:
for i in range (1, 6):
n = 5 + i
bottom_end = i
top_end = bottom_end + 25
result[0].append((undamage_file, [bottom_end, top_end]))
return result return result

View File

@@ -8,7 +8,7 @@ import multiprocessing # Added import for multiprocessing
from typing import Union, Tuple from typing import Union, Tuple
# Define the base directory where DAMAGE_X folders are located # Define the base directory where DAMAGE_X folders are located
damage_base_path = 'D:/thesis/data/converted/raw' damage_base_path = 'D:/thesis/data/converted/raw_B'
# Define output directories for each sensor # Define output directories for each sensor
output_dirs = { output_dirs = {
@@ -77,7 +77,7 @@ def process_damage_case(damage_num):
damage_folder = os.path.join(damage_base_path, f'DAMAGE_{damage_num}') damage_folder = os.path.join(damage_base_path, f'DAMAGE_{damage_num}')
if damage_num == 0: if damage_num == 0:
# Number of test runs per damage case # Number of test runs per damage case
num_test_runs = 120 num_test_runs = 125
else: else:
num_test_runs = 5 num_test_runs = 5
# Check if the damage folder exists # Check if the damage folder exists
@@ -122,12 +122,12 @@ def process_damage_case(damage_num):
# only include 21 samples vector features for first 45 num_test_runs else include 22 samples vector features # only include 21 samples vector features for first 45 num_test_runs else include 22 samples vector features
if damage_num == 0: if damage_num == 0:
print(f"Processing damage_num = 0, test_num = {test_num}") print(f"Processing damage_num = 0, test_num = {test_num}")
if test_num <= 45: if test_num <= 60:
df_stft = df_stft.iloc[:22, :] df_stft = df_stft.iloc[:20, :]
print(f"Reduced df_stft shape (21 samples): {df_stft.shape}") print(f"Reduced df_stft shape (20 samples): {df_stft.shape}")
else: else:
df_stft = df_stft.iloc[:21, :] df_stft = df_stft.iloc[:21, :]
print(f"Reduced df_stft shape (22 samples): {df_stft.shape}") print(f"Reduced df_stft shape (21 samples): {df_stft.shape}")
# Append to the aggregated list # Append to the aggregated list
aggregated_stft.append(df_stft) aggregated_stft.append(df_stft)