Add Working Milestone with Initial Results and Model Inference (#82)

* wip: add function to create stratified train-test split from STFT data

* feat(src): implement working function for dataset B to create ready data from STFT files and add setup.py for package configuration

* feat(notebook): Update variable names for clarity, remove unused imports, and streamline data processing. Implement data concatenation using pandas concat for efficiency. Add validation steps for Dataset B and improve model training consistency across sensors.

* fix(.gitignore): add rule to ignore egg-info directories and ensure proper formatting

* docs(README): add instructions for running stft.ipynb notebook

* feat(notebook): Add evaluation metrics and confusion matrix visualizations for model predictions on Dataset B. Remove commented-out code and integrate data preparation using create_ready_data function.

---------

Co-authored-by: nuluh <dam.ar@outlook.com>
This commit was merged in pull request #82.
This commit is contained in:
Rifqi D. Panuluh
2025-05-24 01:30:10 +07:00
committed by GitHub
parent a32415cebf
commit d151062115
7 changed files with 305 additions and 132 deletions

0
code/src/ml/__init__.py Normal file
View File

View File

@@ -0,0 +1,57 @@
import numpy as np
import pandas as pd
import os
from sklearn.model_selection import train_test_split as sklearn_split
def create_ready_data(
    stft_data_path: str,
    stratify: np.ndarray = None,
) -> tuple:
    """
    Load per-class STFT CSV files and build a combined feature matrix with labels.

    Each CSV file found in ``stft_data_path`` is treated as one class: every
    row of the i-th file (in sorted filename order) receives integer label
    ``i``. Sorting makes label assignment deterministic — ``os.listdir``
    returns files in arbitrary, platform-dependent order.

    Parameters
    ----------
    stft_data_path : str
        Path to the directory containing STFT data files
        (e.g. 'data/converted/raw/sensor1').
    stratify : np.ndarray, optional
        Currently unused; kept for backward compatibility with existing
        callers. NOTE(review): despite the original docstring, no stratified
        train-test split is performed here — the caller must split (X, y)
        itself (e.g. with sklearn's train_test_split).

    Returns
    -------
    tuple
        ``(X, y)`` where ``X`` is a pandas DataFrame of all file rows
        concatenated and ``y`` is a 1-D numpy integer array with one class
        label per row. An empty directory yields an empty DataFrame and an
        empty array.
    """
    # Deterministic file order so that label i always maps to the same file.
    frames = [
        pd.read_csv(os.path.join(stft_data_path, file))
        for file in sorted(os.listdir(stft_data_path))
    ]
    if not frames:
        print("No data available in ready_data list")
        return pd.DataFrame(), np.array([])
    # One concat call instead of iterative appending (avoids quadratic copies).
    X = pd.concat(frames, axis=0, ignore_index=True)
    # Label i repeated once per row of the i-th file, then flattened.
    y = np.concatenate(
        [np.full(frame.shape[0], label) for label, frame in enumerate(frames)]
    )
    return X, y