862 lines
26 KiB
Plaintext
862 lines
26 KiB
Plaintext
{
|
|
"cells": [
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"import pandas as pd\n",
|
|
"import numpy as np\n",
|
|
"import matplotlib.pyplot as plt"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"sensor1 = pd.read_csv('D:/thesis/data/converted/raw/DAMAGE_1/DAMAGE_1_TEST1_01.csv',sep=',')\n",
|
|
"sensor2 = pd.read_csv('D:/thesis/data/converted/raw/DAMAGE_1/DAMAGE_1_TEST1_02.csv',sep=',')"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"sensor1.columns"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"df1 = pd.DataFrame()\n",
|
|
"df1['s1'] = sensor1[sensor1.columns[-1]]\n",
|
|
"df1['s2'] = sensor2[sensor2.columns[-1]]\n",
|
|
"df1\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"def merge_two_sensors(damage_path, damage):\n",
|
|
" df = pd.DataFrame()\n",
|
|
" for file in os.listdir(damage_path):\n",
|
|
" pattern = re.compile(r'DAMAGE_\\d+_TEST\\d+_\\d{2}\\.csv')\n",
|
|
" try:\n",
|
|
" assert pattern.match(file), f\"File {file} does not match the required format, skipping...\"\n",
|
|
" # assert \"TEST01\" in file, f\"File {file} does not contain 'TEST01', skipping...\" #TODO: should be trained using the whole test file\n",
|
|
" print(f\"Processing file: {file}\")\n",
|
|
" # Append the full path of the file to sensor1 or sensor2 based on the filename\n",
|
|
" if file.endswith('_01.csv'):\n",
|
|
" df['sensor 1'] = pd.read_csv(os.path.join('D:/thesis/data/converted/raw', damage, file), sep=',', usecols=[1])\n",
|
|
" elif file.endswith('_02.csv'):\n",
|
|
" df['sensor 2'] = pd.read_csv(os.path.join('D:/thesis/data/converted/raw', damage, file), sep=',', usecols=[1])\n",
|
|
" except AssertionError as e:\n",
|
|
" print(e)\n",
|
|
" continue # Skip to the next iteration\n",
|
|
" return df"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"import os\n",
|
|
"import re\n",
|
|
"\n",
|
|
"df = []\n",
|
|
"for damage in os.listdir('D:/thesis/data/converted/raw'):\n",
|
|
" damage_path = os.path.join('D:/thesis/data/converted/raw', damage)\n",
|
|
" df.append(merge_two_sensors(damage_path, damage))\n",
|
|
" "
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"len(df)\n",
|
|
"df"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Combined Plot for sensor 1 and sensor 2 from data1 file in which motor is operated at 800 rpm\n",
|
|
"\n",
|
|
"plt.plot(df1['s2'], label='sensor 2')\n",
|
|
"plt.plot(df1['s1'], label='sensor 1', alpha=0.5)\n",
|
|
"plt.xlabel(\"Number of samples\")\n",
|
|
"plt.ylabel(\"Amplitude\")\n",
|
|
"plt.title(\"Raw vibration signal\")\n",
|
|
"plt.ylim(-7.5, 5)\n",
|
|
"plt.legend()\n",
|
|
"plt.show()"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"signal_sensor1_test1 = []\n",
|
|
"signal_sensor2_test1 = []\n",
|
|
"\n",
|
|
"for data in df:\n",
|
|
" signal_sensor1_test1.append(data['sensor 1'].values)\n",
|
|
" signal_sensor2_test1.append(data['sensor 2'].values)\n",
|
|
"\n",
|
|
"print(len(signal_sensor1_test1))\n",
|
|
"print(len(signal_sensor2_test1))"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"## Applying Short-Time Fourier Transform (STFT)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"os.getcwd()"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"import os\n",
|
|
"import pandas as pd\n",
|
|
"import numpy as np\n",
|
|
"from scipy.signal import stft, hann\n",
|
|
"from multiprocessing import Pool\n",
|
|
"\n",
|
|
"\n",
|
|
"\n",
|
|
"# Function to compute and append STFT data\n",
|
|
"def process_stft(args):\n",
|
|
" # Define STFT parameters\n",
|
|
" window_size = 1024\n",
|
|
" hop_size = 512\n",
|
|
" window = hann(window_size)\n",
|
|
"\n",
|
|
" Fs = 1024 # Sampling frequency in Hz\n",
|
|
" \n",
|
|
" damage_num, test_num, sensor_suffix = args\n",
|
|
" sensor_name = active_sensors[sensor_suffix]\n",
|
|
" sensor_num = sensor_suffix[-1] # '1' or '2'\n",
|
|
" \n",
|
|
" # Construct the file path\n",
|
|
" file_name = f'DAMAGE_{damage_num}_TEST{test_num}_{sensor_suffix}.csv'\n",
|
|
" file_path = os.path.join(damage_base_path, f'DAMAGE_{damage_num}', file_name)\n",
|
|
" \n",
|
|
" # Check if the file exists\n",
|
|
" if not os.path.isfile(file_path):\n",
|
|
" print(f\"File {file_path} does not exist. Skipping...\")\n",
|
|
" return\n",
|
|
" \n",
|
|
" # Read the CSV\n",
|
|
" try:\n",
|
|
" df = pd.read_csv(file_path)\n",
|
|
" except Exception as e:\n",
|
|
" print(f\"Error reading {file_path}: {e}. Skipping...\")\n",
|
|
" return\n",
|
|
" \n",
|
|
" # Ensure the CSV has exactly two columns\n",
|
|
" if df.shape[1] != 2:\n",
|
|
" print(f\"Unexpected number of columns in {file_path}. Skipping...\")\n",
|
|
" return\n",
|
|
" \n",
|
|
" # Extract sensor data\n",
|
|
" sensor_column = df.columns[1]\n",
|
|
" sensor_data = df[sensor_column].values\n",
|
|
" \n",
|
|
" # Compute STFT\n",
|
|
" frequencies, times, Zxx = stft(sensor_data, fs=Fs, window=window, nperseg=window_size, noverlap=window_size - hop_size)\n",
|
|
" magnitude = np.abs(Zxx)\n",
|
|
" flattened_stft = magnitude.flatten()\n",
|
|
" \n",
|
|
" # Define the output CSV file path\n",
|
|
" stft_file_name = f'stft_data{sensor_num}_{damage_num}.csv'\n",
|
|
" sensor_output_dir = os.path.join(damage_base_path, sensor_name.lower())\n",
|
|
" os.makedirs(sensor_output_dir, exist_ok=True)\n",
|
|
" stft_file_path = os.path.join(sensor_output_dir, stft_file_name)\n",
|
|
" print(stft_file_path)\n",
|
|
" # Append the flattened STFT to the CSV\n",
|
|
" try:\n",
|
|
" flattened_stft_df = pd.DataFrame([flattened_stft])\n",
|
|
" if not os.path.isfile(stft_file_path):\n",
|
|
" # Create a new CSV\n",
|
|
" flattened_stft_df.to_csv(stft_file_path, index=False, header=False)\n",
|
|
" else:\n",
|
|
" # Append to existing CSV\n",
|
|
" flattened_stft_df.to_csv(stft_file_path, mode='a', index=False, header=False)\n",
|
|
" print(f\"Appended STFT data to {stft_file_path}\")\n",
|
|
" except Exception as e:\n",
|
|
" print(f\"Error writing to {stft_file_path}: {e}\")"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Define the base path where DAMAGE_X folders are located\n",
|
|
"damage_base_path = 'D:/thesis/data/converted/raw/'\n",
|
|
"\n",
|
|
"# Define active sensors\n",
|
|
"active_sensors = {\n",
|
|
" '01': 'sensor1', # Beginning map sensor\n",
|
|
" '02': 'sensor2' # End map sensor\n",
|
|
"}\n",
|
|
"\n",
|
|
"# Define damage cases and test runs\n",
|
|
"damage_cases = range(1, 7) # Adjust based on actual number of damage cases\n",
|
|
"test_runs = range(1, 6) # TEST01 to TEST05\n",
|
|
"args_list = []\n",
|
|
"\n",
|
|
"# Prepare the list of arguments for parallel processing\n",
|
|
"for damage_num in damage_cases:\n",
|
|
" for test_num in test_runs:\n",
|
|
" for sensor_suffix in active_sensors.keys():\n",
|
|
" args_list.append((damage_num, test_num, sensor_suffix))\n",
|
|
"\n",
|
|
"print(len(args_list))\n",
|
|
"args_list"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Process STFTs sequentially instead of in parallel\n",
|
|
"if __name__ == \"__main__\":\n",
|
|
" print(f\"Starting sequential STFT processing...\")\n",
|
|
" for i, arg in enumerate(args_list, 1):\n",
|
|
" process_stft(arg)\n",
|
|
" print(f\"Processed {i}/{len(args_list)} files\")\n",
|
|
" print(\"STFT processing completed.\")"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"from scipy.signal import stft, hann\n",
|
|
"\n",
|
|
"# Applying STFT\n",
|
|
"vibration_data = signal_sensor1_test1[1]\n",
|
|
"window_size = 1024\n",
|
|
"hop_size = 512\n",
|
|
"window = hann(window_size) # Creating a Hanning window\n",
|
|
"Fs = 1024\n",
|
|
"\n",
|
|
"frequencies, times, Zxx = stft(vibration_data, \n",
|
|
" fs=Fs, \n",
|
|
" window=window, \n",
|
|
" nperseg=window_size, \n",
|
|
" noverlap=window_size - hop_size)\n",
|
|
"# Plotting the STFT Data\n",
|
|
"plt.pcolormesh(times, frequencies, np.abs(Zxx), shading='gouraud')\n",
|
|
"plt.title(f'STFT Magnitude for case {1} signal sensor 2')\n",
|
|
"plt.ylabel(f'Frequency [Hz]')\n",
|
|
"plt.xlabel(f'Time [sec]')\n",
|
|
"plt.show()\n",
|
|
"\n",
|
|
"# get current y ticks in list\n",
|
|
"print(len(frequencies))\n",
|
|
"print(len(times))\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"## Loading STFT Data from CSV Files"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"import os\n",
|
|
"os.listdir('D:/thesis/data/working')"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"import pandas as pd\n",
|
|
"import matplotlib.pyplot as plt\n",
|
|
"ready_data1 = []\n",
|
|
"for file in os.listdir('D:/thesis/data/working/sensor1'):\n",
|
|
" ready_data1.append(pd.read_csv(os.path.join('D:/thesis/data/working/sensor1', file)))\n",
|
|
"# ready_data1[1]\n",
|
|
"# colormesh give title x is frequency and y is time and rotate/transpose the data\n",
|
|
"# Plotting the STFT Data"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"ready_data1[1]\n",
|
|
"plt.pcolormesh(ready_data1[1])"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"for i in range(6):\n",
|
|
" plt.pcolormesh(ready_data1[i])\n",
|
|
" plt.title(f'STFT Magnitude for case {i} sensor 1')\n",
|
|
" plt.xlabel(f'Frequency [Hz]')\n",
|
|
" plt.ylabel(f'Time [sec]')\n",
|
|
" plt.show()"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"ready_data2 = []\n",
|
|
"for file in os.listdir('D:/thesis/data/working/sensor2'):\n",
|
|
" ready_data2.append(pd.read_csv(os.path.join('D:/thesis/data/working/sensor2', file)))\n",
|
|
"ready_data2[5]"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"print(len(ready_data1))\n",
|
|
"print(len(ready_data2))"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"x1 = 0\n",
|
|
"\n",
|
|
"for i in range(len(ready_data1)):\n",
|
|
" print(ready_data1[i].shape)\n",
|
|
" x1 = x1 + ready_data1[i].shape[0]\n",
|
|
"\n",
|
|
"print(x1)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"x2 = 0\n",
|
|
"\n",
|
|
"for i in range(len(ready_data2)):\n",
|
|
" print(ready_data2[i].shape)\n",
|
|
" x2 = x2 + ready_data2[i].shape[0]\n",
|
|
"\n",
|
|
"print(x2)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"x1 = ready_data1[0]\n",
|
|
"# print(x1)\n",
|
|
"print(type(x1))\n",
|
|
"for i in range(len(ready_data1) - 1):\n",
|
|
" #print(i)\n",
|
|
" x1 = np.concatenate((x1, ready_data1[i + 1]), axis=0)\n",
|
|
"# print(x1)\n",
|
|
"pd.DataFrame(x1)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"x2 = ready_data2[0]\n",
|
|
"\n",
|
|
"for i in range(len(ready_data2) - 1):\n",
|
|
" #print(i)\n",
|
|
" x2 = np.concatenate((x2, ready_data2[i + 1]), axis=0)\n",
|
|
"pd.DataFrame(x2)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"print(x1.shape)\n",
|
|
"print(x2.shape)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"y_1 = [1,1,1,1]\n",
|
|
"y_2 = [0,1,1,1]\n",
|
|
"y_3 = [1,0,1,1]\n",
|
|
"y_4 = [1,1,0,0]"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"y_1 = 0\n",
|
|
"y_2 = 1\n",
|
|
"y_3 = 2\n",
|
|
"y_4 = 3\n",
|
|
"y_5 = 4\n",
|
|
"y_6 = 5"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"y_data = [y_1, y_2, y_3, y_4, y_5, y_6]"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"for i in range(len(y_data)):\n",
|
|
" print(ready_data1[i].shape[0])"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"for i in range(len(y_data)):\n",
|
|
" y_data[i] = [y_data[i]]*ready_data1[i].shape[0]\n",
|
|
" y_data[i] = np.array(y_data[i])"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"y_data"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"y = y_data[0]\n",
|
|
"\n",
|
|
"for i in range(len(y_data) - 1):\n",
|
|
" #print(i)\n",
|
|
" y = np.concatenate((y, y_data[i+1]), axis=0)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"print(y.shape)\n",
|
|
"print(np.unique(y))"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"from sklearn.model_selection import train_test_split\n",
|
|
"\n",
|
|
"x_train1, x_test1, y_train, y_test = train_test_split(x1, y, test_size=0.2, random_state=2)\n",
|
|
"x_train2, x_test2, y_train, y_test = train_test_split(x2, y, test_size=0.2, random_state=2)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"from sklearn.model_selection import train_test_split\n",
|
|
"from sklearn.metrics import accuracy_score\n",
|
|
"from sklearn.ensemble import RandomForestClassifier, BaggingClassifier\n",
|
|
"from sklearn.tree import DecisionTreeClassifier\n",
|
|
"from sklearn.neighbors import KNeighborsClassifier\n",
|
|
"from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n",
|
|
"from sklearn.svm import SVC\n",
|
|
"from xgboost import XGBClassifier"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Check the shapes of x_train and y_train\n",
|
|
"print(\"Shape of x1_train:\", x_train1.shape)\n",
|
|
"print(\"Shape of x2_train:\", x_train2.shape)\n",
|
|
"print(\"Shape of y_train:\", y_train.shape)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"accuracies1 = []\n",
|
|
"accuracies2 = []\n",
|
|
"\n",
|
|
"\n",
|
|
"# 1. Random Forest\n",
|
|
"rf_model = RandomForestClassifier()\n",
|
|
"rf_model.fit(x_train1, y_train)\n",
|
|
"rf_pred1 = rf_model.predict(x_test1)\n",
|
|
"acc1 = accuracy_score(y_test, rf_pred1) * 100\n",
|
|
"accuracies1.append(acc1)\n",
|
|
"# format with color coded if acc1 > 90\n",
|
|
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
|
"print(\"Random Forest Accuracy for sensor 1:\", acc1)\n",
|
|
"rf_model.fit(x_train2, y_train)\n",
|
|
"rf_pred2 = rf_model.predict(x_test2)\n",
|
|
"acc2 = accuracy_score(y_test, rf_pred2) * 100\n",
|
|
"accuracies2.append(acc2)\n",
|
|
"# format with color coded if acc2 > 90\n",
|
|
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
|
|
"print(\"Random Forest Accuracy for sensor 2:\", acc2)\n",
|
|
"# print(rf_pred)\n",
|
|
"# print(y_test)\n",
|
|
"\n",
|
|
"# 2. Bagged Trees\n",
|
|
"bagged_model = BaggingClassifier(estimator=DecisionTreeClassifier(), n_estimators=10)\n",
|
|
"bagged_model.fit(x_train1, y_train)\n",
|
|
"bagged_pred1 = bagged_model.predict(x_test1)\n",
|
|
"acc1 = accuracy_score(y_test, bagged_pred1) * 100\n",
|
|
"accuracies1.append(acc1)\n",
|
|
"# format with color coded if acc1 > 90\n",
|
|
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
|
"print(\"Bagged Trees Accuracy for sensor 1:\", acc1)\n",
|
|
"bagged_model.fit(x_train2, y_train)\n",
|
|
"bagged_pred2 = bagged_model.predict(x_test2)\n",
|
|
"acc2 = accuracy_score(y_test, bagged_pred2) * 100\n",
|
|
"accuracies2.append(acc2)\n",
|
|
"# format with color coded if acc2 > 90\n",
|
|
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
|
|
"print(\"Bagged Trees Accuracy for sensor 2:\", acc2)\n",
|
|
"\n",
|
|
"# 3. Decision Tree\n",
|
|
"dt_model = DecisionTreeClassifier()\n",
|
|
"dt_model.fit(x_train1, y_train)\n",
|
|
"dt_pred1 = dt_model.predict(x_test1)\n",
|
|
"acc1 = accuracy_score(y_test, dt_pred1) * 100\n",
|
|
"accuracies1.append(acc1)\n",
|
|
"# format with color coded if acc1 > 90\n",
|
|
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
|
"print(\"Decision Tree Accuracy for sensor 1:\", acc1)\n",
|
|
"dt_model.fit(x_train2, y_train)\n",
|
|
"dt_pred2 = dt_model.predict(x_test2)\n",
|
|
"acc2 = accuracy_score(y_test, dt_pred2) * 100\n",
|
|
"accuracies2.append(acc2)\n",
|
|
"# format with color coded if acc2 > 90\n",
|
|
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
|
|
"print(\"Decision Tree Accuracy for sensor 2:\", acc2)\n",
|
|
"\n",
|
|
"# 4. KNeighbors\n",
|
|
"knn_model = KNeighborsClassifier()\n",
|
|
"knn_model.fit(x_train1, y_train)\n",
|
|
"knn_pred1 = knn_model.predict(x_test1)\n",
|
|
"acc1 = accuracy_score(y_test, knn_pred1) * 100\n",
|
|
"accuracies1.append(acc1)\n",
|
|
"# format with color coded if acc1 > 90\n",
|
|
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
|
"print(\"KNeighbors Accuracy for sensor 1:\", acc1)\n",
|
|
"knn_model.fit(x_train2, y_train)\n",
|
|
"knn_pred2 = knn_model.predict(x_test2)\n",
|
|
"acc2 = accuracy_score(y_test, knn_pred2) * 100\n",
|
|
"accuracies2.append(acc2)\n",
|
|
"# format with color coded if acc2 > 90\n",
|
|
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
|
|
"print(\"KNeighbors Accuracy for sensor 2:\", acc2)\n",
|
|
"\n",
|
|
"# 5. Linear Discriminant Analysis\n",
|
|
"lda_model = LinearDiscriminantAnalysis()\n",
|
|
"lda_model.fit(x_train1, y_train)\n",
|
|
"lda_pred1 = lda_model.predict(x_test1)\n",
|
|
"acc1 = accuracy_score(y_test, lda_pred1) * 100\n",
|
|
"accuracies1.append(acc1)\n",
|
|
"# format with color coded if acc1 > 90\n",
|
|
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
|
"print(\"Linear Discriminant Analysis Accuracy for sensor 1:\", acc1)\n",
|
|
"lda_model.fit(x_train2, y_train)\n",
|
|
"lda_pred2 = lda_model.predict(x_test2)\n",
|
|
"acc2 = accuracy_score(y_test, lda_pred2) * 100\n",
|
|
"accuracies2.append(acc2)\n",
|
|
"# format with color coded if acc2 > 90\n",
|
|
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
|
|
"print(\"Linear Discriminant Analysis Accuracy for sensor 2:\", acc2)\n",
|
|
"\n",
|
|
"# 6. Support Vector Machine\n",
|
|
"svm_model = SVC()\n",
|
|
"svm_model.fit(x_train1, y_train)\n",
|
|
"svm_pred1 = svm_model.predict(x_test1)\n",
|
|
"acc1 = accuracy_score(y_test, svm_pred1) * 100\n",
|
|
"accuracies1.append(acc1)\n",
|
|
"# format with color coded if acc1 > 90\n",
|
|
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
|
"print(\"Support Vector Machine Accuracy for sensor 1:\", acc1)\n",
|
|
"svm_model.fit(x_train2, y_train)\n",
|
|
"svm_pred2 = svm_model.predict(x_test2)\n",
|
|
"acc2 = accuracy_score(y_test, svm_pred2) * 100\n",
|
|
"accuracies2.append(acc2)\n",
|
|
"# format with color coded if acc2 > 90\n",
|
|
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
|
|
"print(\"Support Vector Machine Accuracy for sensor 2:\", acc2)\n",
|
|
"\n",
|
|
"# 7. XGBoost\n",
|
|
"xgboost_model = XGBClassifier()\n",
|
|
"xgboost_model.fit(x_train1, y_train)\n",
|
|
"xgboost_pred1 = xgboost_model.predict(x_test1)\n",
|
|
"acc1 = accuracy_score(y_test, xgboost_pred1) * 100\n",
|
|
"accuracies1.append(acc1)\n",
|
|
"# format with color coded if acc1 > 90\n",
|
|
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
|
"print(\"XGBoost Accuracy:\", acc1)\n",
|
|
"xgboost_model.fit(x_train2, y_train)\n",
|
|
"xgboost_pred2 = xgboost_model.predict(x_test2)\n",
|
|
"acc2 = accuracy_score(y_test, xgboost_pred2) * 100\n",
|
|
"accuracies2.append(acc2)\n",
|
|
"# format with color coded if acc2 > 90\n",
|
|
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
|
|
"print(\"XGBoost Accuracy:\", acc2)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"print(accuracies1)\n",
|
|
"print(accuracies2)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"import numpy as np\n",
|
|
"import matplotlib.pyplot as plt\n",
|
|
"\n",
|
|
"models = [rf_model, bagged_model, dt_model, knn_model, lda_model, svm_model, xgboost_model]\n",
|
|
"model_names = [\"Random Forest\", \"Bagged Trees\", \"Decision Tree\", \"KNN\", \"LDA\", \"SVM\", \"XGBoost\"]\n",
|
|
"\n",
|
|
"bar_width = 0.35 # Width of each bar\n",
|
|
"index = np.arange(len(model_names)) # Index for the bars\n",
|
|
"\n",
|
|
"# Plotting the bar graph\n",
|
|
"plt.figure(figsize=(14, 8))\n",
|
|
"\n",
|
|
"# Bar plot for Sensor 1\n",
|
|
"plt.bar(index, accuracies1, width=bar_width, color='blue', label='Sensor 1')\n",
|
|
"\n",
|
|
"# Bar plot for Sensor 2\n",
|
|
"plt.bar(index + bar_width, accuracies2, width=bar_width, color='orange', label='Sensor 2')\n",
|
|
"\n",
|
|
"# Add values on top of each bar\n",
|
|
"for i, acc1, acc2 in zip(index, accuracies1, accuracies2):\n",
|
|
" plt.text(i, acc1 + .1, f'{acc1:.2f}%', ha='center', va='bottom', color='black')\n",
|
|
" plt.text(i + bar_width, acc2 + 1, f'{acc2:.2f}%', ha='center', va='bottom', color='black')\n",
|
|
"\n",
|
|
"# Customize the plot\n",
|
|
"plt.xlabel('Model Name →')\n",
|
|
"plt.ylabel('Accuracy →')\n",
|
|
"plt.title('Accuracy of classifiers for Sensors 1 and 2 with 513 features')\n",
|
|
"plt.xticks(index + bar_width / 2, model_names) # Set x-tick positions\n",
|
|
"plt.legend()\n",
|
|
"plt.ylim(0, 100)\n",
|
|
"\n",
|
|
"# Show the plot\n",
|
|
"plt.show()\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"import pandas as pd\n",
|
|
"import numpy as np\n",
|
|
"import os\n",
|
|
"import matplotlib.pyplot as plt"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"def spectograph(data_dir: str):\n",
|
|
" # print(os.listdir(data_dir))\n",
|
|
" for damage in os.listdir(data_dir):\n",
|
|
" # print(damage)\n",
|
|
" d = os.path.join(data_dir, damage)\n",
|
|
" # print(d)\n",
|
|
" for file in os.listdir(d):\n",
|
|
" # print(file)\n",
|
|
" f = os.path.join(d, file)\n",
|
|
" print(f)\n",
|
|
" # sensor1 = pd.read_csv(f, skiprows=1, sep=';')\n",
|
|
" # sensor2 = pd.read_csv(f, skiprows=1, sep=';')\n",
|
|
"\n",
|
|
" # df1 = pd.DataFrame()\n",
|
|
"\n",
|
|
" # df1['s1'] = sensor1[sensor1.columns[-1]]\n",
|
|
" # df1['s2'] = sensor2[sensor2.columns[-1]]\n",
|
|
" # # Combined Plot for sensor 1 and sensor 2 from data1 file in which motor is operated at 800 rpm\n",
|
|
"\n",
|
|
" # plt.plot(df1['s2'], label='sensor 2')\n",
|
|
" # plt.plot(df1['s1'], label='sensor 1')\n",
|
|
" # plt.xlabel(\"Number of samples\")\n",
|
|
" # plt.ylabel(\"Amplitude\")\n",
|
|
" # plt.title(\"Raw vibration signal\")\n",
|
|
" # plt.legend()\n",
|
|
" # plt.show()\n",
|
|
"\n",
|
|
" # from scipy import signal\n",
|
|
" # from scipy.signal.windows import hann\n",
|
|
"\n",
|
|
" # vibration_data = df1['s1']\n",
|
|
"\n",
|
|
" # # Applying STFT\n",
|
|
" # window_size = 1024\n",
|
|
" # hop_size = 512\n",
|
|
" # window = hann(window_size) # Creating a Hanning window\n",
|
|
" # frequencies, times, Zxx = signal.stft(vibration_data, window=window, nperseg=window_size, noverlap=window_size - hop_size)\n",
|
|
"\n",
|
|
" # # Plotting the STFT Data\n",
|
|
" # plt.pcolormesh(times, frequencies, np.abs(Zxx), shading='gouraud')\n",
|
|
" # plt.title(f'STFT Magnitude for case 1 signal sensor 1 ')\n",
|
|
" # plt.ylabel('Frequency [Hz]')\n",
|
|
" # plt.xlabel('Time [sec]')\n",
|
|
" # plt.show()"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"## Test with Outside of Its Training Data"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": []
|
|
}
|
|
],
|
|
"metadata": {
|
|
"kernelspec": {
|
|
"display_name": "Python 3",
|
|
"language": "python",
|
|
"name": "python3"
|
|
},
|
|
"language_info": {
|
|
"codemirror_mode": {
|
|
"name": "ipython",
|
|
"version": 3
|
|
},
|
|
"file_extension": ".py",
|
|
"mimetype": "text/x-python",
|
|
"name": "python",
|
|
"nbconvert_exporter": "python",
|
|
"pygments_lexer": "ipython3",
|
|
"version": "3.10.8"
|
|
}
|
|
},
|
|
"nbformat": 4,
|
|
"nbformat_minor": 2
|
|
}
|