Compare commits
5 Commits
feature/48
...
latex/54-d
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4a796694bf | ||
|
|
6357136e6c | ||
|
|
c7584e2dd8 | ||
|
|
80ee9a3ec4 | ||
|
|
f9f346a57e |
1
.gitignore
vendored
1
.gitignore
vendored
@@ -2,4 +2,3 @@
|
||||
data/**/*.csv
|
||||
.venv/
|
||||
*.pyc
|
||||
*.egg-info/
|
||||
3
.vscode/settings.json
vendored
3
.vscode/settings.json
vendored
@@ -1,4 +1,3 @@
|
||||
{
|
||||
"python.analysis.extraPaths": ["./code/src/features"],
|
||||
"jupyter.notebookFileRoot": "${workspaceFolder}/code"
|
||||
"python.analysis.extraPaths": ["./code/src/features"]
|
||||
}
|
||||
|
||||
@@ -16,8 +16,3 @@ The repository is private and access is restricted only to those who have been g
|
||||
All contents of this repository, including the thesis idea, code, and associated data, are copyrighted © 2024 by Rifqi Panuluh. Unauthorized use or duplication is prohibited.
|
||||
|
||||
[LICENSE](https://github.com/nuluh/thesis?tab=License-1-ov-file#readme)
|
||||
|
||||
## How to Run `stft.ipynb`
|
||||
|
||||
1. run `pip install -e .` in root project first
|
||||
2. run the notebook
|
||||
@@ -121,9 +121,8 @@
|
||||
"signal_sensor2_test1 = []\n",
|
||||
"\n",
|
||||
"for data in df:\n",
|
||||
" if not data.empty and 'sensor 1' in data.columns and 'sensor 2' in data.columns:\n",
|
||||
" signal_sensor1_test1.append(data['sensor 1'].values)\n",
|
||||
" signal_sensor2_test1.append(data['sensor 2'].values)\n",
|
||||
" signal_sensor1_test1.append(data['sensor 1'].values)\n",
|
||||
" signal_sensor2_test1.append(data['sensor 2'].values)\n",
|
||||
"\n",
|
||||
"print(len(signal_sensor1_test1))\n",
|
||||
"print(len(signal_sensor2_test1))"
|
||||
@@ -155,7 +154,9 @@
|
||||
"import pandas as pd\n",
|
||||
"import numpy as np\n",
|
||||
"from scipy.signal import stft, hann\n",
|
||||
"# from multiprocessing import Pool\n",
|
||||
"from multiprocessing import Pool\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Function to compute and append STFT data\n",
|
||||
"def process_stft(args):\n",
|
||||
@@ -198,22 +199,23 @@
|
||||
" # Compute STFT\n",
|
||||
" frequencies, times, Zxx = stft(sensor_data, fs=Fs, window=window, nperseg=window_size, noverlap=window_size - hop_size)\n",
|
||||
" magnitude = np.abs(Zxx)\n",
|
||||
" df_stft = pd.DataFrame(magnitude, index=frequencies, columns=times).T\n",
|
||||
" df_stft.columns = [f\"Freq_{i}\" for i in frequencies]\n",
|
||||
" flattened_stft = magnitude.flatten()\n",
|
||||
" \n",
|
||||
" # Define the output CSV file path\n",
|
||||
" stft_file_name = f'stft_data{sensor_num}_{damage_num}.csv'\n",
|
||||
" sensor_output_dir = os.path.join(damage_base_path, sensor_name.lower())\n",
|
||||
" os.makedirs(sensor_output_dir, exist_ok=True)\n",
|
||||
" stft_file_path = os.path.join(sensor_output_dir, stft_file_name)\n",
|
||||
" print(stft_file_path)\n",
|
||||
" # Append the flattened STFT to the CSV\n",
|
||||
" try:\n",
|
||||
" flattened_stft_df = pd.DataFrame([flattened_stft])\n",
|
||||
" if not os.path.isfile(stft_file_path):\n",
|
||||
" # Create a new CSV\n",
|
||||
" df_stft.to_csv(stft_file_path, index=False, header=False)\n",
|
||||
" flattened_stft_df.to_csv(stft_file_path, index=False, header=False)\n",
|
||||
" else:\n",
|
||||
" # Append to existing CSV\n",
|
||||
" df_stft.to_csv(stft_file_path, mode='a', index=False, header=False)\n",
|
||||
" flattened_stft_df.to_csv(stft_file_path, mode='a', index=False, header=False)\n",
|
||||
" print(f\"Appended STFT data to {stft_file_path}\")\n",
|
||||
" except Exception as e:\n",
|
||||
" print(f\"Error writing to {stft_file_path}: {e}\")"
|
||||
@@ -293,7 +295,7 @@
|
||||
"\n",
|
||||
"# get current y ticks in list\n",
|
||||
"print(len(frequencies))\n",
|
||||
"print(len(times))"
|
||||
"print(len(times))\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -321,9 +323,10 @@
|
||||
"source": [
|
||||
"import pandas as pd\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"ready_data1a = []\n",
|
||||
"ready_data1 = []\n",
|
||||
"for file in os.listdir('D:/thesis/data/converted/raw/sensor1'):\n",
|
||||
" ready_data1a.append(pd.read_csv(os.path.join('D:/thesis/data/converted/raw/sensor1', file)))\n",
|
||||
" ready_data1.append(pd.read_csv(os.path.join('D:/thesis/data/converted/raw/sensor1', file)))\n",
|
||||
"ready_data1[0]\n",
|
||||
"# colormesh give title x is frequency and y is time and rotate/transpose the data\n",
|
||||
"# Plotting the STFT Data"
|
||||
]
|
||||
@@ -334,8 +337,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"len(ready_data1a)\n",
|
||||
"# plt.pcolormesh(ready_data1[0])"
|
||||
"ready_data1[1]\n",
|
||||
"plt.pcolormesh(ready_data1[1])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -345,7 +348,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for i in range(6):\n",
|
||||
" plt.pcolormesh(ready_data1a[i])\n",
|
||||
" plt.pcolormesh(ready_data1[i])\n",
|
||||
" plt.title(f'STFT Magnitude for case {i} sensor 1')\n",
|
||||
" plt.xlabel(f'Frequency [Hz]')\n",
|
||||
" plt.ylabel(f'Time [sec]')\n",
|
||||
@@ -358,9 +361,10 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ready_data2a = []\n",
|
||||
"ready_data2 = []\n",
|
||||
"for file in os.listdir('D:/thesis/data/converted/raw/sensor2'):\n",
|
||||
" ready_data2a.append(pd.read_csv(os.path.join('D:/thesis/data/converted/raw/sensor2', file)))"
|
||||
" ready_data2.append(pd.read_csv(os.path.join('D:/thesis/data/converted/raw/sensor2', file)))\n",
|
||||
"ready_data2[5]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -369,8 +373,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(len(ready_data1a))\n",
|
||||
"print(len(ready_data2a))"
|
||||
"print(len(ready_data1))\n",
|
||||
"print(len(ready_data2))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -379,16 +383,35 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"x1a = 0\n",
|
||||
"print(type(ready_data1a[0]))\n",
|
||||
"ready_data1a[0].iloc[:,0]"
|
||||
"x1 = 0\n",
|
||||
"\n",
|
||||
"for i in range(len(ready_data1)):\n",
|
||||
" print(ready_data1[i].shape)\n",
|
||||
" x1 = x1 + ready_data1[i].shape[0]\n",
|
||||
"\n",
|
||||
"print(x1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"x2 = 0\n",
|
||||
"\n",
|
||||
"for i in range(len(ready_data2)):\n",
|
||||
" print(ready_data2[i].shape)\n",
|
||||
" x2 = x2 + ready_data2[i].shape[0]\n",
|
||||
"\n",
|
||||
"print(x2)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Checking length of the total array"
|
||||
"### Appending"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -397,14 +420,14 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"x1a = 0\n",
|
||||
"print(type(x1a))\n",
|
||||
"for i in range(len(ready_data1a)):\n",
|
||||
" print(type(ready_data1a[i].shape[0]))\n",
|
||||
" x1a = x1a + ready_data1a[i].shape[0]\n",
|
||||
" print(type(x1a))\n",
|
||||
"\n",
|
||||
"print(x1a)"
|
||||
"x1 = ready_data1[0]\n",
|
||||
"# print(x1)\n",
|
||||
"print(type(x1))\n",
|
||||
"for i in range(len(ready_data1) - 1):\n",
|
||||
" #print(i)\n",
|
||||
" x1 = np.concatenate((x1, ready_data1[i + 1]), axis=0)\n",
|
||||
"# print(x1)\n",
|
||||
"pd.DataFrame(x1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -413,75 +436,29 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"x2a = 0\n",
|
||||
"x2 = ready_data2[0]\n",
|
||||
"\n",
|
||||
"for i in range(len(ready_data2a)):\n",
|
||||
" print(ready_data2a[i].shape)\n",
|
||||
" x2a = x2a + ready_data2a[i].shape[0]\n",
|
||||
"\n",
|
||||
"print(x2a)"
|
||||
"for i in range(len(ready_data2) - 1):\n",
|
||||
" #print(i)\n",
|
||||
" x2 = np.concatenate((x2, ready_data2[i + 1]), axis=0)\n",
|
||||
"pd.DataFrame(x2)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(x1.shape)\n",
|
||||
"print(x2.shape)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Flatten 6 array into one array"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Combine all dataframes in ready_data1a into a single dataframe\n",
|
||||
"if ready_data1a: # Check if the list is not empty\n",
|
||||
" # Use pandas concat function instead of iterative concatenation\n",
|
||||
" combined_data = pd.concat(ready_data1a, axis=0, ignore_index=True)\n",
|
||||
" \n",
|
||||
" print(f\"Type of combined data: {type(combined_data)}\")\n",
|
||||
" print(f\"Shape of combined data: {combined_data.shape}\")\n",
|
||||
" \n",
|
||||
" # Display the combined dataframe\n",
|
||||
" combined_data\n",
|
||||
"else:\n",
|
||||
" print(\"No data available in ready_data1a list\")\n",
|
||||
" combined_data = pd.DataFrame()\n",
|
||||
"\n",
|
||||
"# Store the result in x1a for compatibility with subsequent code\n",
|
||||
"x1a = combined_data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Combine all dataframes in ready_data1a into a single dataframe\n",
|
||||
"if ready_data2a: # Check if the list is not empty\n",
|
||||
" # Use pandas concat function instead of iterative concatenation\n",
|
||||
" combined_data = pd.concat(ready_data2a, axis=0, ignore_index=True)\n",
|
||||
" \n",
|
||||
" print(f\"Type of combined data: {type(combined_data)}\")\n",
|
||||
" print(f\"Shape of combined data: {combined_data.shape}\")\n",
|
||||
" \n",
|
||||
" # Display the combined dataframe\n",
|
||||
" combined_data\n",
|
||||
"else:\n",
|
||||
" print(\"No data available in ready_data1a list\")\n",
|
||||
" combined_data = pd.DataFrame()\n",
|
||||
"\n",
|
||||
"# Store the result in x1a for compatibility with subsequent code\n",
|
||||
"x2a = combined_data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Creating the label"
|
||||
"### Labeling"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -504,8 +481,7 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"y_data = [y_1, y_2, y_3, y_4, y_5, y_6]\n",
|
||||
"y_data"
|
||||
"y_data = [y_1, y_2, y_3, y_4, y_5, y_6]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -515,7 +491,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for i in range(len(y_data)):\n",
|
||||
" print(ready_data1a[i].shape[0])"
|
||||
" print(ready_data1[i].shape[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -524,9 +500,19 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"for i in range(len(y_data)):\n",
|
||||
" y_data[i] = [y_data[i]]*ready_data1a[i].shape[0]"
|
||||
" print(ready_data2[i].shape[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for i in range(len(y_data)):\n",
|
||||
" y_data[i] = [y_data[i]]*ready_data1[i].shape[0]\n",
|
||||
" y_data[i] = np.array(y_data[i])"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -536,7 +522,7 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# len(y_data[0])\n",
|
||||
"y_data"
|
||||
"y_data[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -568,10 +554,10 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from src.ml.model_selection import create_ready_data\n",
|
||||
"from sklearn.model_selection import train_test_split\n",
|
||||
"\n",
|
||||
"X1a, y = create_ready_data('D:/thesis/data/converted/raw/sensor1')\n",
|
||||
"X2a, y = create_ready_data('D:/thesis/data/converted/raw/sensor2')"
|
||||
"x_train1, x_test1, y_train, y_test = train_test_split(x1, y, test_size=0.2, random_state=2)\n",
|
||||
"x_train2, x_test2, y_train, y_test = train_test_split(x2, y, test_size=0.2, random_state=2)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -581,17 +567,6 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.model_selection import train_test_split\n",
|
||||
"\n",
|
||||
"x_train1, x_test1, y_train, y_test = train_test_split(X1a, y, test_size=0.2, random_state=2)\n",
|
||||
"x_train2, x_test2, y_train, y_test = train_test_split(X2a, y, test_size=0.2, random_state=2)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.metrics import accuracy_score\n",
|
||||
"from sklearn.ensemble import RandomForestClassifier, BaggingClassifier\n",
|
||||
"from sklearn.tree import DecisionTreeClassifier\n",
|
||||
@@ -624,17 +599,16 @@
|
||||
"\n",
|
||||
"\n",
|
||||
"# 1. Random Forest\n",
|
||||
"rf_model1 = RandomForestClassifier()\n",
|
||||
"rf_model1.fit(x_train1, y_train)\n",
|
||||
"rf_pred1 = rf_model1.predict(x_test1)\n",
|
||||
"rf_model = RandomForestClassifier()\n",
|
||||
"rf_model.fit(x_train1, y_train)\n",
|
||||
"rf_pred1 = rf_model.predict(x_test1)\n",
|
||||
"acc1 = accuracy_score(y_test, rf_pred1) * 100\n",
|
||||
"accuracies1.append(acc1)\n",
|
||||
"# format with color coded if acc1 > 90\n",
|
||||
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
||||
"print(\"Random Forest Accuracy for sensor 1:\", acc1)\n",
|
||||
"rf_model2 = RandomForestClassifier()\n",
|
||||
"rf_model2.fit(x_train2, y_train)\n",
|
||||
"rf_pred2 = rf_model2.predict(x_test2)\n",
|
||||
"rf_model.fit(x_train2, y_train)\n",
|
||||
"rf_pred2 = rf_model.predict(x_test2)\n",
|
||||
"acc2 = accuracy_score(y_test, rf_pred2) * 100\n",
|
||||
"accuracies2.append(acc2)\n",
|
||||
"# format with color coded if acc2 > 90\n",
|
||||
@@ -644,17 +618,16 @@
|
||||
"# print(y_test)\n",
|
||||
"\n",
|
||||
"# 2. Bagged Trees\n",
|
||||
"bagged_model1 = BaggingClassifier(estimator=DecisionTreeClassifier(), n_estimators=10)\n",
|
||||
"bagged_model1.fit(x_train1, y_train)\n",
|
||||
"bagged_pred1 = bagged_model1.predict(x_test1)\n",
|
||||
"bagged_model = BaggingClassifier(estimator=DecisionTreeClassifier(), n_estimators=10)\n",
|
||||
"bagged_model.fit(x_train1, y_train)\n",
|
||||
"bagged_pred1 = bagged_model.predict(x_test1)\n",
|
||||
"acc1 = accuracy_score(y_test, bagged_pred1) * 100\n",
|
||||
"accuracies1.append(acc1)\n",
|
||||
"# format with color coded if acc1 > 90\n",
|
||||
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
||||
"print(\"Bagged Trees Accuracy for sensor 1:\", acc1)\n",
|
||||
"bagged_model2 = BaggingClassifier(estimator=DecisionTreeClassifier(), n_estimators=10)\n",
|
||||
"bagged_model2.fit(x_train2, y_train)\n",
|
||||
"bagged_pred2 = bagged_model2.predict(x_test2)\n",
|
||||
"bagged_model.fit(x_train2, y_train)\n",
|
||||
"bagged_pred2 = bagged_model.predict(x_test2)\n",
|
||||
"acc2 = accuracy_score(y_test, bagged_pred2) * 100\n",
|
||||
"accuracies2.append(acc2)\n",
|
||||
"# format with color coded if acc2 > 90\n",
|
||||
@@ -670,9 +643,8 @@
|
||||
"# format with color coded if acc1 > 90\n",
|
||||
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
||||
"print(\"Decision Tree Accuracy for sensor 1:\", acc1)\n",
|
||||
"dt_model2 = DecisionTreeClassifier()\n",
|
||||
"dt_model2.fit(x_train2, y_train)\n",
|
||||
"dt_pred2 = dt_model2.predict(x_test2)\n",
|
||||
"dt_model.fit(x_train2, y_train)\n",
|
||||
"dt_pred2 = dt_model.predict(x_test2)\n",
|
||||
"acc2 = accuracy_score(y_test, dt_pred2) * 100\n",
|
||||
"accuracies2.append(acc2)\n",
|
||||
"# format with color coded if acc2 > 90\n",
|
||||
@@ -688,9 +660,8 @@
|
||||
"# format with color coded if acc1 > 90\n",
|
||||
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
||||
"print(\"KNeighbors Accuracy for sensor 1:\", acc1)\n",
|
||||
"knn_model2 = KNeighborsClassifier()\n",
|
||||
"knn_model2.fit(x_train2, y_train)\n",
|
||||
"knn_pred2 = knn_model2.predict(x_test2)\n",
|
||||
"knn_model.fit(x_train2, y_train)\n",
|
||||
"knn_pred2 = knn_model.predict(x_test2)\n",
|
||||
"acc2 = accuracy_score(y_test, knn_pred2) * 100\n",
|
||||
"accuracies2.append(acc2)\n",
|
||||
"# format with color coded if acc2 > 90\n",
|
||||
@@ -706,9 +677,8 @@
|
||||
"# format with color coded if acc1 > 90\n",
|
||||
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
||||
"print(\"Linear Discriminant Analysis Accuracy for sensor 1:\", acc1)\n",
|
||||
"lda_model2 = LinearDiscriminantAnalysis()\n",
|
||||
"lda_model2.fit(x_train2, y_train)\n",
|
||||
"lda_pred2 = lda_model2.predict(x_test2)\n",
|
||||
"lda_model.fit(x_train2, y_train)\n",
|
||||
"lda_pred2 = lda_model.predict(x_test2)\n",
|
||||
"acc2 = accuracy_score(y_test, lda_pred2) * 100\n",
|
||||
"accuracies2.append(acc2)\n",
|
||||
"# format with color coded if acc2 > 90\n",
|
||||
@@ -724,9 +694,8 @@
|
||||
"# format with color coded if acc1 > 90\n",
|
||||
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
||||
"print(\"Support Vector Machine Accuracy for sensor 1:\", acc1)\n",
|
||||
"svm_model2 = SVC()\n",
|
||||
"svm_model2.fit(x_train2, y_train)\n",
|
||||
"svm_pred2 = svm_model2.predict(x_test2)\n",
|
||||
"svm_model.fit(x_train2, y_train)\n",
|
||||
"svm_pred2 = svm_model.predict(x_test2)\n",
|
||||
"acc2 = accuracy_score(y_test, svm_pred2) * 100\n",
|
||||
"accuracies2.append(acc2)\n",
|
||||
"# format with color coded if acc2 > 90\n",
|
||||
@@ -742,9 +711,8 @@
|
||||
"# format with color coded if acc1 > 90\n",
|
||||
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
||||
"print(\"XGBoost Accuracy:\", acc1)\n",
|
||||
"xgboost_model2 = XGBClassifier()\n",
|
||||
"xgboost_model2.fit(x_train2, y_train)\n",
|
||||
"xgboost_pred2 = xgboost_model2.predict(x_test2)\n",
|
||||
"xgboost_model.fit(x_train2, y_train)\n",
|
||||
"xgboost_pred2 = xgboost_model.predict(x_test2)\n",
|
||||
"acc2 = accuracy_score(y_test, xgboost_pred2) * 100\n",
|
||||
"accuracies2.append(acc2)\n",
|
||||
"# format with color coded if acc2 > 90\n",
|
||||
@@ -821,75 +789,57 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from src.ml.model_selection import create_ready_data\n",
|
||||
"def spectograph(data_dir: str):\n",
|
||||
" # print(os.listdir(data_dir))\n",
|
||||
" for damage in os.listdir(data_dir):\n",
|
||||
" # print(damage)\n",
|
||||
" d = os.path.join(data_dir, damage)\n",
|
||||
" # print(d)\n",
|
||||
" for file in os.listdir(d):\n",
|
||||
" # print(file)\n",
|
||||
" f = os.path.join(d, file)\n",
|
||||
" print(f)\n",
|
||||
" # sensor1 = pd.read_csv(f, skiprows=1, sep=';')\n",
|
||||
" # sensor2 = pd.read_csv(f, skiprows=1, sep=';')\n",
|
||||
"\n",
|
||||
"X1b, y = create_ready_data('D:/thesis/data/converted/raw_B/sensor1')\n",
|
||||
"X2b, y = create_ready_data('D:/thesis/data/converted/raw_B/sensor2')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"y.shape"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.metrics import accuracy_score, classification_report\n",
|
||||
"# 4. Validate on Dataset B\n",
|
||||
"y_pred_svm = svm_model.predict(X1b)\n",
|
||||
" # df1 = pd.DataFrame()\n",
|
||||
"\n",
|
||||
"# 5. Evaluate\n",
|
||||
"print(\"Accuracy on Dataset B:\", accuracy_score(y, y_pred_svm))\n",
|
||||
"print(classification_report(y, y_pred_svm))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.metrics import accuracy_score, classification_report\n",
|
||||
"# 4. Validate on Dataset B\n",
|
||||
"y_pred = rf_model2.predict(X2b)\n",
|
||||
" # df1['s1'] = sensor1[sensor1.columns[-1]]\n",
|
||||
" # df1['s2'] = sensor2[sensor2.columns[-1]]\n",
|
||||
" # # Combined Plot for sensor 1 and sensor 2 from data1 file in which motor is operated at 800 rpm\n",
|
||||
"\n",
|
||||
"# 5. Evaluate\n",
|
||||
"print(\"Accuracy on Dataset B:\", accuracy_score(y, y_pred))\n",
|
||||
"print(classification_report(y, y_pred))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"y_predict = svm_model2.predict(X2b.iloc[[5312],:])\n",
|
||||
"print(y_predict)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"y[5312]"
|
||||
" # plt.plot(df1['s2'], label='sensor 2')\n",
|
||||
" # plt.plot(df1['s1'], label='sensor 1')\n",
|
||||
" # plt.xlabel(\"Number of samples\")\n",
|
||||
" # plt.ylabel(\"Amplitude\")\n",
|
||||
" # plt.title(\"Raw vibration signal\")\n",
|
||||
" # plt.legend()\n",
|
||||
" # plt.show()\n",
|
||||
"\n",
|
||||
" # from scipy import signal\n",
|
||||
" # from scipy.signal.windows import hann\n",
|
||||
"\n",
|
||||
" # vibration_data = df1['s1']\n",
|
||||
"\n",
|
||||
" # # Applying STFT\n",
|
||||
" # window_size = 1024\n",
|
||||
" # hop_size = 512\n",
|
||||
" # window = hann(window_size) # Creating a Hanning window\n",
|
||||
" # frequencies, times, Zxx = signal.stft(vibration_data, window=window, nperseg=window_size, noverlap=window_size - hop_size)\n",
|
||||
"\n",
|
||||
" # # Plotting the STFT Data\n",
|
||||
" # plt.pcolormesh(times, frequencies, np.abs(Zxx), shading='gouraud')\n",
|
||||
" # plt.title(f'STFT Magnitude for case 1 signal sensor 1 ')\n",
|
||||
" # plt.ylabel('Frequency [Hz]')\n",
|
||||
" # plt.xlabel('Time [sec]')\n",
|
||||
" # plt.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Confusion Matrix"
|
||||
"## Test with Outside of Its Training Data"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -897,52 +847,7 @@
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"cm = confusion_matrix(y, y_pred_svm) # -> ndarray\n",
|
||||
"\n",
|
||||
"# get the class labels\n",
|
||||
"labels = svm_model.classes_\n",
|
||||
"\n",
|
||||
"# Plot\n",
|
||||
"disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=labels)\n",
|
||||
"disp.plot(cmap=plt.cm.Blues) # You can change colormap\n",
|
||||
"plt.title(\"SVM Sensor1 CM Train w/ Dataset A Val w/ Dataset B\")\n",
|
||||
"plt.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Self-test CM"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# 1. Predict sensor 1 on Dataset A\n",
|
||||
"y_train_pred = svm_model.predict(x_train1)\n",
|
||||
"\n",
|
||||
"# 2. Import confusion matrix tools\n",
|
||||
"from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"\n",
|
||||
"# 3. Create and plot confusion matrix\n",
|
||||
"cm_train = confusion_matrix(y_train, y_train_pred)\n",
|
||||
"labels = svm_model.classes_\n",
|
||||
"\n",
|
||||
"disp = ConfusionMatrixDisplay(confusion_matrix=cm_train, display_labels=labels)\n",
|
||||
"disp.plot(cmap=plt.cm.Blues)\n",
|
||||
"plt.title(\"Confusion Matrix: Train & Test on Dataset A\")\n",
|
||||
"plt.show()\n"
|
||||
]
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
|
||||
@@ -1,57 +0,0 @@
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
import os
|
||||
from sklearn.model_selection import train_test_split as sklearn_split
|
||||
|
||||
|
||||
def create_ready_data(
|
||||
stft_data_path: str,
|
||||
stratify: np.ndarray = None,
|
||||
) -> tuple:
|
||||
"""
|
||||
Create a stratified train-test split from STFT data.
|
||||
|
||||
Parameters:
|
||||
-----------
|
||||
stft_data_path : str
|
||||
Path to the directory containing STFT data files (e.g. 'data/converted/raw/sensor1')
|
||||
stratify : np.ndarray, optional
|
||||
Labels to use for stratified sampling
|
||||
|
||||
Returns:
|
||||
--------
|
||||
tuple
|
||||
(X_train, X_test, y_train, y_test) - Split datasets
|
||||
"""
|
||||
ready_data = []
|
||||
for file in os.listdir(stft_data_path):
|
||||
ready_data.append(pd.read_csv(os.path.join(stft_data_path, file)))
|
||||
|
||||
y_data = [i for i in range(len(ready_data))]
|
||||
|
||||
# Combine all dataframes in ready_data into a single dataframe
|
||||
if ready_data: # Check if the list is not empty
|
||||
# Use pandas concat function instead of iterative concatenation
|
||||
combined_data = pd.concat(ready_data, axis=0, ignore_index=True)
|
||||
|
||||
print(f"Type of combined data: {type(combined_data)}")
|
||||
print(f"Shape of combined data: {combined_data.shape}")
|
||||
else:
|
||||
print("No data available in ready_data list")
|
||||
combined_data = pd.DataFrame()
|
||||
|
||||
# Store the result in x1a for compatibility with subsequent code
|
||||
X = combined_data
|
||||
|
||||
for i in range(len(y_data)):
|
||||
y_data[i] = [y_data[i]] * ready_data[i].shape[0]
|
||||
y_data[i] = np.array(y_data[i])
|
||||
|
||||
if y_data:
|
||||
# Use numpy concatenate function instead of iterative concatenation
|
||||
y = np.concatenate(y_data, axis=0)
|
||||
else:
|
||||
print("No labels available in y_data list")
|
||||
y = np.array([])
|
||||
|
||||
return X, y
|
||||
@@ -2,7 +2,6 @@ import pandas as pd
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import numpy as np
|
||||
from colorama import Fore, Style, init
|
||||
from typing import TypedDict, Dict, List
|
||||
from joblib import load
|
||||
@@ -226,56 +225,25 @@ class DataProcessor:
|
||||
"""
|
||||
idx = self._create_vector_column_index()
|
||||
# if overwrite:
|
||||
for i in range(len(self.data)): # damage(s)
|
||||
for j in range(len(self.data[i])): # col(s)
|
||||
for i in range(len(self.data)):
|
||||
for j in range(len(self.data[i])):
|
||||
# Get the appropriate indices for slicing from idx
|
||||
indices = idx[j]
|
||||
|
||||
# Get the current DataFrame
|
||||
df = self.data[i][j]
|
||||
|
||||
# Keep the 'Time' column and select only specifid 'Real' colmns
|
||||
# First, we add 1 to all indices to acount for 'Time' being at positiion 0
|
||||
# Keep the 'Time' column and select only specified 'Real' columns
|
||||
# First, we add 1 to all indices to account for 'Time' being at position 0
|
||||
real_indices = [index + 1 for index in indices]
|
||||
|
||||
# Create list with Time column index (0) and the adjustedd Real indices
|
||||
# Create list with Time column index (0) and the adjusted Real indices
|
||||
all_indices = [0] + [real_indices[0]] + [real_indices[-1]]
|
||||
|
||||
# Apply the slicing
|
||||
self.data[i][j] = df.iloc[:, all_indices]
|
||||
# TODO: if !overwrite:
|
||||
|
||||
def export_to_csv(self, output_dir: str, file_prefix: str = "DAMAGE"):
|
||||
"""
|
||||
Export the processed data to CSV files in the required folder structure.
|
||||
|
||||
:param output_dir: Directory to save the CSV files.
|
||||
:param file_prefix: Prefix for the output filenames.
|
||||
"""
|
||||
for group_idx, group in enumerate(self.data, start=1):
|
||||
group_folder = os.path.join(output_dir, f"{file_prefix}_{group_idx}")
|
||||
os.makedirs(group_folder, exist_ok=True)
|
||||
for test_idx, df in enumerate(group, start=1):
|
||||
# Ensure columns are named uniquely if duplicated
|
||||
df = df.copy()
|
||||
df.columns = ["Time", "Real_0", "Real_1"] # Rename
|
||||
|
||||
# Export first Real column
|
||||
out1 = os.path.join(
|
||||
group_folder, f"{file_prefix}_{group_idx}_TEST{test_idx}_01.csv"
|
||||
)
|
||||
df[["Time", "Real_0"]].rename(columns={"Real_0": "Real"}).to_csv(
|
||||
out1, index=False
|
||||
)
|
||||
|
||||
# Export last Real column
|
||||
out2 = os.path.join(
|
||||
group_folder, f"{file_prefix}_{group_idx}_TEST{test_idx}_02.csv"
|
||||
)
|
||||
df[["Time", "Real_1"]].rename(columns={"Real_1": "Real"}).to_csv(
|
||||
out2, index=False
|
||||
)
|
||||
|
||||
|
||||
def create_damage_files(base_path, output_base, prefix):
|
||||
# Initialize colorama
|
||||
|
||||
@@ -4,22 +4,5 @@ from joblib import dump, load
|
||||
# a = generate_damage_files_index(
|
||||
# num_damage=6, file_index_start=1, col=5, base_path="D:/thesis/data/dataset_A"
|
||||
# )
|
||||
|
||||
b = generate_damage_files_index(
|
||||
num_damage=6,
|
||||
file_index_start=1,
|
||||
col=5,
|
||||
base_path="D:/thesis/data/dataset_B",
|
||||
prefix="zzzBD",
|
||||
)
|
||||
# data_A = DataProcessor(file_index=a)
|
||||
# # data.create_vector_column(overwrite=True)
|
||||
# data_A.create_limited_sensor_vector_column(overwrite=True)
|
||||
# data_A.export_to_csv("D:/thesis/data/converted/raw")
|
||||
|
||||
data_B = DataProcessor(file_index=b)
|
||||
# data.create_vector_column(overwrite=True)
|
||||
data_B.create_limited_sensor_vector_column(overwrite=True)
|
||||
data_B.export_to_csv("D:/thesis/data/converted/raw_B")
|
||||
# a = load("D:/cache.joblib")
|
||||
# breakpoint()
|
||||
# dump(DataProcessor(file_index=a), "D:/cache.joblib")
|
||||
a = load("D:/cache.joblib")
|
||||
|
||||
41
latex/appendix/important/abdeljaber2017.tex
Normal file
41
latex/appendix/important/abdeljaber2017.tex
Normal file
@@ -0,0 +1,41 @@
|
||||
2 %Nomor
|
||||
|
||||
%for mult rows
|
||||
|
||||
& %Judul Jurnal
|
||||
Real-time vibration-based structural damage detection using one-dimensional convolutional neural networks \href{https://doi.org/10.1016/j.jsv.2016.10.043}{10.1016/j.jsv.
|
||||
2016.10.043}
|
||||
|
||||
%for mult rows
|
||||
|
||||
% & %Author
|
||||
% % Satish B Satpal; Yogesh Khandare; Anirban Guha; Sauvik Banerjee
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Nama Jurnal
|
||||
% International Journal of Advanced Structural Engineering (IJASE)
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Sumber
|
||||
% \href{http://dx.doi.org/10.1186/2008-6695-5-2}{ResearchGate}
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Tahun
|
||||
% 2020
|
||||
|
||||
% %for mult rows
|
||||
|
||||
& %Tujuan penelitian
|
||||
Mengidentifikasi lokasi kerusakan struktur secara \textit{real-time} dengan memproses sinyal getaran mentah yang diambil dari jaringan-jaringan akselerometer pada setiap titik tanpa proses tambahan atau ekstraksi fitur.
|
||||
|
||||
& %Kesimpulan
|
||||
% Studi ini menilai kemampuan mesin vektor pendukung untuk memprediksi intensitas kerusakan dan lokasi pada balok kantilever. Meskipun berhasil memprediksi kerusakan dengan sedikit kesalahan, tingkat kebisingan dan lokasi kerusakan memengaruhi keakuratan. Tingkat kebisingan yang tinggi mempengaruhi kinerja secara signifikan, terutama pada intensitas kerusakan yang lebih rendah.
|
||||
& % Gap Research
|
||||
\begin{enumerate}
|
||||
\item Riset ini hanya dilakukan dengan \textit{full-grid array} akselerometer yang diletakkan pada setiap \textit{node} kerusakan, sehingga memerlukan banyak perangkat akselerometer.
|
||||
|
||||
\item Tidak ada komparasi performa efisiensi dan akurasi dengan algoritma pembelajaran mesin lain yang lebih populer sebelumnya.
|
||||
\end{enumerate}
|
||||
68
latex/appendix/important/van2020.tex
Normal file
68
latex/appendix/important/van2020.tex
Normal file
@@ -0,0 +1,68 @@
|
||||
1
|
||||
|
||||
%for mult rows
|
||||
|
||||
&
|
||||
Statistical Feature Extraction in Machine Fault Detection using Vibration Signal (\href{https://doi.org/10.1109/ICTC49870.2020.9289285}{10.1109/ICTC49870.
|
||||
2020.9289285})
|
||||
%for mult rows
|
||||
|
||||
% &
|
||||
% Donghui Xu; Xiang Xu; Michael C. Forde; Antonio Caballero
|
||||
|
||||
%for mult rows
|
||||
|
||||
% &
|
||||
% Construction and Building Materials
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% &
|
||||
% \href{https://doi.org/10.1016/j.conbuildmat.2023.132596}{ScienceDirect}
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% &
|
||||
% 2023
|
||||
|
||||
% %for mult rows
|
||||
|
||||
&
|
||||
\begin{enumerate}[series=enum]
|
||||
\item Menginvestigasi cara mengklasifikasi kondisi \textit{gearbox} normal dan rusak menggunakan sinyal getaran berbasis pada kombinasi antara analisis statistik dan FFT dengan algoritma pembelajaran mesin (ANN, Logistic Regression, dan SVM)
|
||||
|
||||
\item Mengurangi waktu latih dan kompleksitas kalkulasi dengan analisis statistik sebagai input data meliputi sembilan fitur: mean, median, min, max, kurtosis, \textit{skewness}, \textit{standard deviation}, dan \textit{range}.
|
||||
\end{enumerate}
|
||||
|
||||
&
|
||||
\begin{enumerate}[series=enum2]
|
||||
\item Nilai \textit{maximum} dan kurtosis adalah fitur yang paling signifikan untuk mengklasifikasi kelas label pembelajaran mesin.
|
||||
|
||||
\item ANN meraih akurasi 100\% pada input FFT penuh dan analisis statistik, sedangkan Regresi Logistik (LR) dan SVM meraih akurasi 100\% dengan input FFT penuh namun hanya mendapat akurasi 91\% dengan input analisis statistik
|
||||
\end{enumerate}
|
||||
|
||||
&
|
||||
\begin{enumerate}
|
||||
\item Lorem
|
||||
\item Ipsum
|
||||
\end{enumerate}
|
||||
|
||||
%-------------page break----------------
|
||||
% \\
|
||||
% &
|
||||
% &
|
||||
% &
|
||||
% &
|
||||
% &
|
||||
% &
|
||||
% &
|
||||
% \begin{enumerate}[resume=enum]
|
||||
% \item Menyajikan berbagai perkembangan penelitian, mendiskusikan dan membandingkannya kelebihan dan kekurangannya
|
||||
% \item Meringkas kesesuaian berbagai metode pembelajaran mesin untuk masalah SHM yang berbeda
|
||||
% \item Terakhir, tren masa depan
|
||||
% \end{enumerate}
|
||||
% &
|
||||
% \begin{enumerate}[resume=enum2]
|
||||
% \item SVM dan hutan acak kurang mendapat perhatian dibandingkan dengan jaringan saraf. Ini digunakan untuk klasifikasi kerusakan. Namun, pemrosesan awal data jauh lebih rumit.
|
||||
|
||||
% \end{enumerate}
|
||||
509
latex/appendix/summary_related_paper.tex
Normal file
509
latex/appendix/summary_related_paper.tex
Normal file
@@ -0,0 +1,509 @@
|
||||
\documentclass[12pt,a4paper]{report}
|
||||
\usepackage{hyperref}
|
||||
\usepackage[top=1cm,right=3cm,bottom=1cm,left=3cm]{geometry}
|
||||
\usepackage{multirow}
|
||||
\usepackage{array}
|
||||
% \usepackage{makecell}
|
||||
\usepackage{pdflscape}
|
||||
\usepackage{longtable,booktabs}
|
||||
\usepackage{colortbl,xcolor}
|
||||
\usepackage{enumitem}
|
||||
\usepackage{pdfpages}
|
||||
\usepackage{caption}
|
||||
\usepackage[bahasa]{babel}
|
||||
\usepackage{xpatch,csquotes}
|
||||
\usepackage[backend=biber]{biblatex}
|
||||
\addbibresource{export.bib}
|
||||
\DeclareSourcemap{
|
||||
\maps[datatype = bibtex]{
|
||||
\map{
|
||||
\step[fieldsource = abstract,
|
||||
match = \regexp{([^\\])\%},
|
||||
replace = \regexp{\$1\\\%}]
|
||||
}
|
||||
}
|
||||
}
|
||||
% \usepackage{tablefootnote}
|
||||
% \usepackage{showframe}
|
||||
\definecolor{Gray}{gray}{0.95}
|
||||
\newcolumntype{a}{>{\columncolor{Gray}}p}
|
||||
\renewcommand{\thefootnote}{\textit{\alph{footnote}}}
|
||||
% \newcolumntype{b}{>{\raggedright\arraybackslash}p}
|
||||
|
||||
\title{Tugas 2 \\ Metode Penelitian}
|
||||
\author{Rifqi Damar Panuluh \\ 20210110224}
|
||||
|
||||
\begin{document}
|
||||
\maketitle
|
||||
\begin{landscape}
|
||||
% Table generated by Excel2LaTeX from sheet 'Sheet1'
|
||||
% \begin{table}[h]
|
||||
\centering
|
||||
\begin{longtable}{
|
||||
>{\raggedleft\arraybackslash}p{0.02\linewidth} %1
|
||||
>{\raggedright\arraybackslash}a{0.1\linewidth} %2
|
||||
% >{\raggedright\arraybackslash}p{0.1\linewidth} %3
|
||||
% >{\raggedright\arraybackslash}a{0.075\linewidth} %4
|
||||
% p{0.065\linewidth} %5
|
||||
% >{\raggedleft\arraybackslash}p{0.05\linewidth} %6
|
||||
>{\raggedright\arraybackslash}p{0.25\linewidth} %7
|
||||
>{\raggedright\arraybackslash}a{0.25\linewidth} %8
|
||||
>{\raggedright\arraybackslash}p{0.25\linewidth} %9
|
||||
}
|
||||
|
||||
\caption{Tinjauan pustaka, topik: pemanfaatan data getaran untuk monitor kesehatan struktur jembatan}
|
||||
\label{tab:my_label}
|
||||
\\
|
||||
\toprule
|
||||
\toprule
|
||||
\rowcolor{white}
|
||||
No. %1
|
||||
&
|
||||
Judul %2
|
||||
% &
|
||||
% Nama Penulis %3
|
||||
% &
|
||||
% Nama Jurnal %4
|
||||
% &
|
||||
% Sumber %5
|
||||
% &
|
||||
% Tahun %6
|
||||
&
|
||||
Tujuan Penelitian %7
|
||||
&
|
||||
Kesimpulan %8
|
||||
&
|
||||
Gap Research %9
|
||||
|
||||
\\\midrule
|
||||
\endfirsthead
|
||||
\toprule
|
||||
\rowcolor{white}
|
||||
No. %1
|
||||
&
|
||||
Judul %2
|
||||
% &
|
||||
% Nama Penulis %3
|
||||
% &
|
||||
% Nama Jurnal %4
|
||||
% &
|
||||
% Sumber %5
|
||||
% &
|
||||
% Tahun %6
|
||||
&
|
||||
Tujuan Penelitian %7
|
||||
&
|
||||
Kesimpulan %8
&
Gap Research %9

\\\midrule
|
||||
\endhead
|
||||
\midrule
|
||||
\multicolumn{5}{r}{\textit{berlanjut di halaman berikutnya}}
|
||||
\endfoot
|
||||
\bottomrule
|
||||
\bottomrule
|
||||
\endlastfoot
|
||||
|
||||
%-----1
|
||||
\input{important/van2020}
|
||||
\\
|
||||
%-----2
|
||||
\input{important/abdeljaber2017}
|
||||
\\
|
||||
%------3
|
||||
\\
|
||||
3
|
||||
|
||||
& %Judul Jurnal
|
||||
Real-time nondestructive structural health monitoring using support vector machines and wavelets (Ahmet Bulut; Ambuj K. Singh; Peter Shin; Tony Fountain; Hector Jasso; Linjun Yan; Ahmed Elgamal)
|
||||
|
||||
%for mult rows
|
||||
|
||||
% & %Author
|
||||
% Ahmet Bulut; Ambuj K. Singh; Peter Shin; Tony Fountain; Hector Jasso; Linjun Yan; Ahmed Elgamal
|
||||
|
||||
%for mult rows
|
||||
|
||||
% & %Nama Jurnal
|
||||
% Case Studies in Construction Materials 13 (2020) e00406
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Sumber
|
||||
% SPIE
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Tahun
|
||||
% 2005
|
||||
|
||||
%for mult rows
|
||||
|
||||
& %Tujuan penelitian
|
||||
Eksplorasi efektivitas SVM dalam deteksi kerusakan; Validasi model SVM dengan data nyata jembatan
|
||||
|
||||
& %Kesimpulan
|
||||
\begin{enumerate} [series=enum]
|
||||
\item SVM menunjukkan akurasi tinggi dalam mengidentifikasi lokasi kerusakan
|
||||
\item Rekomendasi untuk penyetelan parameter SVM
|
||||
\end{enumerate}
|
||||
|
||||
|
||||
|
||||
|
||||
%-----------4
|
||||
\\
|
||||
4
|
||||
|
||||
& %Judul Jurnal
|
||||
A novel approach of Structural Health Monitoring by the application of FFT and wavelet transform using an index of frequency dispersion (Fragkiskos P. Pentaris; John Stonham; John P. Makris)
|
||||
|
||||
%for mult rows
|
||||
|
||||
% & %Author
|
||||
% Fragkiskos P. Pentaris; John Stonham; John P. Makris
|
||||
|
||||
%for mult rows
|
||||
|
||||
% & %Nama Jurnal
|
||||
% International Journal of Geology
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Sumber
|
||||
% Research Gate
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Tahun
|
||||
% 2013
|
||||
|
||||
%for mult rows
|
||||
|
||||
& %Tujuan penelitian
|
||||
\begin{enumerate}
|
||||
\item Memeriksa peran FFT dalam pemrosesan awal data getaran
|
||||
\item Menilai dampak FFT terhadap keakuratan deteksi kerusakan
|
||||
\end{enumerate}
|
||||
|
||||
& %Kesimpulan
|
||||
\begin{enumerate} [series=enum]
|
||||
\item FFT meningkatkan rasio \textit{signal-to-noise} dan meningkatkan deteksi kerusakan.
|
||||
\item Menyarankan integrasi dengan algoritma lain untuk meningkatkan akurasi.
|
||||
\end{enumerate}
|
||||
|
||||
\\ %-------------page break----------------
|
||||
|
||||
|
||||
|
||||
|
||||
%-----------4
|
||||
\\
|
||||
5
|
||||
|
||||
& %Judul Jurnal
|
||||
Review of Vibration-Based Structural Health Monitoring Using Deep Learning (Gyungmin Toh; Junhong Park)
|
||||
|
||||
%for mult rows
|
||||
|
||||
% & %Author
|
||||
% Gyungmin Toh;
|
||||
% Junhong Park
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Nama Jurnal
|
||||
% Applied Sciences
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Sumber
|
||||
% MDPI
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Tahun
|
||||
% 2020
|
||||
|
||||
%for mult rows
|
||||
|
||||
& %Tujuan penelitian
|
||||
\begin{enumerate}
|
||||
\item Ringkasan studi penerapan algoritma pembelajaran mesin untuk pemantauan kesalahan (\textit{fault monitoring}) menggunakan faktor getaran untuk mengkategorikan penelitian.
|
||||
\item Menyediakan interpretasi singkat tentang jaringan saraf dalam untuk pengaplikasian lebih lanjut dalam analisis getaran struktural.
|
||||
\end{enumerate}
|
||||
|
||||
& %Kesimpulan
|
||||
\begin{enumerate} [series=enum]
|
||||
\item Pembelajaran mendalam (\textit{deep learning}) memiliki keunggulan mampu melakukan pemantauan kesehatan pada struktur kompleks dengan akurasi tinggi.
|
||||
\end{enumerate}
|
||||
%-------------page break----------------
|
||||
|
||||
|
||||
|
||||
|
||||
%-----------4
|
||||
\\
|
||||
6
|
||||
|
||||
& %Judul Jurnal
|
||||
A deep learning approach to condition monitoring of cantilever beams via time-frequency extended signatures (Habil. Darian M. Onchis)
|
||||
|
||||
%for mult rows
|
||||
|
||||
% & %Author
|
||||
% Habil. Darian M. Onchis
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Nama Jurnal
|
||||
% Computers in Industry
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Sumber
|
||||
% Science Direct
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Tahun
|
||||
% 2019
|
||||
|
||||
%for mult rows
|
||||
|
||||
& %Tujuan penelitian
|
||||
\begin{enumerate}
|
||||
\item Ringkasan studi penerapan algoritma pembelajaran mesin untuk pemantauan kesalahan (\textit{fault monitoring}) menggunakan faktor getaran untuk mengkategorikan penelitian.
|
||||
\item Menyediakan interpretasi singkat tentang jaringan saraf dalam untuk pengaplikasian lebih lanjut dalam analisis getaran struktural.
|
||||
\end{enumerate}
|
||||
|
||||
& %Kesimpulan
|
||||
\begin{enumerate} [series=enum]
|
||||
\item Pembelajaran mendalam (\textit{deep learning}) memiliki keunggulan mampu melakukan pemantauan kesehatan pada struktur kompleks dengan akurasi tinggi.
|
||||
\end{enumerate}
|
||||
|
||||
\\ %-------------page break----------------
|
||||
|
||||
|
||||
% %------------5
|
||||
% 5
|
||||
|
||||
% & %Judul Jurnal
|
||||
% Advances and development trends in eco-friendly pavements
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Author
|
||||
% Aimin Sha, Zhuangzhuang Liu, Wei Jiang, Lin Qi, Liqun Hu, Wenxiu Jiao ,Diego Maria Barbieri
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Nama Jurnal
|
||||
% Journal of Road Engineering 1 (2021)
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Sumber
|
||||
% ScienceDirect
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Tahun
|
||||
% 2021
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Tujuan penelitian
|
||||
% Mengembangkan solusi teknis untuk mengatasi tantangan yang terkait dengan penciptaan infrastruktur hijau dan berkelanjutan, misalnya, pengurangan dampak lingkungan, peningkatan keselamatan lalu lintas, dan efisiensi transportasi, dll.\cite{Sha2021}
|
||||
% &
|
||||
% \begin{enumerate} [series=enum]
|
||||
% \item Temuan penelitian terbaru terkait jalan ramah lingkungan
|
||||
% trotoar diringkas dan dibahas sesuai dengan enam kunci yang berbeda
|
||||
% karakteristik: permeabel, pengurangan kebisingan, luminescence diri, knalpot
|
||||
% dekomposisi, penyerapan panas rendah serta \textit{anti-icing} / \textit{de-icing}.\cite{Sha2021}
|
||||
% \end{enumerate}
|
||||
% \\
|
||||
% & %Judul Jurnal
|
||||
% Advances and development trends in eco-friendly pavements
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Author
|
||||
% Aimin Sha, Zhuangzhuang Liu, Wei Jiang, Lin Qi, Liqun Hu, Wenxiu Jiao ,Diego Maria Barbieri
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Nama Jurnal
|
||||
% Journal of Road Engineering 1 (2021)
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Sumber
|
||||
% ScienceDirect
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Tahun
|
||||
% 2021
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Tujuan penelitian
|
||||
% Mengembangkan solusi teknis untuk mengatasi tantangan yang terkait dengan penciptaan infrastruktur hijau dan berkelanjutan, misalnya, pengurangan dampak lingkungan, peningkatan keselamatan lalu lintas, dan efisiensi transportasi, dll.\cite{Sha2021}
|
||||
% &
|
||||
% \begin{enumerate}[resume=enum]
|
||||
% \item Teknologi ini dapat memecahkan beberapa tantangan utama yang terkait dengan konstruksi jalan dan lalu lintas (misalnya, kebisingan, efek pulau panas, dan pembangkitan polusi). Sebagian besar solusi saat ini hanya tersedia menampilkan satu fungsi ramah lingkungan pada satu waktu.\cite{Sha2021}
|
||||
% \end{enumerate}
|
||||
|
||||
% %-----------5
|
||||
% \\
|
||||
% 5
|
||||
|
||||
% & %Judul Jurnal
|
||||
% Micromobility injury events: Motor vehicle crashes and other transportation systems factors
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Author
|
||||
% Kevin Fang
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Nama Jurnal
|
||||
% Transportation Research Interdisciplinary Perspectives 14 (2022) 100574
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Sumber
|
||||
% ScienceDirect
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Tahun
|
||||
% 2022
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Tujuan penelitian
|
||||
% Menginformasikan transportasi strategi kebijakan untuk mencoba dan meningkatkan kinerja keselamatan, Dengan cara mengeksplorasi keadaan di mana cedera pengendara mikromobilitas mengalami cederanya, dengan fokus pada faktor-faktor yang berkaitan dengan sistem transportasi.\cite{Fang2022}
|
||||
% &
|
||||
% \begin{enumerate} [series=enum]
|
||||
% \item Kecelakaan kendaraan bermotor secara mengejutkan menjulang sebagai sesuatu yang kemungkinan adalah faktor umum dalam cedera mikromobilitas. Masalah perkerasan, konflik
|
||||
% dengan pengguna non-otomatis, dan medan juga muncul sebagai faktor cedera yang terukur.\cite{Fang2022}
|
||||
% \end{enumerate}
|
||||
% \\
|
||||
% & %Judul Jurnal
|
||||
% Micromobility injury events: Motor vehicle crashes and other transportation systems factors
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Author
|
||||
% Kevin Fang
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Nama Jurnal
|
||||
% Transportation Research Interdisciplinary Perspectives 14 (2022) 100574
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Sumber
|
||||
% ScienceDirect
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Tahun
|
||||
% 2022
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Tujuan penelitian
|
||||
% Menginformasikan transportasi strategi kebijakan untuk mencoba dan meningkatkan kinerja keselamatan, Dengan cara mengeksplorasi keadaan di mana cedera pengendara mikromobilitas mengalami cederanya, dengan fokus pada faktor-faktor yang berkaitan dengan sistem transportasi.\cite{Fang2022}
|
||||
% &
|
||||
% \begin{enumerate} [resume=enum]
|
||||
% \item Di antara faktor-faktor yang berhubungan dengan transportasi, analisis regresi
|
||||
% menunjukkan bahwa terluka dalam kecelakaan kendaraan bermotor atau di medan berbukit
|
||||
% sesuai dengan kemungkinan yang lebih besar dari rawat inap dan cedera kepala.\cite{Fang2022}
|
||||
% \end{enumerate}
|
||||
% \\
|
||||
% & %Judul Jurnal
|
||||
% Micromobility injury events: Motor vehicle crashes and other transportation systems factors
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Author
|
||||
% Kevin Fang
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Nama Jurnal
|
||||
% Transportation Research Interdisciplinary Perspectives 14 (2022) 100574
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Sumber
|
||||
% ScienceDirect
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Tahun
|
||||
% 2022
|
||||
|
||||
% %for mult rows
|
||||
|
||||
% & %Tujuan penelitian
|
||||
% Menginformasikan transportasi strategi kebijakan untuk mencoba dan meningkatkan kinerja keselamatan, Dengan cara mengeksplorasi keadaan di mana cedera pengendara mikromobilitas mengalami cederanya, dengan fokus pada faktor-faktor yang berkaitan dengan sistem transportasi.\cite{Fang2022}
|
||||
% &
|
||||
% \begin{enumerate} [resume=enum]
|
||||
% \item Mitigasi yang berhasil yang memaksimalkan kinerja mode keselamatan mikromobilitas dapat membantu menarik dan mempertahankan pengguna dan menjaga kepercayaan dari pembuat kebijakan yang peduli keselamatan.\cite{Fang2022}
|
||||
% \end{enumerate}
|
||||
% \end{tabular}
|
||||
\end{longtable}
|
||||
% \end{table}
|
||||
\end{landscape}
|
||||
\clearpage
|
||||
\pagenumbering{roman}
|
||||
\setcounter{page}{2}
|
||||
\thispagestyle{empty}
|
||||
\printbibliography
|
||||
|
||||
\clearpage
|
||||
\begin{titlepage}
|
||||
\
|
||||
\vfill
|
||||
\centering\noindent \Huge{LAMPIRAN}
|
||||
\vfill
|
||||
\
|
||||
\end{titlepage}
|
||||
|
||||
|
||||
% \clearpage
|
||||
% \thispagestyle{empty}
|
||||
% \centering
|
||||
% \frame{\includegraphics[page=1,scale=.7]{assets/1-s2.0-S2095756420300295-main.pdf}}
|
||||
% \captionof{figure}{Halaman pertama jurnal pertama}
|
||||
|
||||
% \clearpage
|
||||
% \thispagestyle{empty}
|
||||
% \centering
|
||||
% \frame{\includegraphics[page=1,scale=.7]{assets/1-s2.0-S2214509520300024-main.pdf}}
|
||||
% \captionof{figure}{Halaman pertama jurnal kedua}
|
||||
|
||||
% \clearpage
|
||||
% \thispagestyle{empty}
|
||||
% \centering
|
||||
% \frame{\includegraphics[page=1,scale=.7]{assets/1-s2.0-S2214509520300784-main.pdf}}
|
||||
% \captionof{figure}{Halaman pertama jurnal ketiga}
|
||||
|
||||
% \clearpage
|
||||
% \thispagestyle{empty}
|
||||
% \centering
|
||||
% \frame{\includegraphics[page=1,scale=.7]{assets/1-s2.0-S2097049821000044-main.pdf}}
|
||||
% \captionof{figure}{Halaman pertama jurnal keempat}
|
||||
|
||||
% \clearpage
|
||||
% \thispagestyle{empty}
|
||||
% \centering
|
||||
% \frame{\includegraphics[page=1,scale=.7]{assets/1-s2.0-S2590198222000379-main.pdf}}
|
||||
% \captionof{figure}{Halaman pertama jurnal kelima}
|
||||
\end{document}
|
||||
Reference in New Issue
Block a user