Compare commits
53 Commits
latex/fron
...
latex/75-e
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b491c7cf8b | ||
|
|
ddae7ef6a4 | ||
|
|
e13b8874c7 | ||
|
|
dc2f1eb649 | ||
|
|
0ccbb7c2b1 | ||
|
|
3700531c2f | ||
|
|
a16ebae170 | ||
|
|
f5dada1b9c | ||
|
|
37c9a0765a | ||
|
|
8656289a1c | ||
|
|
d6df4e5349 | ||
|
|
b5cfebf938 | ||
|
|
1387206c7e | ||
|
|
1914cc3bf7 | ||
|
|
cec43cb291 | ||
|
|
6bedf9e297 | ||
|
|
15fe8339ec | ||
|
|
983e9c5834 | ||
|
|
d743ba451e | ||
|
|
4a56579d31 | ||
|
|
305999ec40 | ||
|
|
3e9085c15d | ||
|
|
44210ef372 | ||
|
|
9192d4c81c | ||
|
|
0373743ca7 | ||
|
|
49d6395e6f | ||
|
|
bf9cca2d90 | ||
|
|
08420296e6 | ||
|
|
1540213eec | ||
|
|
6fd4b7465e | ||
|
|
85a0aebf36 | ||
|
|
8d1edfdbf7 | ||
|
|
ff862d9467 | ||
|
|
dfb64db1d8 | ||
|
|
dcb2e52a38 | ||
|
|
f21fd8d195 | ||
|
|
9b5b42a756 | ||
|
|
3e3de577ba | ||
|
|
76a09c0219 | ||
|
|
1a994fd59c | ||
|
|
cdb3010b78 | ||
|
|
1f8da59a6b | ||
|
|
b177dd04d8 | ||
|
|
c9f4447e62 | ||
|
|
a89d4caf75 | ||
|
|
8a3c1ae585 | ||
|
|
7b934d3fba | ||
|
|
7dbc5bba0f | ||
|
|
aaccad7ae8 | ||
|
|
2c453ec403 | ||
|
|
7da3179d08 | ||
|
|
254b24cb21 | ||
|
|
d151062115 |
3
.gitignore
vendored
3
.gitignore
vendored
@@ -1,4 +1,5 @@
|
||||
# Ignore CSV files in the data directory and all its subdirectories
|
||||
data/**/*.csv
|
||||
.venv/
|
||||
*.pyc
|
||||
*.pyc
|
||||
*.egg-info/
|
||||
3
.vscode/settings.json
vendored
3
.vscode/settings.json
vendored
@@ -1,3 +1,4 @@
|
||||
{
|
||||
"python.analysis.extraPaths": ["./code/src/features"]
|
||||
"python.analysis.extraPaths": ["./code/src/features"],
|
||||
"jupyter.notebookFileRoot": "${workspaceFolder}/code"
|
||||
}
|
||||
|
||||
@@ -16,3 +16,8 @@ The repository is private and access is restricted only to those who have been g
|
||||
All contents of this repository, including the thesis idea, code, and associated data, are copyrighted © 2024 by Rifqi Panuluh. Unauthorized use or duplication is prohibited.
|
||||
|
||||
[LICENSE](https://github.com/nuluh/thesis?tab=License-1-ov-file#readme)
|
||||
|
||||
## How to Run `stft.ipynb`
|
||||
|
||||
1. run `pip install -e .` in root project first
|
||||
2. run the notebook
|
||||
@@ -155,7 +155,7 @@
|
||||
"import pandas as pd\n",
|
||||
"import numpy as np\n",
|
||||
"from scipy.signal import stft, hann\n",
|
||||
"from multiprocessing import Pool\n",
|
||||
"# from multiprocessing import Pool\n",
|
||||
"\n",
|
||||
"# Function to compute and append STFT data\n",
|
||||
"def process_stft(args):\n",
|
||||
@@ -321,9 +321,9 @@
|
||||
"source": [
|
||||
"import pandas as pd\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"ready_data1 = []\n",
|
||||
"ready_data1a = []\n",
|
||||
"for file in os.listdir('D:/thesis/data/converted/raw/sensor1'):\n",
|
||||
" ready_data1.append(pd.read_csv(os.path.join('D:/thesis/data/converted/raw/sensor1', file)))\n",
|
||||
" ready_data1a.append(pd.read_csv(os.path.join('D:/thesis/data/converted/raw/sensor1', file)))\n",
|
||||
"# colormesh give title x is frequency and y is time and rotate/transpose the data\n",
|
||||
"# Plotting the STFT Data"
|
||||
]
|
||||
@@ -334,8 +334,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ready_data1[0]\n",
|
||||
"plt.pcolormesh(ready_data1[0])"
|
||||
"# len(ready_data1a)\n",
|
||||
"# plt.pcolormesh(ready_data1[0])\n",
|
||||
"ready_data1a[0].max().max()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -345,7 +346,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for i in range(6):\n",
|
||||
" plt.pcolormesh(ready_data1[i])\n",
|
||||
" plt.pcolormesh(ready_data1a[i], cmap=\"jet\", vmax=0.03, vmin=0.0)\n",
|
||||
" plt.colorbar() \n",
|
||||
" plt.title(f'STFT Magnitude for case {i} sensor 1')\n",
|
||||
" plt.xlabel(f'Frequency [Hz]')\n",
|
||||
" plt.ylabel(f'Time [sec]')\n",
|
||||
@@ -358,9 +360,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ready_data2 = []\n",
|
||||
"ready_data2a = []\n",
|
||||
"for file in os.listdir('D:/thesis/data/converted/raw/sensor2'):\n",
|
||||
" ready_data2.append(pd.read_csv(os.path.join('D:/thesis/data/converted/raw/sensor2', file)))"
|
||||
" ready_data2a.append(pd.read_csv(os.path.join('D:/thesis/data/converted/raw/sensor2', file)))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -369,8 +371,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(len(ready_data1))\n",
|
||||
"print(len(ready_data2))"
|
||||
"print(len(ready_data1a))\n",
|
||||
"print(len(ready_data2a))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -379,10 +381,16 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"x1 = 0\n",
|
||||
"print(type(ready_data1[0]))\n",
|
||||
"ready_data1[0].iloc[:,0]\n",
|
||||
"# x1 = x1 + ready_data1[0].shape[0]"
|
||||
"x1a = 0\n",
|
||||
"print(type(ready_data1a[0]))\n",
|
||||
"ready_data1a[0].iloc[:,0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Checking length of the total array"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -391,16 +399,14 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"x1 = 0\n",
|
||||
"print(type(x1))\n",
|
||||
"for i in range(len(ready_data1)):\n",
|
||||
" # print(ready_data1[i].shape)\n",
|
||||
" # print(ready_data1[i].)\n",
|
||||
" print(type(ready_data1[i].shape[0]))\n",
|
||||
" x1 = x1 + ready_data1[i].shape[0]\n",
|
||||
" print(type(x1))\n",
|
||||
"x1a = 0\n",
|
||||
"print(type(x1a))\n",
|
||||
"for i in range(len(ready_data1a)):\n",
|
||||
" print(type(ready_data1a[i].shape[0]))\n",
|
||||
" x1a = x1a + ready_data1a[i].shape[0]\n",
|
||||
" print(type(x1a))\n",
|
||||
"\n",
|
||||
"print(x1)"
|
||||
"print(x1a)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -409,13 +415,20 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"x2 = 0\n",
|
||||
"x2a = 0\n",
|
||||
"\n",
|
||||
"for i in range(len(ready_data2)):\n",
|
||||
" print(ready_data2[i].shape)\n",
|
||||
" x2 = x2 + ready_data2[i].shape[0]\n",
|
||||
"for i in range(len(ready_data2a)):\n",
|
||||
" print(ready_data2a[i].shape)\n",
|
||||
" x2a = x2a + ready_data2a[i].shape[0]\n",
|
||||
"\n",
|
||||
"print(x2)"
|
||||
"print(x2a)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Flatten 6 array into one array"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -424,28 +437,22 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"x1 = ready_data1[0]\n",
|
||||
"# print(x1)\n",
|
||||
"print(type(x1))\n",
|
||||
"for i in range(len(ready_data1) - 1):\n",
|
||||
" #print(i)\n",
|
||||
" x1 = np.concatenate((x1, ready_data1[i + 1]), axis=0)\n",
|
||||
"# print(x1)\n",
|
||||
"pd.DataFrame(x1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"x2 = ready_data2[0]\n",
|
||||
"# Combine all dataframes in ready_data1a into a single dataframe\n",
|
||||
"if ready_data1a: # Check if the list is not empty\n",
|
||||
" # Use pandas concat function instead of iterative concatenation\n",
|
||||
" combined_data = pd.concat(ready_data1a, axis=0, ignore_index=True)\n",
|
||||
" \n",
|
||||
" print(f\"Type of combined data: {type(combined_data)}\")\n",
|
||||
" print(f\"Shape of combined data: {combined_data.shape}\")\n",
|
||||
" \n",
|
||||
" # Display the combined dataframe\n",
|
||||
" combined_data\n",
|
||||
"else:\n",
|
||||
" print(\"No data available in ready_data1a list\")\n",
|
||||
" combined_data = pd.DataFrame()\n",
|
||||
"\n",
|
||||
"for i in range(len(ready_data2) - 1):\n",
|
||||
" #print(i)\n",
|
||||
" x2 = np.concatenate((x2, ready_data2[i + 1]), axis=0)\n",
|
||||
"pd.DataFrame(x2)"
|
||||
"# Store the result in x1a for compatibility with subsequent code\n",
|
||||
"x1a = combined_data"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -454,20 +461,29 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(x1.shape)\n",
|
||||
"print(x2.shape)"
|
||||
"# Combine all dataframes in ready_data1a into a single dataframe\n",
|
||||
"if ready_data2a: # Check if the list is not empty\n",
|
||||
" # Use pandas concat function instead of iterative concatenation\n",
|
||||
" combined_data = pd.concat(ready_data2a, axis=0, ignore_index=True)\n",
|
||||
" \n",
|
||||
" print(f\"Type of combined data: {type(combined_data)}\")\n",
|
||||
" print(f\"Shape of combined data: {combined_data.shape}\")\n",
|
||||
" \n",
|
||||
" # Display the combined dataframe\n",
|
||||
" combined_data\n",
|
||||
"else:\n",
|
||||
" print(\"No data available in ready_data1a list\")\n",
|
||||
" combined_data = pd.DataFrame()\n",
|
||||
"\n",
|
||||
"# Store the result in x1a for compatibility with subsequent code\n",
|
||||
"x2a = combined_data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"y_1 = [1,1,1,1]\n",
|
||||
"y_2 = [0,1,1,1]\n",
|
||||
"y_3 = [1,0,1,1]\n",
|
||||
"y_4 = [1,1,0,0]"
|
||||
"### Creating the label"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -490,39 +506,41 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"y_data = [y_1, y_2, y_3, y_4, y_5, y_6]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for i in range(len(y_data)):\n",
|
||||
" print(ready_data1[i].shape[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for i in range(len(y_data)):\n",
|
||||
" y_data[i] = [y_data[i]]*ready_data1[i].shape[0]\n",
|
||||
" y_data[i] = np.array(y_data[i])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"y_data = [y_1, y_2, y_3, y_4, y_5, y_6]\n",
|
||||
"y_data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for i in range(len(y_data)):\n",
|
||||
" print(ready_data1a[i].shape[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"for i in range(len(y_data)):\n",
|
||||
" y_data[i] = [y_data[i]]*ready_data1a[i].shape[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"len(y_data[0])\n",
|
||||
"# y_data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -552,10 +570,10 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.model_selection import train_test_split\n",
|
||||
"from src.ml.model_selection import create_ready_data\n",
|
||||
"\n",
|
||||
"x_train1, x_test1, y_train, y_test = train_test_split(x1, y, test_size=0.2, random_state=2)\n",
|
||||
"x_train2, x_test2, y_train, y_test = train_test_split(x2, y, test_size=0.2, random_state=2)"
|
||||
"X1a, y = create_ready_data('D:/thesis/data/converted/raw/sensor1')\n",
|
||||
"X2a, y = create_ready_data('D:/thesis/data/converted/raw/sensor2')"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -565,6 +583,17 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.model_selection import train_test_split\n",
|
||||
"\n",
|
||||
"x_train1, x_test1, y_train, y_test = train_test_split(X1a, y, test_size=0.2, random_state=2)\n",
|
||||
"x_train2, x_test2, y_train, y_test = train_test_split(X2a, y, test_size=0.2, random_state=2)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.metrics import accuracy_score\n",
|
||||
"from sklearn.ensemble import RandomForestClassifier, BaggingClassifier\n",
|
||||
"from sklearn.tree import DecisionTreeClassifier\n",
|
||||
@@ -592,130 +621,15 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"accuracies1 = []\n",
|
||||
"accuracies2 = []\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# 1. Random Forest\n",
|
||||
"rf_model = RandomForestClassifier()\n",
|
||||
"rf_model.fit(x_train1, y_train)\n",
|
||||
"rf_pred1 = rf_model.predict(x_test1)\n",
|
||||
"acc1 = accuracy_score(y_test, rf_pred1) * 100\n",
|
||||
"accuracies1.append(acc1)\n",
|
||||
"# format with color coded if acc1 > 90\n",
|
||||
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
||||
"print(\"Random Forest Accuracy for sensor 1:\", acc1)\n",
|
||||
"rf_model.fit(x_train2, y_train)\n",
|
||||
"rf_pred2 = rf_model.predict(x_test2)\n",
|
||||
"acc2 = accuracy_score(y_test, rf_pred2) * 100\n",
|
||||
"accuracies2.append(acc2)\n",
|
||||
"# format with color coded if acc2 > 90\n",
|
||||
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
|
||||
"print(\"Random Forest Accuracy for sensor 2:\", acc2)\n",
|
||||
"# print(rf_pred)\n",
|
||||
"# print(y_test)\n",
|
||||
"\n",
|
||||
"# 2. Bagged Trees\n",
|
||||
"bagged_model = BaggingClassifier(estimator=DecisionTreeClassifier(), n_estimators=10)\n",
|
||||
"bagged_model.fit(x_train1, y_train)\n",
|
||||
"bagged_pred1 = bagged_model.predict(x_test1)\n",
|
||||
"acc1 = accuracy_score(y_test, bagged_pred1) * 100\n",
|
||||
"accuracies1.append(acc1)\n",
|
||||
"# format with color coded if acc1 > 90\n",
|
||||
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
||||
"print(\"Bagged Trees Accuracy for sensor 1:\", acc1)\n",
|
||||
"bagged_model.fit(x_train2, y_train)\n",
|
||||
"bagged_pred2 = bagged_model.predict(x_test2)\n",
|
||||
"acc2 = accuracy_score(y_test, bagged_pred2) * 100\n",
|
||||
"accuracies2.append(acc2)\n",
|
||||
"# format with color coded if acc2 > 90\n",
|
||||
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
|
||||
"print(\"Bagged Trees Accuracy for sensor 2:\", acc2)\n",
|
||||
"\n",
|
||||
"# 3. Decision Tree\n",
|
||||
"dt_model = DecisionTreeClassifier()\n",
|
||||
"dt_model.fit(x_train1, y_train)\n",
|
||||
"dt_pred1 = dt_model.predict(x_test1)\n",
|
||||
"acc1 = accuracy_score(y_test, dt_pred1) * 100\n",
|
||||
"accuracies1.append(acc1)\n",
|
||||
"# format with color coded if acc1 > 90\n",
|
||||
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
||||
"print(\"Decision Tree Accuracy for sensor 1:\", acc1)\n",
|
||||
"dt_model.fit(x_train2, y_train)\n",
|
||||
"dt_pred2 = dt_model.predict(x_test2)\n",
|
||||
"acc2 = accuracy_score(y_test, dt_pred2) * 100\n",
|
||||
"accuracies2.append(acc2)\n",
|
||||
"# format with color coded if acc2 > 90\n",
|
||||
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
|
||||
"print(\"Decision Tree Accuracy for sensor 2:\", acc2)\n",
|
||||
"\n",
|
||||
"# 4. KNeighbors\n",
|
||||
"knn_model = KNeighborsClassifier()\n",
|
||||
"knn_model.fit(x_train1, y_train)\n",
|
||||
"knn_pred1 = knn_model.predict(x_test1)\n",
|
||||
"acc1 = accuracy_score(y_test, knn_pred1) * 100\n",
|
||||
"accuracies1.append(acc1)\n",
|
||||
"# format with color coded if acc1 > 90\n",
|
||||
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
||||
"print(\"KNeighbors Accuracy for sensor 1:\", acc1)\n",
|
||||
"knn_model.fit(x_train2, y_train)\n",
|
||||
"knn_pred2 = knn_model.predict(x_test2)\n",
|
||||
"acc2 = accuracy_score(y_test, knn_pred2) * 100\n",
|
||||
"accuracies2.append(acc2)\n",
|
||||
"# format with color coded if acc2 > 90\n",
|
||||
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
|
||||
"print(\"KNeighbors Accuracy for sensor 2:\", acc2)\n",
|
||||
"\n",
|
||||
"# 5. Linear Discriminant Analysis\n",
|
||||
"lda_model = LinearDiscriminantAnalysis()\n",
|
||||
"lda_model.fit(x_train1, y_train)\n",
|
||||
"lda_pred1 = lda_model.predict(x_test1)\n",
|
||||
"acc1 = accuracy_score(y_test, lda_pred1) * 100\n",
|
||||
"accuracies1.append(acc1)\n",
|
||||
"# format with color coded if acc1 > 90\n",
|
||||
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
||||
"print(\"Linear Discriminant Analysis Accuracy for sensor 1:\", acc1)\n",
|
||||
"lda_model.fit(x_train2, y_train)\n",
|
||||
"lda_pred2 = lda_model.predict(x_test2)\n",
|
||||
"acc2 = accuracy_score(y_test, lda_pred2) * 100\n",
|
||||
"accuracies2.append(acc2)\n",
|
||||
"# format with color coded if acc2 > 90\n",
|
||||
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
|
||||
"print(\"Linear Discriminant Analysis Accuracy for sensor 2:\", acc2)\n",
|
||||
"\n",
|
||||
"# 6. Support Vector Machine\n",
|
||||
"svm_model = SVC()\n",
|
||||
"svm_model.fit(x_train1, y_train)\n",
|
||||
"svm_pred1 = svm_model.predict(x_test1)\n",
|
||||
"acc1 = accuracy_score(y_test, svm_pred1) * 100\n",
|
||||
"accuracies1.append(acc1)\n",
|
||||
"# format with color coded if acc1 > 90\n",
|
||||
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
||||
"print(\"Support Vector Machine Accuracy for sensor 1:\", acc1)\n",
|
||||
"svm_model.fit(x_train2, y_train)\n",
|
||||
"svm_pred2 = svm_model.predict(x_test2)\n",
|
||||
"acc2 = accuracy_score(y_test, svm_pred2) * 100\n",
|
||||
"accuracies2.append(acc2)\n",
|
||||
"# format with color coded if acc2 > 90\n",
|
||||
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
|
||||
"print(\"Support Vector Machine Accuracy for sensor 2:\", acc2)\n",
|
||||
"\n",
|
||||
"# 7. XGBoost\n",
|
||||
"xgboost_model = XGBClassifier()\n",
|
||||
"xgboost_model.fit(x_train1, y_train)\n",
|
||||
"xgboost_pred1 = xgboost_model.predict(x_test1)\n",
|
||||
"acc1 = accuracy_score(y_test, xgboost_pred1) * 100\n",
|
||||
"accuracies1.append(acc1)\n",
|
||||
"# format with color coded if acc1 > 90\n",
|
||||
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
||||
"print(\"XGBoost Accuracy:\", acc1)\n",
|
||||
"xgboost_model.fit(x_train2, y_train)\n",
|
||||
"xgboost_pred2 = xgboost_model.predict(x_test2)\n",
|
||||
"acc2 = accuracy_score(y_test, xgboost_pred2) * 100\n",
|
||||
"accuracies2.append(acc2)\n",
|
||||
"# format with color coded if acc2 > 90\n",
|
||||
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
|
||||
"print(\"XGBoost Accuracy:\", acc2)"
|
||||
"def train_and_evaluate_model(model, model_name, sensor_label, x_train, y_train, x_test, y_test):\n",
|
||||
" model.fit(x_train, y_train)\n",
|
||||
" y_pred = model.predict(x_test)\n",
|
||||
" accuracy = accuracy_score(y_test, y_pred) * 100\n",
|
||||
" return {\n",
|
||||
" \"model\": model_name,\n",
|
||||
" \"sensor\": sensor_label,\n",
|
||||
" \"accuracy\": accuracy\n",
|
||||
" }"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -724,8 +638,59 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(accuracies1)\n",
|
||||
"print(accuracies2)"
|
||||
"# Define models for sensor1\n",
|
||||
"models_sensor1 = {\n",
|
||||
" # \"Random Forest\": RandomForestClassifier(),\n",
|
||||
" # \"Bagged Trees\": BaggingClassifier(estimator=DecisionTreeClassifier(), n_estimators=10),\n",
|
||||
" # \"Decision Tree\": DecisionTreeClassifier(),\n",
|
||||
" # \"KNN\": KNeighborsClassifier(),\n",
|
||||
" # \"LDA\": LinearDiscriminantAnalysis(),\n",
|
||||
" \"SVM\": SVC(),\n",
|
||||
" \"XGBoost\": XGBClassifier()\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"results_sensor1 = []\n",
|
||||
"for name, model in models_sensor1.items():\n",
|
||||
" res = train_and_evaluate_model(model, name, \"sensor1\", x_train1, y_train, x_test1, y_test)\n",
|
||||
" results_sensor1.append(res)\n",
|
||||
" print(f\"{name} on sensor1: Accuracy = {res['accuracy']:.2f}%\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"models_sensor2 = {\n",
|
||||
" # \"Random Forest\": RandomForestClassifier(),\n",
|
||||
" # \"Bagged Trees\": BaggingClassifier(estimator=DecisionTreeClassifier(), n_estimators=10),\n",
|
||||
" # \"Decision Tree\": DecisionTreeClassifier(),\n",
|
||||
" # \"KNN\": KNeighborsClassifier(),\n",
|
||||
" # \"LDA\": LinearDiscriminantAnalysis(),\n",
|
||||
" \"SVM\": SVC(),\n",
|
||||
" \"XGBoost\": XGBClassifier()\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"results_sensor2 = []\n",
|
||||
"for name, model in models_sensor2.items():\n",
|
||||
" res = train_and_evaluate_model(model, name, \"sensor2\", x_train2, y_train, x_test2, y_test)\n",
|
||||
" results_sensor2.append(res)\n",
|
||||
" print(f\"{name} on sensor2: Accuracy = {res['accuracy']:.2f}%\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"all_results = {\n",
|
||||
" \"sensor1\": results_sensor1,\n",
|
||||
" \"sensor2\": results_sensor2\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"print(all_results)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -737,36 +702,48 @@
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"\n",
|
||||
"models = [rf_model, bagged_model, dt_model, knn_model, lda_model, svm_model, xgboost_model]\n",
|
||||
"model_names = [\"Random Forest\", \"Bagged Trees\", \"Decision Tree\", \"KNN\", \"LDA\", \"SVM\", \"XGBoost\"]\n",
|
||||
"def prepare_plot_data(results_dict):\n",
|
||||
" # Gather unique model names\n",
|
||||
" models_set = {entry['model'] for sensor in results_dict.values() for entry in sensor}\n",
|
||||
" models = sorted(list(models_set))\n",
|
||||
" \n",
|
||||
" # Create dictionaries mapping sensor -> accuracy list ordered by model name\n",
|
||||
" sensor_accuracies = {}\n",
|
||||
" for sensor, entries in results_dict.items():\n",
|
||||
" # Build a mapping: model -> accuracy for the given sensor\n",
|
||||
" mapping = {entry['model']: entry['accuracy'] for entry in entries}\n",
|
||||
" # Order the accuracies consistent with the sorted model names\n",
|
||||
" sensor_accuracies[sensor] = [mapping.get(model, 0) for model in models]\n",
|
||||
" \n",
|
||||
" return models, sensor_accuracies\n",
|
||||
"\n",
|
||||
"bar_width = 0.35 # Width of each bar\n",
|
||||
"index = np.arange(len(model_names)) # Index for the bars\n",
|
||||
"def plot_accuracies(models, sensor_accuracies):\n",
|
||||
" bar_width = 0.35\n",
|
||||
" x = np.arange(len(models))\n",
|
||||
" sensors = list(sensor_accuracies.keys())\n",
|
||||
" \n",
|
||||
" plt.figure(figsize=(10, 6))\n",
|
||||
" # Assume two sensors for plotting grouped bars\n",
|
||||
" plt.bar(x - bar_width/2, sensor_accuracies[sensors[0]], width=bar_width, color='blue', label=sensors[0])\n",
|
||||
" plt.bar(x + bar_width/2, sensor_accuracies[sensors[1]], width=bar_width, color='orange', label=sensors[1])\n",
|
||||
" \n",
|
||||
" # Add text labels on top of bars\n",
|
||||
" for i, (a1, a2) in enumerate(zip(sensor_accuracies[sensors[0]], sensor_accuracies[sensors[1]])):\n",
|
||||
" plt.text(x[i] - bar_width/2, a1 + 0.1, f\"{a1:.2f}%\", ha='center', va='bottom', color='black')\n",
|
||||
" plt.text(x[i] + bar_width/2, a2 + 0.1, f\"{a2:.2f}%\", ha='center', va='bottom', color='black')\n",
|
||||
" \n",
|
||||
" plt.xlabel('Model Name')\n",
|
||||
" plt.ylabel('Accuracy (%)')\n",
|
||||
" plt.title('Accuracy of Classifiers for Each Sensor')\n",
|
||||
" plt.xticks(x, models)\n",
|
||||
" plt.legend()\n",
|
||||
" plt.ylim(0, 105)\n",
|
||||
" plt.tight_layout()\n",
|
||||
" plt.show()\n",
|
||||
"\n",
|
||||
"# Plotting the bar graph\n",
|
||||
"plt.figure(figsize=(14, 8))\n",
|
||||
"\n",
|
||||
"# Bar plot for Sensor 1\n",
|
||||
"plt.bar(index, accuracies1, width=bar_width, color='blue', label='Sensor 1')\n",
|
||||
"\n",
|
||||
"# Bar plot for Sensor 2\n",
|
||||
"plt.bar(index + bar_width, accuracies2, width=bar_width, color='orange', label='Sensor 2')\n",
|
||||
"\n",
|
||||
"# Add values on top of each bar\n",
|
||||
"for i, acc1, acc2 in zip(index, accuracies1, accuracies2):\n",
|
||||
" plt.text(i, acc1 + .1, f'{acc1:.2f}%', ha='center', va='bottom', color='black')\n",
|
||||
" plt.text(i + bar_width, acc2 + 1, f'{acc2:.2f}%', ha='center', va='bottom', color='black')\n",
|
||||
"\n",
|
||||
"# Customize the plot\n",
|
||||
"plt.xlabel('Model Name →')\n",
|
||||
"plt.ylabel('Accuracy →')\n",
|
||||
"plt.title('Accuracy of classifiers for Sensors 1 and 2 with 513 features')\n",
|
||||
"plt.xticks(index + bar_width / 2, model_names) # Set x-tick positions\n",
|
||||
"plt.legend()\n",
|
||||
"plt.ylim(0, 100)\n",
|
||||
"\n",
|
||||
"# Show the plot\n",
|
||||
"plt.show()\n"
|
||||
"# Use the functions\n",
|
||||
"models, sensor_accuracies = prepare_plot_data(all_results)\n",
|
||||
"plot_accuracies(models, sensor_accuracies)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -787,51 +764,10 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def spectograph(data_dir: str):\n",
|
||||
" # print(os.listdir(data_dir))\n",
|
||||
" for damage in os.listdir(data_dir):\n",
|
||||
" # print(damage)\n",
|
||||
" d = os.path.join(data_dir, damage)\n",
|
||||
" # print(d)\n",
|
||||
" for file in os.listdir(d):\n",
|
||||
" # print(file)\n",
|
||||
" f = os.path.join(d, file)\n",
|
||||
" print(f)\n",
|
||||
" # sensor1 = pd.read_csv(f, skiprows=1, sep=';')\n",
|
||||
" # sensor2 = pd.read_csv(f, skiprows=1, sep=';')\n",
|
||||
"from src.ml.model_selection import create_ready_data\n",
|
||||
"\n",
|
||||
" # df1 = pd.DataFrame()\n",
|
||||
"\n",
|
||||
" # df1['s1'] = sensor1[sensor1.columns[-1]]\n",
|
||||
" # df1['s2'] = sensor2[sensor2.columns[-1]]\n",
|
||||
"ed\n",
|
||||
" # # Combined Plot for sensor 1 and sensor 2 from data1 file in which motor is operated at 800 rpm\n",
|
||||
"\n",
|
||||
" # plt.plot(df1['s2'], label='sensor 2')\n",
|
||||
" # plt.plot(df1['s1'], label='sensor 1')\n",
|
||||
" # plt.xlabel(\"Number of samples\")\n",
|
||||
" # plt.ylabel(\"Amplitude\")\n",
|
||||
" # plt.title(\"Raw vibration signal\")\n",
|
||||
" # plt.legend()\n",
|
||||
" # plt.show()\n",
|
||||
"\n",
|
||||
" # from scipy import signal\n",
|
||||
" # from scipy.signal.windows import hann\n",
|
||||
"\n",
|
||||
" # vibration_data = df1['s1']\n",
|
||||
"\n",
|
||||
" # # Applying STFT\n",
|
||||
" # window_size = 1024\n",
|
||||
" # hop_size = 512\n",
|
||||
" # window = hann(window_size) # Creating a Hanning window\n",
|
||||
" # frequencies, times, Zxx = signal.stft(vibration_data, window=window, nperseg=window_size, noverlap=window_size - hop_size)\n",
|
||||
"\n",
|
||||
" # # Plotting the STFT Data\n",
|
||||
" # plt.pcolormesh(times, frequencies, np.abs(Zxx), shading='gouraud')\n",
|
||||
" # plt.title(f'STFT Magnitude for case 1 signal sensor 1 ')\n",
|
||||
" # plt.ylabel('Frequency [Hz]')\n",
|
||||
" # plt.xlabel('Time [sec]')\n",
|
||||
" # plt.show()"
|
||||
"X1b, y = create_ready_data('D:/thesis/data/converted/raw_B/sensor1')\n",
|
||||
"X2b, y = create_ready_data('D:/thesis/data/converted/raw_B/sensor2')"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -840,7 +776,115 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"spectograph('D:/thesis/data/converted/raw')"
|
||||
"y.shape"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.metrics import accuracy_score, classification_report\n",
|
||||
"# 4. Validate on Dataset B\n",
|
||||
"y_pred_svm = svm_model.predict(X1b)\n",
|
||||
"\n",
|
||||
"# 5. Evaluate\n",
|
||||
"print(\"Accuracy on Dataset B:\", accuracy_score(y, y_pred_svm))\n",
|
||||
"print(classification_report(y, y_pred_svm))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.metrics import accuracy_score, classification_report\n",
|
||||
"# 4. Validate on Dataset B\n",
|
||||
"y_pred = rf_model2.predict(X2b)\n",
|
||||
"\n",
|
||||
"# 5. Evaluate\n",
|
||||
"print(\"Accuracy on Dataset B:\", accuracy_score(y, y_pred))\n",
|
||||
"print(classification_report(y, y_pred))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"y_predict = svm_model2.predict(X2b.iloc[[5312],:])\n",
|
||||
"print(y_predict)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"y[5312]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Confusion Matrix"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"cm = confusion_matrix(y, y_pred_svm) # -> ndarray\n",
|
||||
"\n",
|
||||
"# get the class labels\n",
|
||||
"labels = svm_model.classes_\n",
|
||||
"\n",
|
||||
"# Plot\n",
|
||||
"disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=labels)\n",
|
||||
"disp.plot(cmap=plt.cm.Blues) # You can change colormap\n",
|
||||
"plt.title(\"SVM Sensor1 CM Train w/ Dataset A Val w/ Dataset B\")\n",
|
||||
"plt.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Self-test CM"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# 1. Predict sensor 1 on Dataset A\n",
|
||||
"y_train_pred = svm_model.predict(x_train1)\n",
|
||||
"\n",
|
||||
"# 2. Import confusion matrix tools\n",
|
||||
"from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"\n",
|
||||
"# 3. Create and plot confusion matrix\n",
|
||||
"cm_train = confusion_matrix(y_train, y_train_pred)\n",
|
||||
"labels = svm_model.classes_\n",
|
||||
"\n",
|
||||
"disp = ConfusionMatrixDisplay(confusion_matrix=cm_train, display_labels=labels)\n",
|
||||
"disp.plot(cmap=plt.cm.Blues)\n",
|
||||
"plt.title(\"Confusion Matrix: Train & Test on Dataset A\")\n",
|
||||
"plt.show()\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
57
code/src/ml/model_selection.py
Normal file
57
code/src/ml/model_selection.py
Normal file
@@ -0,0 +1,57 @@
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
import os
|
||||
from sklearn.model_selection import train_test_split as sklearn_split
|
||||
|
||||
|
||||
def create_ready_data(
    stft_data_path: str,
    stratify: np.ndarray = None,
) -> tuple:
    """
    Load per-class STFT CSV files and build a feature matrix with labels.

    Every CSV file found directly in *stft_data_path* is treated as one
    class: each row of the i-th file (in sorted filename order) receives
    the integer label ``i``.

    Parameters:
    -----------
    stft_data_path : str
        Path to the directory containing STFT data files
        (e.g. 'data/converted/raw/sensor1')
    stratify : np.ndarray, optional
        Unused; kept only for backward compatibility with existing callers.

    Returns:
    --------
    tuple
        (X, y) where X is a pandas DataFrame with all files' rows stacked
        vertically and y is a 1-D numpy integer array of class labels
        aligned row-for-row with X. (The previous docstring incorrectly
        described a 4-tuple train/test split; no split is performed here.)
    """
    # Sort the directory listing so that the label assigned to each file is
    # deterministic — os.listdir() order is arbitrary and platform-dependent.
    frames = [
        pd.read_csv(os.path.join(stft_data_path, file_name))
        for file_name in sorted(os.listdir(stft_data_path))
    ]

    # Combine all per-file dataframes into a single feature matrix.
    if frames:  # Check that the directory actually contained CSV files
        X = pd.concat(frames, axis=0, ignore_index=True)
        print(f"Type of combined data: {type(X)}")
        print(f"Shape of combined data: {X.shape}")
    else:
        print("No data available in ready_data list")
        X = pd.DataFrame()

    # One integer label per source file, repeated once per row of that file.
    label_blocks = [
        np.full(frame.shape[0], label, dtype=int)
        for label, frame in enumerate(frames)
    ]

    if label_blocks:
        y = np.concatenate(label_blocks, axis=0)
    else:
        print("No labels available in y_data list")
        y = np.array([])

    return X, y
|
||||
@@ -962,4 +962,16 @@
|
||||
|
||||
@thesis{zotero-622,
  type = {thesis}
}
|
||||
|
||||
@thesis{rytter1993,
|
||||
title = {Vibrational {{Based Inspection}} of {{Civil Engineering Structures}}},
|
||||
author = {Rytter, Anders},
|
||||
date = {1993},
|
||||
institution = {Aalborg University},
|
||||
location = {Aalborg},
|
||||
url = {https://vbn.aau.dk/en/publications/vibrational-based-inspection-of-civil-engineering-structures},
|
||||
abstract = {The thesis has been written in relation to two different research projects. Firstly, an offshore test programme, Integrated Experimental/Numerical Analysis of the Dynamic behavior of offshore structures, which was performed at the department of Building Technology and Structural Engineering at the University of Aalborg from 1988 to 1991. Secondly, a research project, In-Field Vibration Based Inspection of Civil Engineering Structures, which has been performed as a pilot project by the Consulting Engineers Rambøll, Hannemann and Højlund in cooperation with the department of Building Technology and Structural Engineering at the University of Aalborg since the beginning of 1992. Both projects have been supported by the Danish Technical Research Council. Further, the first mentioned project was supported by the Danish Energy Agency. Their financial support is gratefully acknowledged.},
|
||||
langid = {english},
|
||||
keywords = {Beam,Bridges,Cracks,Damping,Offshore Platform,Piles,Structural Damage,VBI,Vibration Based Inspection}
|
||||
}
|
||||
|
||||
@@ -1,25 +1,68 @@
|
||||
\chapter{PENDAHULUAN}
|
||||
|
||||
\section{Latar Belakang}
|
||||
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc consequat lectus dolor, a commodo odio suscipit nec. Aliquam posuere elit eget tellus dapibus, auctor ornare mi porttitor. Donec auctor aliquet nisl, quis convallis ligula rutrum id. Duis tortor ipsum, scelerisque vestibulum viverra eu, maximus vel mi. Nullam volutpat nunc et varius tempor. Vivamus convallis mi eros, aliquam semper dui tincidunt a. Morbi nunc dui, accumsan ac arcu nec, condimentum efficitur mauris. Etiam sed mauris semper, volutpat justo eu, placerat mauris. Suspendisse at erat eu arcu gravida mattis et id nunc. Aliquam malesuada magna odio, ac dictum erat vestibulum a. Mauris vel nisi sit amet elit tempor bibendum sit amet a velit. Morbi dignissim facilisis placerat.\par
|
||||
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics[width=0.5\linewidth]{frontmatter/img/slice1.jpg}
|
||||
\caption{Enter Caption}
|
||||
\label{fig:enter-label}
|
||||
\end{figure}
|
||||
\indent Monitor Kesehatan Struktur (\textit{Structural Health Monitoring} atau SHM) merupakan proses pemantauan dan penilaian kondisi suatu struktur secara berkelanjutan menggunakan data sensor untuk mendeteksi kerusakan sejak dini. Salah satu komponen struktural yang umum digunakan dalam penyambungan adalah sambungan baut (\textit{bolt joint}), yang dikenal karena kemudahan dalam perakitan dan penggunaan ulang. Namun demikian, sambungan berulir ini rentan mengalami kelonggaran akibat beban kejut atau getaran terus-menerus \parencite{chen2017}. Kelonggaran baut yang tidak terdeteksi sejak dini dapat menyebabkan kerusakan serius pada struktur, sehingga identifikasi dini terhadap kerusakan sambungan baut menjadi krusial dalam bidang teknik sipil, mesin, dan kedirgantaraan.
|
||||
|
||||
Pellentesque vel accumsan lorem, id vulputate metus. Nulla mollis orci ante, et euismod erat venenatis eget. Proin tempus lobortis feugiat. Fusce vitae sem quis lacus iaculis dignissim ut eget turpis. Vivamus ut nisl in enim porttitor fringilla vel et mauris. Mauris quis porttitor magna. Pellentesque molestie viverra arcu at tincidunt. Maecenas non elit arcu.\par
|
||||
\indent Deteksi kelonggaran baut telah dilakukan melalui berbagai metode. Kelompok pertama adalah inspeksi \textit{in-situ}, seperti inspeksi visual atau penggunaan alat mekanis seperti kunci torsi dan palu. Meskipun sederhana dan murah, metode ini sulit untuk mendeteksi kerusakan pada tahap awal \parencite{j.h.park2015}. Metode palu lebih efektif dibanding visual untuk mendeteksi awal kelonggaran, tetapi akurasinya dapat terganggu oleh kebisingan lingkungan, serta memakan waktu bila diaplikasikan pada struktur dengan banyak sambungan seperti jembatan \parencite{j.h.park2015,wang2013}.
|
||||
|
||||
Etiam feugiat enim sit amet tortor interdum lobortis. Curabitur elementum faucibus sapien. Morbi eget facilisis lorem. In sed suscipit metus. Etiam porttitor, libero sit amet sodales hendrerit, libero dolor hendrerit nulla, sed convallis risus leo posuere metus. Cras gravida ac elit viverra ultrices. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia curae; Maecenas dictum urna elit, nec eleifend nulla mattis sit amet. Pellentesque suscipit metus vitae leo suscipit, a vehicula quam pretium. Sed eu est ut risus convallis hendrerit a vulputate justo. Nulla sollicitudin quam ut risus euismod, quis consequat dui mattis. Mauris id eros varius, pellentesque quam quis, venenatis tellus. Nulla vitae condimentum nisl. Vestibulum suscipit scelerisque dui, non posuere purus finibus nec. Nulla ultrices felis quis vestibulum porta. Suspendisse potenti.\par
|
||||
\indent Kelompok kedua menggunakan teknik berbasis penglihatan komputer seperti kamera dan pencitraan digital, termasuk deteksi rotasi kepala baut menggunakan CNN dan Faster R-CNN \parencite{zhang2020,zhao2019}. Meskipun teknik ini dapat mendeteksi kerusakan secara visual tanpa dipengaruhi oleh kebisingan akustik, tantangan tetap ada dalam hal penempatan kamera dan beban komputasi tinggi dari model deep learning, terutama dalam kondisi sempit seperti mesin kendaraan atau turbin.
|
||||
|
||||
Nam tempus tincidunt interdum. Pellentesque at ligula ac massa semper efficitur vitae non ante. Suspendisse potenti. Cras vitae interdum erat, nec facilisis urna. Nulla commodo porttitor tellus non posuere. Vestibulum tristique ut urna quis porttitor. Sed pellentesque lectus sit amet ultrices aliquam. Aliquam erat volutpat. Nam dictum eu erat a mollis. Donec eget nulla vel risus aliquet suscipit sed at libero.\par
|
||||
\indent Kelompok ketiga dan yang menjadi fokus penelitian ini adalah teknik berbasis sensor, terutama pendekatan berbasis getaran (\textit{vibration-based}). Metode ini tidak hanya efektif dalam mengatasi keterbatasan teknik sebelumnya, tetapi juga mampu mendeteksi kelonggaran baut pada tahap awal secara andal dan akurat \parencite{nichols2004,razi2013}. Dalam penelitian ini, deteksi dilakukan melalui data akselerasi struktur yang diambil dari titik-titik sambungan dalam \textit{sistem grid} yang mewakili koneksi baut secara arah kolom.
|
||||
|
||||
\indent Pada penelitian sebelumnya oleh \textcite{abdeljaber2017}, deteksi kerusakan struktur menggunakan 1-D Convolutional Neural Network (1-D CNN) telah diterapkan secara efektif pada struktur grid dengan 30 titik sensor. Namun, keterbatasan tetap muncul dalam hal kebutuhan sumber daya komputasi yang tinggi ketika memproses data mentah berdimensi besar dari semua sensor secara simultan \parencite{yang2020, liu2022}. Beberapa studi menyarankan bahwa transformasi sinyal seperti STFT dapat digunakan sebagai alternatif ekstraksi fitur sebelum dilakukan klasifikasi \parencite{shahid2022}. Pendekatan ini tidak hanya mengurangi kompleksitas perhitungan tetapi juga dapat mempertahankan karakteristik penting dari sinyal yang tereduksi.
|
||||
|
||||
\indent Oleh karena itu, penelitian ini mengadopsi pendekatan pengurangan jumlah sensor menjadi hanya dua per jalur kolom (atas dan bawah), merepresentasikan sambungan vertikal seperti susunan baut, dengan tujuan menyederhanakan model tanpa kehilangan akurasi deteksi kerusakan. Data diproses melalui transformasi STFT sebelum diklasifikasikan menggunakan model algoritma pembelajaran mesin klasik. Dengan mengevaluasi berbagai pengklasifikasi dan validasi silang antar kolom, studi ini berkontribusi dalam menciptakan sistem SHM yang efisien, rendah biaya, dan mudah diimplementasikan.
|
||||
|
||||
|
||||
\section{Rumusan Masalah}
|
||||
Untuk memandu arah penelitian ini, beberapa permasalahan utama yang akan dibahas adalah sebagai berikut:
|
||||
|
||||
Maecenas hendrerit pharetra bibendum. Donec ut tortor ac augue aliquam ullamcorper nec id eros. Quisque consectetur elementum ipsum vitae posuere. Sed ultricies ipsum nibh, vitae volutpat neque bibendum at. Morbi dictum metus eu bibendum malesuada. Nam scelerisque purus erat, id dictum nisl pretium vitae. Curabitur finibus commodo dui ac molestie. In sed sem ac dui dapibus ullamcorper. Aenean molestie nulla eu lorem maximus hendrerit. Vivamus viverra velit dolor, in vehicula eros facilisis at. Vivamus in rhoncus sem.
|
||||
\begin{enumerate}
|
||||
\item Apakah sinyal getaran yang hanya diperoleh dari sensor pada bagian atas dan bawah suatu jalur kolom masih mampu merepresentasikan fitur-fitur penting yang diperlukan untuk mengklasifikasikan kerusakan struktur secara akurat?
|
||||
|
||||
\item Apakah penggabungan data dari beberapa jalur kolom dapat meningkatkan kemampuan generalisasi model, meskipun jumlah sensor pada tiap jalur dibatasi?
|
||||
|
||||
\item Apakah algoritma pemelajaran mesin klasik yang sederhana masih mampu menghasilkan model dengan kinerja yang cukup layak dibandingkan dengan model \textit{supervised} yang lebih kompleks ketika diterapkan pada skenario dengan input data sensor yang terbatas?
|
||||
\end{enumerate}
|
||||
% \section{Identifikasi Masalah}
|
||||
% \begin{itemize}
|
||||
% \item Kebanyakan kerangka kerja pada monitoring kesehatan struktur membutuhkan deretan sensor yang banyak, hal ini dibutuhkan biaya yang tinggi dan kurang praktikal untuk banyak pengaplikasian.
|
||||
|
||||
% \item Banyak model dengan performa tinggi bergantung pada teknik pemelajaran mendalam, sehingga dibutuhkan sumberdaya komputasi yang tinggi dan memungkinkan kurangnya kemudahan dan keterjangkauan untuk aplikasikan.
|
||||
|
||||
% \item Kurangnya kesederhanaan, pendeketan umum yang menyeimbangkan penggunaan sensor dengan keandalan dalam lokalisasi kerusakan.
|
||||
% \end{itemize}
|
||||
\section{Lingkup Penelitian}
|
||||
Studi ini berfokus pada dataset yang tersedia secara publik didapat dari Queen's University Grandstand Simulator (QUGS), sebuah kerangka besi level laboratorium yang dipasang dengan tiga puluh titik sensor akselerometer dan \textit{white shaker noise}. Riset terdahulu telah dilakukan pengaplikasian pemelajaran mesin jaringan saraf terhadap seluruh sensor yang terpasang penuh pada setiap titik \textit{joint} untuk mencapai akurasi yang tinggi. Akan tetapi, pada praktiknya, instrumentasi penuh seperti ini terkadang kurang efektif dari segi biaya dan kurang layak dalam skala besar.
|
||||
|
||||
\section{Tujuan Penelitian}
|
||||
\begin{enumerate}
|
||||
\item Mengembangkan alur sistem (\textit{pipeline}) pemantauan kesehatan struktur (Structural Health Monitoring/SHM) yang disederhanakan dengan hanya menggunakan sepasang sensor di ujung-ujung struktur.
|
||||
|
||||
% \item Memperlakukan setiap grup kolom sensor sebagai elemen balok satu dimensi yang disederhanakan, dan mengevaluasi apakah karakteristik kerusakan tetap terjaga dalam energi getaran yang ditransmisikan antara kedua ujungnya.
|
||||
|
||||
% \item Menyusun setiap grup kolom sebagai satu dataset terpisah dan melakukan lima pengujian berbeda, di mana masing-masing grup kolom berperan sebagai data validasi secara bergantian.
|
||||
|
||||
% \item Menyertakan data dari setiap grup kolom ke dalam data pelatihan untuk membentuk satu model umum yang dapat digunakan untuk seluruh grup kolom.
|
||||
|
||||
\item Mengeksplorasi kemungkinan generalisasi satu model terhadap berbagai jalur kolom hanya dengan memanfaatkan data dari sensor pada kedua ujung kolom.
|
||||
\end{enumerate}
|
||||
|
||||
% Dalam merespon hal tersebut, penelitian ini memperkenalkan pendekatan baru yang menekankan efisiensi pada penanganan data dan interpretasi fisik. Data pada sensor-sensor yang terpasang pada struktur grid ini dikelompokkan menjadi beberapa grup kolom, dan hanya menyisakan sensor awal dan sensor paling akhir dari setiap grup sensor sebagai input pengklasifikasian. Terdapat hipotesis bahwa energi getaran bergerak di sepanjang jalur kolom terjaga secara cukup baik antara ujung-ujung sensor untuk memungkinkan algoritma pemelajaran mesin, seperti Support-Vector Machine (SVM), Bagged Trees, Random Forest, Decision Tree, KNN, LDA, dan XGBoost, medeteksi dan mengklasifikasi secara akurat letak kerusakan.
|
||||
|
||||
\section{Manfaat Penelitian}
|
||||
% \subsubsection{Dolor}
|
||||
|
||||
Penelitian ini memberikan beberapa manfaat yang diharapkan dapat berkontribusi dalam pengembangan sistem deteksi kerusakan struktur, antara lain:
|
||||
|
||||
\begin{enumerate}
|
||||
\item Penelitian ini tidak berfokus pada pengembangan arsitektur model baru maupun penerapan \textit{transfer learning}, melainkan pada perancangan alur (\textit{pipeline}) klasifikasi yang sederhana dan mudah dipahami sebagai solusi tahap awal untuk pengembangan sistem monitor kesehatan struktur.
|
||||
|
||||
\item Dengan pemilihan titik sensor strategis yang terbatas (hanya di ujung atas dan bawah jalur kolom \textit{grid}) serta prapemrosesan berbasis transformasi STFT, penelitian ini menunjukkan bahwa efisiensi dapat dicapai tanpa mengorbankan akurasi secara signifikan.
|
||||
|
||||
\item Studi ini membuktikan bahwa algoritma pembelajaran mesin klasik seperti \gls{svm}, KNN, dan LDA masih mampu memberikan performa model yang kompetitif dalam klasifikasi kerusakan, apabila dipadukan dengan ekstraksi fitur yang tepat.
|
||||
|
||||
\item Hasil penelitian ini diharapkan dapat menjadi alternatif sistem SHM yang lebih terjangkau dan praktis untuk diterapkan pada struktur nyata, khususnya dalam kondisi keterbatasan sumber daya.
|
||||
|
||||
\item Rangkaian eksperimen dan pendekatan sistematis dalam penelitian ini dapat dijadikan tolok ukur atau \textit{baseline} untuk studi komparatif selanjutnya dan pengembangan model arsitektur yang lebih kompleks.
|
||||
\end{enumerate}
|
||||
@@ -3,7 +3,7 @@ Alur keseluruhan penelitian ini dilakukan melalui tahapan-tahapan sebagai beriku
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\includegraphics[width=0.3\linewidth]{chapters/id/flow.png}
|
||||
\includegraphics[width=0.3\linewidth]{chapters/img/flow.png}
|
||||
\caption{Diagram alir tahapan penelitian}
|
||||
\label{fig:flowchart}
|
||||
\end{figure}
|
||||
|
||||
BIN
latex/chapters/img/accel393.png
Normal file
BIN
latex/chapters/img/accel393.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 325 KiB |
BIN
latex/chapters/img/datalogger.png
Normal file
BIN
latex/chapters/img/datalogger.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 1.1 MiB |
BIN
latex/chapters/img/original_data.png
Normal file
BIN
latex/chapters/img/original_data.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 793 KiB |
BIN
latex/chapters/img/shaker.png
Normal file
BIN
latex/chapters/img/shaker.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 148 KiB |
0
latex/frontmatter/acknowledgement.tex
Normal file
0
latex/frontmatter/acknowledgement.tex
Normal file
15
latex/frontmatter/acronym.tex
Normal file
15
latex/frontmatter/acronym.tex
Normal file
@@ -0,0 +1,15 @@
|
||||
% Define an abbreviation (acronym)
|
||||
% Acronyms for the thesis
|
||||
\newacronym{ml}{ML}{machine learning}
|
||||
\newacronym{stft}{STFT}{short-time Fourier transform}
|
||||
\newacronym{ai}{AI}{artificial intelligence}
|
||||
\newacronym{dl}{DL}{deep learning}
|
||||
\newacronym{nn}{NN}{neural network}
|
||||
\newacronym{fft}{FFT}{fast Fourier transform}
|
||||
\newacronym{svm}{SVM}{support vector machine}
|
||||
\newacronym{cnn}{CNN}{convolutional neural network}
|
||||
\newacronym{rnn}{RNN}{recurrent neural network}
|
||||
\newacronym{vbi}{VBI}{vibration-based inspection}
|
||||
\newacronym{shm}{SHM}{structural health monitoring}
|
||||
\newacronym{fea}{FEA}{finite element analysis}
|
||||
\newacronym{1d-cnn}{1-D CNN}{\textit{One-Dimensional Convolutional Neural Network}}
|
||||
86
latex/frontmatter/glossaries.tex
Normal file
86
latex/frontmatter/glossaries.tex
Normal file
@@ -0,0 +1,86 @@
|
||||
% Define the Indonesian term and link it to the English term
|
||||
\newglossaryentry{jaringansaraf}{
|
||||
name=Jaringan Saraf,
|
||||
description={The Indonesian term for \gls{nn}}
|
||||
}
|
||||
% \newglossaryentry{pemelajaranmesin}{
|
||||
% name=Pemelajaran Mesin,
|
||||
% description={Lihat \gls{machinelearning}}
|
||||
% }
|
||||
|
||||
% Define the English term and link it to its acronym
|
||||
% \newglossaryentry{neuralnetwork}{
|
||||
% name=Neural Network,
|
||||
% description={A computational model inspired by the human brain, see \gls{nn}}
|
||||
% }
|
||||
% \newacronym
|
||||
% [description={statistical pattern recognition technique}]
|
||||
% {svm}{SVM}{support vector machine}
|
||||
% \newglossaryentry{machinelearning}{
|
||||
% name=Machine Learning,
|
||||
% description={A program or system that trains a model from input data. The trained model can make useful predictions from new (never-before-seen) data drawn from the same distribution as the one used to train the model.}}
|
||||
% \longnewglossaryentry{machinelearning}{name={machine learning}}
|
||||
% {A program or system that trains a model from input data. The trained model can make useful predictions from new (never-before-seen) data drawn from the same distribution as the one used to train the model.}
|
||||
% \newterm[see={machinelearning}]{pemelajaranmesin}
|
||||
% \newglossaryentry{pemelajaran mesin}{}
|
||||
% \addterm{machinelearning}{pemelajaran mesin}{pemelajaran mesin}{machine learning}{A program or system that trains a model from input data. The trained model can make useful predictions from new (never-before-seen) data drawn from the same distribution as the one used to train the model.}
|
||||
|
||||
\newglossaryentry{algoritma-genetika}{
|
||||
name={Algoritma Genetika},
|
||||
description={Kelas algoritma optimasi dan pencarian yang terinspirasi oleh proses evolusi biologis, seperti seleksi alam, mutasi, dan rekombinasi. Algoritma ini sering digunakan untuk menemukan solusi perkiraan untuk masalah yang kompleks dan sulit dipecahkan secara analitis.},
|
||||
sort={Algoritma Genetika}
|
||||
}
|
||||
|
||||
\newglossaryentry{deep-learning}{
|
||||
name={\textit{deep learning}},
|
||||
description={Bagian dari keluarga metode pembelajaran mesin yang lebih luas berdasarkan jaringan saraf tiruan dengan banyak lapisan (deep neural networks). Arsitektur ini memungkinkan model untuk belajar representasi data secara hierarkis, mulai dari fitur tingkat rendah hingga konsep abstrak tingkat tinggi.},
|
||||
sort={Pembelajaran Mendalam}
|
||||
}
|
||||
|
||||
\newglossaryentry{jaringan-saraf-tiruan}{
|
||||
name={Jaringan Saraf Tiruan (Artificial Neural Network)},
|
||||
description={Model komputasi yang terinspirasi oleh struktur dan fungsi jaringan saraf biologis di otak. JST terdiri dari unit pemrosesan yang saling terhubung (neuron) yang bekerja secara paralel untuk memproses informasi dan belajar dari data melalui penyesuaian bobot koneksi.},
|
||||
sort={Jaringan Saraf Tiruan}
|
||||
}
|
||||
|
||||
\newglossaryentry{pemrosesan-bahasa-alami}{
|
||||
name={Pemrosesan Bahasa Alami (Natural Language Processing)},
|
||||
description={Cabang ilmu komputer dan kecerdasan buatan yang berfokus pada interaksi antara komputer dan bahasa manusia. Tujuannya adalah untuk memungkinkan komputer memproses, memahami, menafsirkan, dan menghasilkan bahasa manusia dengan cara yang bermakna dan berguna.},
|
||||
sort={Pemrosesan Bahasa Alami}
|
||||
}
|
||||
|
||||
\newglossaryentry{pembelajaran-penguatan}{
|
||||
name={Pembelajaran Penguatan (Reinforcement Learning)},
|
||||
description={Area pembelajaran mesin yang berkaitan dengan bagaimana agen perangkat lunak harus mengambil tindakan dalam suatu lingkungan untuk memaksimalkan beberapa gagasan tentang imbalan kumulatif. Agen belajar melalui trial-and-error, menerima umpan balik berupa imbalan atau hukuman.},
|
||||
sort={Pembelajaran Penguatan}
|
||||
}
|
||||
|
||||
\newglossaryentry{visi-komputer}{
|
||||
name={Visi Komputer (Computer Vision)},
|
||||
description={Bidang interdisipliner yang membahas bagaimana komputer dapat dibuat untuk mendapatkan pemahaman tingkat tinggi dari gambar atau video digital. Dari perspektif rekayasa, ia berupaya mengotomatiskan tugas-tugas yang dapat dilakukan oleh sistem visual manusia.},
|
||||
sort={Visi Komputer}
|
||||
}
|
||||
|
||||
\newglossaryentry{model-generatif}{
|
||||
name={Model Generatif},
|
||||
description={Jenis model statistik dalam pembelajaran mesin yang bertujuan untuk mempelajari distribusi probabilitas dari data pelatihan. Setelah dilatih, model ini dapat menghasilkan sampel data baru yang mirip dengan data pelatihan, seperti membuat gambar, teks, atau suara baru.},
|
||||
sort={Model Generatif}
|
||||
}
|
||||
|
||||
\newglossaryentry{heuristik}{
|
||||
name={Heuristik},
|
||||
description={Teknik pemecahan masalah yang menggunakan pendekatan praktis atau jalan pintas yang tidak dijamin optimal atau sempurna, tetapi cukup untuk mencapai tujuan jangka pendek atau perkiraan solusi. Heuristik sering digunakan ketika pencarian solusi optimal terlalu mahal secara komputasi.},
|
||||
sort={Heuristik}
|
||||
}
|
||||
|
||||
\newglossaryentry{validasi-silang}{
|
||||
name={Validasi Silang (Cross-Validation)},
|
||||
description={Teknik statistik untuk mengevaluasi seberapa baik hasil analisis statistik (seperti model prediktif) akan generalisasi ke kumpulan data independen. Ini penting untuk menghindari overfitting dan mendapatkan estimasi kinerja model yang lebih andal pada data yang belum pernah dilihat.},
|
||||
sort={Validasi Silang}
|
||||
}
|
||||
|
||||
\newglossaryentry{bias-algoritmik}{
|
||||
name={Bias Algoritmik},
|
||||
description={Mengacu pada kesalahan sistematis atau hasil yang tidak adil yang dihasilkan oleh sistem kecerdasan buatan karena asumsi yang salah dalam proses pembelajaran mesin atau karena data pelatihan yang bias. Bias ini dapat mereplikasi atau bahkan memperkuat prasangka sosial yang ada.},
|
||||
sort={Bias Algoritmik}
|
||||
}
|
||||
241
latex/frontmatter/notations.tex
Normal file
241
latex/frontmatter/notations.tex
Normal file
@@ -0,0 +1,241 @@
|
||||
% --- Glossary Definitions ---
|
||||
% Note: Descriptions are based on the provided Indonesian text but translated to English
|
||||
% for typical glossary conventions. You can adjust the language as needed.
|
||||
|
||||
\newglossaryentry{not:signal}{
|
||||
name={\ensuremath{S}},
|
||||
description={vektor sinyal akselerometer berdimensi 1$\times$262144},
|
||||
sort={s},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:sampling_freq}{
|
||||
name={\ensuremath{f_s}},
|
||||
description={frekuensi dengan nilai \textit{sampling} ($s$) di mana sinyal kontinu didigitalkan},
|
||||
sort={fs},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:time_length}{
|
||||
name={\ensuremath{t}},
|
||||
description={panjang waktu data dalam detik},
|
||||
sort={t},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:dataset_A}{
|
||||
name={\ensuremath{\mathcal{A}}},
|
||||
description={matriks dataset A},
|
||||
sort={adataset},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:dataset_B}{
|
||||
name={\ensuremath{\mathcal{B}}},
|
||||
description={matriks dataset B},
|
||||
sort={bdataset},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:damage_file}{
|
||||
name={\ensuremath{\mathbf{D}}},
|
||||
description={matriks akselerometer untuk setiap berkas dengan bentuk $262144\times30$},
|
||||
sort={filedamage},
|
||||
type=notation,
|
||||
}
|
||||
\newglossaryentry{not:joint_index}{
|
||||
name={\ensuremath{n}},
|
||||
description={indeks atau nomor kerusakan \textit{joint}},
|
||||
sort={indexjoint},
|
||||
type=notation,
|
||||
}
|
||||
\newglossaryentry{not:damage_file_set_case}{
|
||||
name={\ensuremath{\mathbf{d}}},
|
||||
description={set matriks kerusakan},
|
||||
sort={damagefilesetcase},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:k}{
|
||||
name={$k$},
|
||||
description={Index for measurement nodes, an integer ranging from 0 to 29.},
|
||||
sort={k},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:Fk}{
|
||||
name={$F_{k}$},
|
||||
description={Filename string for the raw time-domain signal from node $k$. The specific format mentioned is \texttt{zzzAD}$k$\texttt{.TXT}.},
|
||||
sort={Fk},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:nkFk}{
|
||||
name={$n_{k}^{F_{k}}$},
|
||||
description={Represents the measurement \textit{node} with index $k$. The raw time-domain signal data from this node, $x_k$, has a length of $L=262144$ samples.},
|
||||
sort={nkFk},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:i}{
|
||||
name={$i$},
|
||||
description={Index for ``damage-case'' folders, an integer ranging from 0 to 5.},
|
||||
sort={i},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:di}{
|
||||
name={$d_{i}$},
|
||||
description={Set representing the $i$-th damage scenario, containing data from five consecutive nodes: $\bigl\{\,n_{5i}^{F_{5i}},\;n_{5i+1}^{F_{5i+1}},\;\dots,\;n_{5i+4}^{F_{5i+4}}\bigr\}$. Cardinality: $|d_i|=5$ nodes.},
|
||||
sort={di},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:diTD}{
|
||||
name={$d_{i}^{\mathrm{TD}}$},
|
||||
description={Time-domain subset of nodes from damage case $d_i$, containing only the first and last nodes: $\bigl\{\,n_{5i}^{F_{5i}},\;n_{5i+4}^{F_{5i+4}}\bigr\}$. Cardinality: $|d_{i}^{\mathrm{TD}}| = 2$ nodes.},
|
||||
sort={diTD},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:calT}{
|
||||
name={$\mathcal{T}$},
|
||||
description={Short-Time Fourier Transform (STFT) operator. It maps a raw time-domain signal $n_k^{F_k}$ (or $x_k$) from $\mathbb{R}^{L}$ (with $L=262144$) to a magnitude spectrogram matrix $\widetilde{n}_k^{F_k}$ in $\mathbb{R}^{513 \times 513}$.},
|
||||
sort={Tcal},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:L}{
|
||||
name={$L$},
|
||||
description={Length of the raw time-domain signal, $L=262144$ samples.},
|
||||
sort={L},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:Nw}{
|
||||
name={$N_{w}$},
|
||||
description={Length of the Hanning window used in the STFT, $N_{w}=1024$ samples.},
|
||||
sort={Nw},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:Nh}{
|
||||
name={$N_{h}$},
|
||||
description={Hop size (or step size) used in the STFT, $N_{h}=512$ samples.},
|
||||
sort={Nh},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:wn}{
|
||||
name={$w[n]$},
|
||||
description={Value of the Hanning window function at sample index $n$. The window spans $N_w$ samples.},
|
||||
sort={wn},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:n_summation}{
|
||||
name={$n$},
|
||||
description={Sample index within the Hanning window and for the STFT summation, an integer ranging from $0$ to $N_w-1$.},
|
||||
sort={n_summation},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:xkm}{
|
||||
name={$x_k[m]$}, % Or x_k if it's treated as the whole signal vector
|
||||
description={Represents the raw time-domain signal for node $k$. As a discrete signal, it consists of $L=262144$ samples. $x_k[m]$ would be the $m$-th sample.},
|
||||
sort={xkm},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:Skpt}{
|
||||
name={$S_k(p,t)$},
|
||||
description={Complex-valued result of the STFT for node $k$ at frequency bin $p$ and time frame $t$. This is a scalar value for each $(p,t)$ pair.},
|
||||
sort={Skpt},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:p}{
|
||||
name={$p$},
|
||||
description={Frequency bin index in the STFT or spectrogram, an integer ranging from $0$ to $512$.},
|
||||
sort={p},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:t_stft}{ % Differentiating t for STFT time frame and t for feature vector time slice if necessary
|
||||
name={$t$},
|
||||
description={Time frame index in the STFT or spectrogram, an integer ranging from $0$ to $512$. Also used as the time slice index for extracting feature vectors $\mathbf{x}_{i,s,r,t}$ from spectrograms.},
|
||||
sort={t},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:ntildekFk}{ % New entry for the matrix
|
||||
name={$\widetilde{n}_k^{F_k}$},
|
||||
description={The magnitude spectrogram matrix for node $k$, obtained by applying the STFT operator $\mathcal{T}$ to the time-domain signal $n_k^{F_k}$. This matrix is an element of $\mathbb{R}^{513 \times 513}$.},
|
||||
sort={ntildekFk},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:ntildekFkpt}{ % Modified entry for the element
|
||||
name={$\widetilde{n}_k^{F_k}(p,t)$},
|
||||
description={Scalar value representing the magnitude of the STFT for node $k$ at frequency bin $p$ and time frame $t$; specifically, $\widetilde{n}_k^{F_k}(p,t) = |S_k(p,t)|$. This is an element of the spectrogram matrix $\widetilde{n}_k^{F_k}$.},
|
||||
sort={ntildekFkpt},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:R}{
|
||||
name={$\mathbb{R}$},
|
||||
description={The set of real numbers. Used to denote vector spaces like $\mathbb{R}^{N}$ (N-dimensional real vectors) or $\mathbb{R}^{M \times N}$ (M-by-N real matrices).},
|
||||
sort={Rbb},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:diFD}{
|
||||
name={$d_{i}^{\mathrm{FD}}$},
|
||||
description={Frequency-domain subset for damage case $i$. It contains two spectrogram matrices: $\bigl\{\,\widetilde{n}_{5i}^{F_{5i}},\; \widetilde{n}_{5i+4}^{F_{5i+4}}\,\bigr\}$, where each spectrogram $\widetilde{n}$ is in $\mathbb{R}^{513 \times 513}$. Cardinality: $|d_{i}^{\mathrm{FD}}| = 2$ spectrograms.},
|
||||
sort={diFD},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:r_repetition}{
|
||||
name={$r$},
|
||||
description={Repetition index within a single damage case, an integer ranging from $0$ to $4$.},
|
||||
sort={r_repetition},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:xboldisr}{
|
||||
name={$\mathbf{x}_{i,s,r,t}$},
|
||||
description={Feature vector (a row or column, often referred to as a time slice) taken from the $r$-th spectrogram repetition, for damage case $i$ and sensor side $s$, at time slice $t$. This vector is an element of $\mathbb{R}^{513}$.},
|
||||
sort={xisrt_bold},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:s_sensor}{
|
||||
name={$s$},
|
||||
description={Index representing the sensor side (e.g., identifying Sensor A or Sensor B).},
|
||||
sort={s_sensor},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:yi}{
|
||||
name={$y_{i}$},
|
||||
description={Scalar label for the damage case $i$, defined as $y_i = i$. This is an integer value from 0 to 5.},
|
||||
sort={yi},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:Lambda}{
|
||||
name={$\Lambda(i,s,r,t)$},
|
||||
description={Slicing function that concatenates a feature vector $\mathbf{x}_{i,s,r,t} \in \mathbb{R}^{513}$ with its corresponding damage case label $y_i \in \mathbb{R}$, resulting in a combined vector $\bigl[\,\mathbf{x}_{i,s,r,t}, \;y_{i}\bigr] \in \mathbb{R}^{514}$.},
|
||||
sort={Lambda},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
\newglossaryentry{not:calDs}{
|
||||
name={$\mathcal{D}^{(s)}$},
|
||||
description={The complete dataset for sensor side $s$. It is a collection of $15390$ data points, where each point is a vector in $\mathbb{R}^{514}$ (513 features + 1 label). Thus, the dataset can be viewed as a matrix of size $15390 \times 514$.},
|
||||
sort={Dcal_s},
|
||||
type=notation,
|
||||
}
|
||||
|
||||
% --- End Glossary Definitions ---
|
||||
@@ -1,14 +1,18 @@
|
||||
\documentclass[draftmark]{thesis}
|
||||
|
||||
% Title Information
|
||||
\setthesisinfo
|
||||
{Prediksi Lokasi Kerusakan dengan Machine Learning}
|
||||
{Rifqi Damar Panuluh}
|
||||
{20210110224}
|
||||
{PROGRAM STUDI TEKNIK SIPIL}
|
||||
{FAKULTAS TEKNIK}
|
||||
{UNIVERSITAS MUHAMMADIYAH YOGYAKARTA}
|
||||
{2025}
|
||||
% Metadata
|
||||
\title{Prediksi Lokasi Kerusakan dengan Machine Learning}
|
||||
\author{Rifqi Damar Panuluh}
|
||||
\date{\today}
|
||||
\authorid{20210110224}
|
||||
\firstadvisor{Ir. Muhammad Ibnu Syamsi, Ph.D.}
|
||||
\secondadvisor{}
|
||||
\headdepartement{Puji Harsanto, S.T., M.T., Ph.D.}
|
||||
\headdepartementid{19740607201404123064}
|
||||
\faculty{Fakultas Teknik}
|
||||
\program{Program Studi Teknik Sipil}
|
||||
\university{Universitas Muhammadiyah Yogyakarta}
|
||||
\yearofsubmission{2025}
|
||||
|
||||
% Input preamble
|
||||
\input{preamble/packages}
|
||||
@@ -16,22 +20,19 @@
|
||||
\input{preamble/macros}
|
||||
|
||||
\begin{document}
|
||||
|
||||
\maketitle
|
||||
% \input{frontmatter/maketitle}
|
||||
% \input{frontmatter/maketitle_secondary}
|
||||
\frontmatter
|
||||
\input{frontmatter/approval}\clearpage
|
||||
\input{frontmatter/originality}\clearpage
|
||||
\input{frontmatter/acknowledgement}\clearpage
|
||||
\tableofcontents
|
||||
% \input{frontmatter/approval}\clearpage
|
||||
% \input{frontmatter/originality}\clearpage
|
||||
% \input{frontmatter/acknowledgement}\clearpage
|
||||
% \tableofcontents
|
||||
\clearpage
|
||||
\mainmatter
|
||||
\pagestyle{fancyplain}
|
||||
% Include content
|
||||
\include{content/abstract}
|
||||
\include{content/introduction}
|
||||
\include{chapters/01_introduction}
|
||||
\include{content/chapter2}
|
||||
\include{content/conclusion}
|
||||
\include{chapters/id/02_literature_review/index}
|
||||
\include{chapters/id/03_methodology/index}
|
||||
|
||||
% Bibliography
|
||||
% \bibliographystyle{IEEEtran}
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
\newcommand{\studentname}{Rifqi Damar Panuluh}
|
||||
\newcommand{\studentid}{20210110224}
|
||||
\newcommand{\thesistitle}{Prediksi Lokasi Kerusakan dengan Machine Learning}
|
||||
\newcommand{\firstadvisor}{Ir. Muhammad Ibnu Syamsi, Ph.D.}
|
||||
\newcommand{\secondadvisor}{}
|
||||
\newcommand{\headdepartement}{Puji Harsanto, S.T. M.T., Ph.D.}
|
||||
\newcommand{\headdepartementid}{19740607201404123064}
|
||||
\newcommand{\faculty}{Fakultas Teknik}
|
||||
\newcommand{\program}{Teknik Sipil}
|
||||
\newcommand{\university}{Universitas Muhammadiyah Yogyakarta}
|
||||
\newcommand{\yearofsubmission}{2025}
|
||||
230
latex/thesis.cls
230
latex/thesis.cls
@@ -1,7 +1,7 @@
|
||||
\NeedsTeXFormat{LaTeX2e}
|
||||
\ProvidesClass{thesis}[2025/05/10 Bachelor Thesis Class]
|
||||
|
||||
\newif\if@draftmark
|
||||
\newif\if@draftmark \@draftmarkfalse
|
||||
\@draftmarkfalse
|
||||
|
||||
\DeclareOption{draftmark}{\@draftmarktrue}
|
||||
@@ -12,6 +12,7 @@
|
||||
\RequirePackage{polyglossia}
|
||||
\RequirePackage{fontspec}
|
||||
\RequirePackage{titlesec}
|
||||
\RequirePackage{titling}
|
||||
\RequirePackage{fancyhdr}
|
||||
\RequirePackage{geometry}
|
||||
\RequirePackage{setspace}
|
||||
@@ -24,30 +25,31 @@
|
||||
\RequirePackage{svg} % Allows including SVG images directly
|
||||
\RequirePackage{indentfirst} % Makes first paragraph after headings indented
|
||||
\RequirePackage{float} % Provides [H] option to force figure/table placement
|
||||
|
||||
\RequirePackage[style=apa, backend=biber]{biblatex}
|
||||
\RequirePackage[acronym, nogroupskip, toc]{glossaries}
|
||||
% Polyglossia set language
|
||||
+ \setdefaultlanguage[variant=indonesian]{malay} % Proper Indonesian language setup
|
||||
+ \setotherlanguage{english} % Enables English as secondary language
|
||||
|
||||
+ \DefineBibliographyStrings{english}{% % Customizes bibliography text
|
||||
+ andothers={dkk\adddot}, % Changes "et al." to "dkk."
|
||||
+ pages={hlm\adddot}, % Changes "pp." to "hlm."
|
||||
+ }
|
||||
\setdefaultlanguage[variant=indonesian]{malay} % Proper Indonesian language setup
|
||||
\setotherlanguage{english} % Enables English as secondary language
|
||||
\DefineBibliographyStrings{english}{% % Customizes bibliography text
|
||||
andothers={dkk\adddot}, % Changes "et al." to "dkk."
|
||||
pages={hlm\adddot}, % Changes "pp." to "hlm."
|
||||
}
|
||||
|
||||
% Conditionally load the watermark package and settings
|
||||
\if@draftmark
|
||||
\RequirePackage{draftwatermark}
|
||||
\SetWatermarkText{nuluh/thesis (wip) draft: \today}
|
||||
\SetWatermarkText{nuluh/thesis (wip) [draft: \today]}
|
||||
\SetWatermarkColor[gray]{0.8} % Opacity: 0.8 = 20% transparent
|
||||
\SetWatermarkFontSize{1.5cm}
|
||||
\SetWatermarkAngle{90}
|
||||
\SetWatermarkHorCenter{1.5cm}
|
||||
\RequirePackage[left]{lineno}
|
||||
\linenumbers
|
||||
\fi
|
||||
|
||||
% Page layout
|
||||
\geometry{left=3cm, top=3cm, right=3cm, bottom=3cm}
|
||||
\geometry{left=4cm, top=3cm, right=3cm, bottom=3cm}
|
||||
\setlength{\parskip}{0.5em}
|
||||
\setlength{\parindent}{0pt}
|
||||
\onehalfspacing
|
||||
|
||||
% Fonts
|
||||
@@ -56,19 +58,45 @@
|
||||
\setsansfont{Arial}
|
||||
\setmonofont{Courier New}
|
||||
|
||||
% Metadata commands
|
||||
\input{metadata}
|
||||
|
||||
\newcommand{\setthesisinfo}[7]{%
|
||||
\renewcommand{\thesistitle}{#1}%
|
||||
\renewcommand{\studentname}{#2}%
|
||||
\renewcommand{\studentid}{#3}%
|
||||
\renewcommand{\program}{#4}%
|
||||
\renewcommand{\faculty}{#5}%
|
||||
\renewcommand{\university}{#6}%
|
||||
\renewcommand{\yearofsubmission}{#7}%
|
||||
\makeatletter
|
||||
% Extracting the Year from \today
|
||||
\newcommand{\theyear}{%
|
||||
\expandafter\@car\expandafter\@gobble\the\year\@nil
|
||||
}
|
||||
|
||||
% Declare internal macros as initially empty
|
||||
\newcommand{\@authorid}{}
|
||||
\newcommand{\@firstadvisor}{}
|
||||
\newcommand{\@secondadvisor}{}
|
||||
\newcommand{\@headdepartement}{}
|
||||
\newcommand{\@headdepartementid}{}
|
||||
\newcommand{\@faculty}{}
|
||||
\newcommand{\@program}{}
|
||||
\newcommand{\@university}{}
|
||||
\newcommand{\@yearofsubmission}{}
|
||||
|
||||
% Define user commands to set these values.
|
||||
\newcommand{\authorid}[1]{\gdef\@authorid{#1}}
|
||||
\newcommand{\firstadvisor}[1]{\gdef\@firstadvisor{#1}}
|
||||
\newcommand{\secondadvisor}[1]{\gdef\@secondadvisor{#1}}
|
||||
\newcommand{\headdepartement}[1]{\gdef\@headdepartement{#1}}
|
||||
\newcommand{\headdepartementid}[1]{\gdef\@headdepartementid{#1}}
|
||||
\newcommand{\faculty}[1]{\gdef\@faculty{#1}}
|
||||
\newcommand{\program}[1]{\gdef\@program{#1}}
|
||||
\newcommand{\university}[1]{\gdef\@university{#1}}
|
||||
\newcommand{\yearofsubmission}[1]{\gdef\@yearofsubmission{#1}}
|
||||
|
||||
% Now expose robust “the‑” getters to access the values
|
||||
\newcommand{\theauthorid}{\@authorid}
|
||||
\newcommand{\thefirstadvisor}{\@firstadvisor}
|
||||
\newcommand{\thesecondadvisor}{\@secondadvisor}
|
||||
\newcommand{\theheaddepartement}{\@headdepartement}
|
||||
\newcommand{\theheaddepartementid}{\@headdepartementid}
|
||||
\newcommand{\thefaculty}{\@faculty}
|
||||
\newcommand{\theprogram}{\@program}
|
||||
\newcommand{\theuniversity}{\@university}
|
||||
\newcommand{\theyearofsubmission}{\@yearofsubmission}
|
||||
\makeatother
|
||||
% % Header and footer
|
||||
\fancypagestyle{fancy}{%
|
||||
\fancyhf{}
|
||||
@@ -110,11 +138,6 @@
|
||||
\renewcommand{\cftchappresnum}{BAB~}
|
||||
\renewcommand{\cftchapaftersnum}{\quad}
|
||||
|
||||
% \titlespacing*{\chapter}{0pt}{-10pt}{20pt}
|
||||
|
||||
% Redefine \maketitle
|
||||
\renewcommand{\maketitle}{\input{frontmatter/maketitle}}
|
||||
|
||||
% Chapter & Section format
|
||||
\renewcommand{\cftchapfont}{\normalsize\MakeUppercase}
|
||||
% \renewcommand{\cftsecfont}{}
|
||||
@@ -136,11 +159,15 @@
|
||||
\setlength{\cftsubsecnumwidth}{2.5em}
|
||||
\setlength{\cftfignumwidth}{5em}
|
||||
\setlength{\cfttabnumwidth}{4em}
|
||||
\renewcommand \cftchapdotsep{1} % Denser dots (closer together) https://tex.stackexchange.com/a/273764
|
||||
\renewcommand \cftsecdotsep{1} % Apply to sections too
|
||||
\renewcommand \cftsubsecdotsep{1} % Apply to subsections too
|
||||
\renewcommand \cftchapdotsep{1} % https://tex.stackexchange.com/a/273764
|
||||
\renewcommand \cftsecdotsep{1} % https://tex.stackexchange.com/a/273764
|
||||
\renewcommand \cftsubsecdotsep{1} % https://tex.stackexchange.com/a/273764
|
||||
\renewcommand \cftfigdotsep{1.5} % https://tex.stackexchange.com/a/273764
|
||||
\renewcommand \cfttabdotsep{1.5} % https://tex.stackexchange.com/a/273764
|
||||
\renewcommand{\cftchapleader}{\normalfont\cftdotfill{\cftsecdotsep}}
|
||||
\renewcommand{\cftchappagefont}{\normalfont}
|
||||
|
||||
% Add Prefix in the Lof and LoT entries
|
||||
\renewcommand{\cftfigpresnum}{\figurename~}
|
||||
\renewcommand{\cfttabpresnum}{\tablename~}
|
||||
|
||||
@@ -165,6 +192,147 @@
|
||||
% \renewcommand{\cfttoctitlefont}{\bfseries\MakeUppercase}
|
||||
% \renewcommand{\cftaftertoctitle}{\vskip 2em}
|
||||
|
||||
% Defines a new glossary called “notation”
|
||||
\newglossary[nlg]{notation}{not}{ntn}{Notation}
|
||||
|
||||
% Define the header for the location column
|
||||
\providecommand*{\locationname}{Location}
|
||||
|
||||
% Define the new glossary style called 'mylistalt' for main glossaries
|
||||
\makeatletter
|
||||
\newglossarystyle{mylistalt}{%
|
||||
% start the list, initializing glossaries internals
|
||||
\renewenvironment{theglossary}%
|
||||
{\glslistinit\begin{enumerate}}%
|
||||
{\end{enumerate}}%
|
||||
% suppress all headers/groupskips
|
||||
\renewcommand*{\glossaryheader}{}%
|
||||
\renewcommand*{\glsgroupheading}[1]{}%
|
||||
\renewcommand*{\glsgroupskip}{}%
|
||||
% main entries: let \item produce "1." etc., then break
|
||||
\renewcommand*{\glossentry}[2]{%
|
||||
\item \glstarget{##1}{\glossentryname{##1}}%
|
||||
\mbox{}\\
|
||||
\glossentrydesc{##1}\space
|
||||
[##2] % appears on page x
|
||||
}%
|
||||
% sub-entries as separate paragraphs, still aligned
|
||||
\renewcommand*{\subglossentry}[3]{%
|
||||
\par
|
||||
\glssubentryitem{##2}%
|
||||
\glstarget{##2}{\strut}\space
|
||||
\glossentrydesc{##2}\space ##3%
|
||||
}%
|
||||
}
|
||||
|
||||
|
||||
% Define the new glossary style 'altlong3customheader' for notation
|
||||
\newglossarystyle{altlong3customheader}{%
|
||||
% The glossary will be a longtable environment with three columns:
|
||||
% 1. Symbol (left-aligned)
|
||||
% 2. Description (paragraph, width \glsdescwidth)
|
||||
% 3. Location (paragraph, width \glspagelistwidth)
|
||||
\renewenvironment{theglossary}%
|
||||
{\begin{longtable}{lp{\glsdescwidth}p{\glspagelistwidth}}}%
|
||||
{\end{longtable}}%
|
||||
% Define the table header row
|
||||
\renewcommand*{\symbolname}{Simbol}
|
||||
\renewcommand*{\descriptionname}{Keterangan}
|
||||
\renewcommand*{\locationname}{Halaman}
|
||||
\renewcommand*{\glossaryheader}{%
|
||||
\bfseries\symbolname & \bfseries\descriptionname & \bfseries\locationname \tabularnewline\endhead}%
|
||||
% Suppress group headings (e.g., A, B, C...)
|
||||
\renewcommand*{\glsgroupheading}[1]{}%
|
||||
% Define how a main glossary entry is displayed
|
||||
% ##1 is the entry label
|
||||
% ##2 is the location list (page numbers)
|
||||
\renewcommand{\glossentry}[2]{%
|
||||
\glsentryitem{##1}% Inserts entry number if entrycounter option is used
|
||||
\glstarget{##1}{\glossentryname{##1}} & % Column 1: Symbol (with hyperlink target)
|
||||
\glossentrydesc{##1}\glspostdescription & % Column 2: Description (with post-description punctuation)
|
||||
##2\tabularnewline % Column 3: Location list
|
||||
}%
|
||||
% Define how a sub-entry is displayed
|
||||
% ##1 is the sub-entry level (e.g., 1 for first sub-level)
|
||||
% ##2 is the entry label
|
||||
% ##3 is the location list
|
||||
\renewcommand{\subglossentry}[3]{%
|
||||
& % Column 1 (Symbol) is left blank for sub-entries to create an indented look
|
||||
\glssubentryitem{##2}% Inserts sub-entry number if subentrycounter is used
|
||||
\glstarget{##2}{\strut}\glossentrydesc{##2}\glspostdescription & % Column 2: Description (target on strut for hyperlink)
|
||||
##3\tabularnewline % Column 3: Location list
|
||||
}%
|
||||
% Define the skip between letter groups (if group headings were enabled)
|
||||
% For 3 columns, we need 2 ampersands for a full blank row if not using \multicolumn
|
||||
\ifglsnogroupskip
|
||||
\renewcommand*{\glsgroupskip}{}%
|
||||
\else
|
||||
\renewcommand*{\glsgroupskip}{& & \tabularnewline}%
|
||||
\fi
|
||||
}
|
||||
|
||||
% Define a new style 'supercol' based on 'super' for acronyms glossaries
|
||||
\newglossarystyle{supercol}{%
|
||||
\setglossarystyle{super}% inherit everything from the original
|
||||
% override just the main-entry format:
|
||||
\renewcommand*{\glossentry}[2]{%
|
||||
\glsentryitem{##1}%
|
||||
\glstarget{##1}{\glossentryname{##1}}\space % <-- added colon here
|
||||
&: \glossentrydesc{##1}\glspostdescription\space ##2\tabularnewline
|
||||
}%
|
||||
% likewise for sub‐entries, if you want a colon there too:
|
||||
\renewcommand*{\subglossentry}[3]{%
|
||||
&:
|
||||
\glssubentryitem{##2}%
|
||||
\glstarget{##2}{\strut}\glossentryname{##2}\space % <-- and here
|
||||
\glossentrydesc{##2}\glspostdescription\space ##3\tabularnewline
|
||||
}%
|
||||
}
|
||||
\makeatother
|
||||
|
||||
% A new command that enables us to enter bi-lingual (Bahasa Indonesia and English) terms
|
||||
% syntax: \addterm[options]{label}{Bahasa Indonesia}{Bahasa Indonesia first use}{English}{Bahasa Indonesia
|
||||
% description}
|
||||
\newcommand{\addterm}[6][]{
|
||||
\newglossaryentry{#2}{
|
||||
name={#3 (angl.\ #5)},
|
||||
first={#4 (\emph{#5})},
|
||||
text={#3},
|
||||
sort={#3},
|
||||
description={#6},
|
||||
#1 % pass additional options to \newglossaryentry
|
||||
}
|
||||
}
|
||||
|
||||
% A new command that enables us to enter (English) acronyms with bi-lingual
|
||||
% (Bahasa Indonesia and English) long versions
|
||||
% syntax: \addacronym[options]{label}{abbreviation}{Bahasa Indonesia long}{Bahasa Indonesia first
|
||||
% use long}{English long}{Bahasa Indonesia description}
|
||||
\newcommand{\addacronym}[7][]{
|
||||
% Create the main glossary entry with \newacronym
|
||||
% \newacronym[key-val list]{label}{abbrv}{long}
|
||||
\newacronym[
|
||||
name={#4 (angl.\ #6,\ #3)},
|
||||
first={\emph{#5} (angl.\ \emph{#6},\ \emph{#3})},
|
||||
sort={#4},
|
||||
description={#7},
|
||||
#1 % pass additional options to \newglossaryentry
|
||||
]
|
||||
{#2}{#3}{#4}
|
||||
% Create a cross-reference from the abbreviation to the main glossary entry by
|
||||
% creating an auxiliary glossary entry (note: we set the label of this entry
|
||||
% to '<original label>_auxiliary' to avoid clashes)
|
||||
\newglossaryentry{#2_auxiliary}{
|
||||
name={#3},
|
||||
sort={#3},
|
||||
description={\makefirstuc{#6}},
|
||||
see=[See:]{#2}
|
||||
}
|
||||
}
|
||||
|
||||
% Change the text of the cross-reference links to the Bahasa Indonesia long version.
|
||||
\renewcommand*{\glsseeitemformat}[1]{\emph{\acrlong{#1}}.}
|
||||
|
||||
% % Apply a custom fancyhdr layout only on the first page of each \chapter, and use no header/footer elsewhere
|
||||
% % \let\oldchapter\chapter
|
||||
% % \renewcommand{\chapter}{%
|
||||
|
||||
Reference in New Issue
Block a user