Compare commits
29 Commits
latex/bib
...
wuicace-20
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f5dada1b9c | ||
|
|
37c9a0765a | ||
|
|
8656289a1c | ||
|
|
15fe8339ec | ||
|
|
44210ef372 | ||
|
|
9192d4c81c | ||
|
|
0373743ca7 | ||
|
|
49d6395e6f | ||
|
|
bf9cca2d90 | ||
|
|
08420296e6 | ||
|
|
1540213eec | ||
|
|
6fd4b7465e | ||
|
|
85a0aebf36 | ||
|
|
8d1edfdbf7 | ||
|
|
ff862d9467 | ||
|
|
dfb64db1d8 | ||
|
|
3e3de577ba | ||
|
|
76a09c0219 | ||
|
|
1a994fd59c | ||
|
|
cdb3010b78 | ||
|
|
8a3c1ae585 | ||
|
|
7b934d3fba | ||
|
|
aaccad7ae8 | ||
|
|
2c453ec403 | ||
|
|
7da3179d08 | ||
|
|
254b24cb21 | ||
|
|
d151062115 | ||
|
|
a32415cebf | ||
|
|
12669ed24c |
3
.gitignore
vendored
3
.gitignore
vendored
@@ -1,4 +1,5 @@
|
||||
# Ignore CSV files in the data directory and all its subdirectories
|
||||
data/**/*.csv
|
||||
.venv/
|
||||
*.pyc
|
||||
*.pyc
|
||||
*.egg-info/
|
||||
@@ -21,6 +21,7 @@
|
||||
#
|
||||
# Scope:
|
||||
# latex (changes to thesis LaTeX)
|
||||
# documentclass (LaTeX in-house document class changes)
|
||||
# src (changes to Python source code)
|
||||
# nb (changes to notebooks)
|
||||
# ml (ML model specific changes)
|
||||
|
||||
3
.vscode/settings.json
vendored
3
.vscode/settings.json
vendored
@@ -1,3 +1,4 @@
|
||||
{
|
||||
"python.analysis.extraPaths": ["./code/src/features"]
|
||||
"python.analysis.extraPaths": ["./code/src/features"],
|
||||
"jupyter.notebookFileRoot": "${workspaceFolder}/code"
|
||||
}
|
||||
|
||||
@@ -16,3 +16,8 @@ The repository is private and access is restricted only to those who have been g
|
||||
All contents of this repository, including the thesis idea, code, and associated data, are copyrighted © 2024 by Rifqi Panuluh. Unauthorized use or duplication is prohibited.
|
||||
|
||||
[LICENSE](https://github.com/nuluh/thesis?tab=License-1-ov-file#readme)
|
||||
|
||||
## How to Run `stft.ipynb`
|
||||
|
||||
1. run `pip install -e .` in root project first
|
||||
2. run the notebook
|
||||
@@ -155,7 +155,7 @@
|
||||
"import pandas as pd\n",
|
||||
"import numpy as np\n",
|
||||
"from scipy.signal import stft, hann\n",
|
||||
"from multiprocessing import Pool\n",
|
||||
"# from multiprocessing import Pool\n",
|
||||
"\n",
|
||||
"# Function to compute and append STFT data\n",
|
||||
"def process_stft(args):\n",
|
||||
@@ -321,9 +321,9 @@
|
||||
"source": [
|
||||
"import pandas as pd\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"ready_data1 = []\n",
|
||||
"ready_data1a = []\n",
|
||||
"for file in os.listdir('D:/thesis/data/converted/raw/sensor1'):\n",
|
||||
" ready_data1.append(pd.read_csv(os.path.join('D:/thesis/data/converted/raw/sensor1', file)))\n",
|
||||
" ready_data1a.append(pd.read_csv(os.path.join('D:/thesis/data/converted/raw/sensor1', file)))\n",
|
||||
"# colormesh give title x is frequency and y is time and rotate/transpose the data\n",
|
||||
"# Plotting the STFT Data"
|
||||
]
|
||||
@@ -334,8 +334,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ready_data1[0]\n",
|
||||
"plt.pcolormesh(ready_data1[0])"
|
||||
"# len(ready_data1a)\n",
|
||||
"# plt.pcolormesh(ready_data1[0])\n",
|
||||
"ready_data1a[0].max().max()"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -345,7 +346,8 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for i in range(6):\n",
|
||||
" plt.pcolormesh(ready_data1[i])\n",
|
||||
" plt.pcolormesh(ready_data1a[i], cmap=\"jet\", vmax=0.03, vmin=0.0)\n",
|
||||
" plt.colorbar() \n",
|
||||
" plt.title(f'STFT Magnitude for case {i} sensor 1')\n",
|
||||
" plt.xlabel(f'Frequency [Hz]')\n",
|
||||
" plt.ylabel(f'Time [sec]')\n",
|
||||
@@ -358,9 +360,9 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ready_data2 = []\n",
|
||||
"ready_data2a = []\n",
|
||||
"for file in os.listdir('D:/thesis/data/converted/raw/sensor2'):\n",
|
||||
" ready_data2.append(pd.read_csv(os.path.join('D:/thesis/data/converted/raw/sensor2', file)))"
|
||||
" ready_data2a.append(pd.read_csv(os.path.join('D:/thesis/data/converted/raw/sensor2', file)))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -369,8 +371,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(len(ready_data1))\n",
|
||||
"print(len(ready_data2))"
|
||||
"print(len(ready_data1a))\n",
|
||||
"print(len(ready_data2a))"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -379,10 +381,16 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"x1 = 0\n",
|
||||
"print(type(ready_data1[0]))\n",
|
||||
"ready_data1[0].iloc[:,0]\n",
|
||||
"# x1 = x1 + ready_data1[0].shape[0]"
|
||||
"x1a = 0\n",
|
||||
"print(type(ready_data1a[0]))\n",
|
||||
"ready_data1a[0].iloc[:,0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Checking length of the total array"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -391,16 +399,14 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"x1 = 0\n",
|
||||
"print(type(x1))\n",
|
||||
"for i in range(len(ready_data1)):\n",
|
||||
" # print(ready_data1[i].shape)\n",
|
||||
" # print(ready_data1[i].)\n",
|
||||
" print(type(ready_data1[i].shape[0]))\n",
|
||||
" x1 = x1 + ready_data1[i].shape[0]\n",
|
||||
" print(type(x1))\n",
|
||||
"x1a = 0\n",
|
||||
"print(type(x1a))\n",
|
||||
"for i in range(len(ready_data1a)):\n",
|
||||
" print(type(ready_data1a[i].shape[0]))\n",
|
||||
" x1a = x1a + ready_data1a[i].shape[0]\n",
|
||||
" print(type(x1a))\n",
|
||||
"\n",
|
||||
"print(x1)"
|
||||
"print(x1a)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -409,13 +415,20 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"x2 = 0\n",
|
||||
"x2a = 0\n",
|
||||
"\n",
|
||||
"for i in range(len(ready_data2)):\n",
|
||||
" print(ready_data2[i].shape)\n",
|
||||
" x2 = x2 + ready_data2[i].shape[0]\n",
|
||||
"for i in range(len(ready_data2a)):\n",
|
||||
" print(ready_data2a[i].shape)\n",
|
||||
" x2a = x2a + ready_data2a[i].shape[0]\n",
|
||||
"\n",
|
||||
"print(x2)"
|
||||
"print(x2a)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Flatten 6 array into one array"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -424,28 +437,22 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"x1 = ready_data1[0]\n",
|
||||
"# print(x1)\n",
|
||||
"print(type(x1))\n",
|
||||
"for i in range(len(ready_data1) - 1):\n",
|
||||
" #print(i)\n",
|
||||
" x1 = np.concatenate((x1, ready_data1[i + 1]), axis=0)\n",
|
||||
"# print(x1)\n",
|
||||
"pd.DataFrame(x1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"x2 = ready_data2[0]\n",
|
||||
"# Combine all dataframes in ready_data1a into a single dataframe\n",
|
||||
"if ready_data1a: # Check if the list is not empty\n",
|
||||
" # Use pandas concat function instead of iterative concatenation\n",
|
||||
" combined_data = pd.concat(ready_data1a, axis=0, ignore_index=True)\n",
|
||||
" \n",
|
||||
" print(f\"Type of combined data: {type(combined_data)}\")\n",
|
||||
" print(f\"Shape of combined data: {combined_data.shape}\")\n",
|
||||
" \n",
|
||||
" # Display the combined dataframe\n",
|
||||
" combined_data\n",
|
||||
"else:\n",
|
||||
" print(\"No data available in ready_data1a list\")\n",
|
||||
" combined_data = pd.DataFrame()\n",
|
||||
"\n",
|
||||
"for i in range(len(ready_data2) - 1):\n",
|
||||
" #print(i)\n",
|
||||
" x2 = np.concatenate((x2, ready_data2[i + 1]), axis=0)\n",
|
||||
"pd.DataFrame(x2)"
|
||||
"# Store the result in x1a for compatibility with subsequent code\n",
|
||||
"x1a = combined_data"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -454,20 +461,29 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(x1.shape)\n",
|
||||
"print(x2.shape)"
|
||||
"# Combine all dataframes in ready_data1a into a single dataframe\n",
|
||||
"if ready_data2a: # Check if the list is not empty\n",
|
||||
" # Use pandas concat function instead of iterative concatenation\n",
|
||||
" combined_data = pd.concat(ready_data2a, axis=0, ignore_index=True)\n",
|
||||
" \n",
|
||||
" print(f\"Type of combined data: {type(combined_data)}\")\n",
|
||||
" print(f\"Shape of combined data: {combined_data.shape}\")\n",
|
||||
" \n",
|
||||
" # Display the combined dataframe\n",
|
||||
" combined_data\n",
|
||||
"else:\n",
|
||||
" print(\"No data available in ready_data1a list\")\n",
|
||||
" combined_data = pd.DataFrame()\n",
|
||||
"\n",
|
||||
"# Store the result in x1a for compatibility with subsequent code\n",
|
||||
"x2a = combined_data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"y_1 = [1,1,1,1]\n",
|
||||
"y_2 = [0,1,1,1]\n",
|
||||
"y_3 = [1,0,1,1]\n",
|
||||
"y_4 = [1,1,0,0]"
|
||||
"### Creating the label"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -490,39 +506,41 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"y_data = [y_1, y_2, y_3, y_4, y_5, y_6]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for i in range(len(y_data)):\n",
|
||||
" print(ready_data1[i].shape[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for i in range(len(y_data)):\n",
|
||||
" y_data[i] = [y_data[i]]*ready_data1[i].shape[0]\n",
|
||||
" y_data[i] = np.array(y_data[i])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"y_data = [y_1, y_2, y_3, y_4, y_5, y_6]\n",
|
||||
"y_data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"for i in range(len(y_data)):\n",
|
||||
" print(ready_data1a[i].shape[0])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"for i in range(len(y_data)):\n",
|
||||
" y_data[i] = [y_data[i]]*ready_data1a[i].shape[0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"len(y_data[0])\n",
|
||||
"# y_data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -552,10 +570,10 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.model_selection import train_test_split\n",
|
||||
"from src.ml.model_selection import create_ready_data\n",
|
||||
"\n",
|
||||
"x_train1, x_test1, y_train, y_test = train_test_split(x1, y, test_size=0.2, random_state=2)\n",
|
||||
"x_train2, x_test2, y_train, y_test = train_test_split(x2, y, test_size=0.2, random_state=2)"
|
||||
"X1a, y = create_ready_data('D:/thesis/data/converted/raw/sensor1')\n",
|
||||
"X2a, y = create_ready_data('D:/thesis/data/converted/raw/sensor2')"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -565,6 +583,17 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.model_selection import train_test_split\n",
|
||||
"\n",
|
||||
"x_train1, x_test1, y_train, y_test = train_test_split(X1a, y, test_size=0.2, random_state=2)\n",
|
||||
"x_train2, x_test2, y_train, y_test = train_test_split(X2a, y, test_size=0.2, random_state=2)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.metrics import accuracy_score\n",
|
||||
"from sklearn.ensemble import RandomForestClassifier, BaggingClassifier\n",
|
||||
"from sklearn.tree import DecisionTreeClassifier\n",
|
||||
@@ -592,130 +621,15 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"accuracies1 = []\n",
|
||||
"accuracies2 = []\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# 1. Random Forest\n",
|
||||
"rf_model = RandomForestClassifier()\n",
|
||||
"rf_model.fit(x_train1, y_train)\n",
|
||||
"rf_pred1 = rf_model.predict(x_test1)\n",
|
||||
"acc1 = accuracy_score(y_test, rf_pred1) * 100\n",
|
||||
"accuracies1.append(acc1)\n",
|
||||
"# format with color coded if acc1 > 90\n",
|
||||
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
||||
"print(\"Random Forest Accuracy for sensor 1:\", acc1)\n",
|
||||
"rf_model.fit(x_train2, y_train)\n",
|
||||
"rf_pred2 = rf_model.predict(x_test2)\n",
|
||||
"acc2 = accuracy_score(y_test, rf_pred2) * 100\n",
|
||||
"accuracies2.append(acc2)\n",
|
||||
"# format with color coded if acc2 > 90\n",
|
||||
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
|
||||
"print(\"Random Forest Accuracy for sensor 2:\", acc2)\n",
|
||||
"# print(rf_pred)\n",
|
||||
"# print(y_test)\n",
|
||||
"\n",
|
||||
"# 2. Bagged Trees\n",
|
||||
"bagged_model = BaggingClassifier(estimator=DecisionTreeClassifier(), n_estimators=10)\n",
|
||||
"bagged_model.fit(x_train1, y_train)\n",
|
||||
"bagged_pred1 = bagged_model.predict(x_test1)\n",
|
||||
"acc1 = accuracy_score(y_test, bagged_pred1) * 100\n",
|
||||
"accuracies1.append(acc1)\n",
|
||||
"# format with color coded if acc1 > 90\n",
|
||||
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
||||
"print(\"Bagged Trees Accuracy for sensor 1:\", acc1)\n",
|
||||
"bagged_model.fit(x_train2, y_train)\n",
|
||||
"bagged_pred2 = bagged_model.predict(x_test2)\n",
|
||||
"acc2 = accuracy_score(y_test, bagged_pred2) * 100\n",
|
||||
"accuracies2.append(acc2)\n",
|
||||
"# format with color coded if acc2 > 90\n",
|
||||
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
|
||||
"print(\"Bagged Trees Accuracy for sensor 2:\", acc2)\n",
|
||||
"\n",
|
||||
"# 3. Decision Tree\n",
|
||||
"dt_model = DecisionTreeClassifier()\n",
|
||||
"dt_model.fit(x_train1, y_train)\n",
|
||||
"dt_pred1 = dt_model.predict(x_test1)\n",
|
||||
"acc1 = accuracy_score(y_test, dt_pred1) * 100\n",
|
||||
"accuracies1.append(acc1)\n",
|
||||
"# format with color coded if acc1 > 90\n",
|
||||
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
||||
"print(\"Decision Tree Accuracy for sensor 1:\", acc1)\n",
|
||||
"dt_model.fit(x_train2, y_train)\n",
|
||||
"dt_pred2 = dt_model.predict(x_test2)\n",
|
||||
"acc2 = accuracy_score(y_test, dt_pred2) * 100\n",
|
||||
"accuracies2.append(acc2)\n",
|
||||
"# format with color coded if acc2 > 90\n",
|
||||
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
|
||||
"print(\"Decision Tree Accuracy for sensor 2:\", acc2)\n",
|
||||
"\n",
|
||||
"# 4. KNeighbors\n",
|
||||
"knn_model = KNeighborsClassifier()\n",
|
||||
"knn_model.fit(x_train1, y_train)\n",
|
||||
"knn_pred1 = knn_model.predict(x_test1)\n",
|
||||
"acc1 = accuracy_score(y_test, knn_pred1) * 100\n",
|
||||
"accuracies1.append(acc1)\n",
|
||||
"# format with color coded if acc1 > 90\n",
|
||||
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
||||
"print(\"KNeighbors Accuracy for sensor 1:\", acc1)\n",
|
||||
"knn_model.fit(x_train2, y_train)\n",
|
||||
"knn_pred2 = knn_model.predict(x_test2)\n",
|
||||
"acc2 = accuracy_score(y_test, knn_pred2) * 100\n",
|
||||
"accuracies2.append(acc2)\n",
|
||||
"# format with color coded if acc2 > 90\n",
|
||||
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
|
||||
"print(\"KNeighbors Accuracy for sensor 2:\", acc2)\n",
|
||||
"\n",
|
||||
"# 5. Linear Discriminant Analysis\n",
|
||||
"lda_model = LinearDiscriminantAnalysis()\n",
|
||||
"lda_model.fit(x_train1, y_train)\n",
|
||||
"lda_pred1 = lda_model.predict(x_test1)\n",
|
||||
"acc1 = accuracy_score(y_test, lda_pred1) * 100\n",
|
||||
"accuracies1.append(acc1)\n",
|
||||
"# format with color coded if acc1 > 90\n",
|
||||
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
||||
"print(\"Linear Discriminant Analysis Accuracy for sensor 1:\", acc1)\n",
|
||||
"lda_model.fit(x_train2, y_train)\n",
|
||||
"lda_pred2 = lda_model.predict(x_test2)\n",
|
||||
"acc2 = accuracy_score(y_test, lda_pred2) * 100\n",
|
||||
"accuracies2.append(acc2)\n",
|
||||
"# format with color coded if acc2 > 90\n",
|
||||
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
|
||||
"print(\"Linear Discriminant Analysis Accuracy for sensor 2:\", acc2)\n",
|
||||
"\n",
|
||||
"# 6. Support Vector Machine\n",
|
||||
"svm_model = SVC()\n",
|
||||
"svm_model.fit(x_train1, y_train)\n",
|
||||
"svm_pred1 = svm_model.predict(x_test1)\n",
|
||||
"acc1 = accuracy_score(y_test, svm_pred1) * 100\n",
|
||||
"accuracies1.append(acc1)\n",
|
||||
"# format with color coded if acc1 > 90\n",
|
||||
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
||||
"print(\"Support Vector Machine Accuracy for sensor 1:\", acc1)\n",
|
||||
"svm_model.fit(x_train2, y_train)\n",
|
||||
"svm_pred2 = svm_model.predict(x_test2)\n",
|
||||
"acc2 = accuracy_score(y_test, svm_pred2) * 100\n",
|
||||
"accuracies2.append(acc2)\n",
|
||||
"# format with color coded if acc2 > 90\n",
|
||||
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
|
||||
"print(\"Support Vector Machine Accuracy for sensor 2:\", acc2)\n",
|
||||
"\n",
|
||||
"# 7. XGBoost\n",
|
||||
"xgboost_model = XGBClassifier()\n",
|
||||
"xgboost_model.fit(x_train1, y_train)\n",
|
||||
"xgboost_pred1 = xgboost_model.predict(x_test1)\n",
|
||||
"acc1 = accuracy_score(y_test, xgboost_pred1) * 100\n",
|
||||
"accuracies1.append(acc1)\n",
|
||||
"# format with color coded if acc1 > 90\n",
|
||||
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
|
||||
"print(\"XGBoost Accuracy:\", acc1)\n",
|
||||
"xgboost_model.fit(x_train2, y_train)\n",
|
||||
"xgboost_pred2 = xgboost_model.predict(x_test2)\n",
|
||||
"acc2 = accuracy_score(y_test, xgboost_pred2) * 100\n",
|
||||
"accuracies2.append(acc2)\n",
|
||||
"# format with color coded if acc2 > 90\n",
|
||||
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
|
||||
"print(\"XGBoost Accuracy:\", acc2)"
|
||||
"def train_and_evaluate_model(model, model_name, sensor_label, x_train, y_train, x_test, y_test):\n",
|
||||
" model.fit(x_train, y_train)\n",
|
||||
" y_pred = model.predict(x_test)\n",
|
||||
" accuracy = accuracy_score(y_test, y_pred) * 100\n",
|
||||
" return {\n",
|
||||
" \"model\": model_name,\n",
|
||||
" \"sensor\": sensor_label,\n",
|
||||
" \"accuracy\": accuracy\n",
|
||||
" }"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -724,8 +638,59 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"print(accuracies1)\n",
|
||||
"print(accuracies2)"
|
||||
"# Define models for sensor1\n",
|
||||
"models_sensor1 = {\n",
|
||||
" # \"Random Forest\": RandomForestClassifier(),\n",
|
||||
" # \"Bagged Trees\": BaggingClassifier(estimator=DecisionTreeClassifier(), n_estimators=10),\n",
|
||||
" # \"Decision Tree\": DecisionTreeClassifier(),\n",
|
||||
" # \"KNN\": KNeighborsClassifier(),\n",
|
||||
" # \"LDA\": LinearDiscriminantAnalysis(),\n",
|
||||
" \"SVM\": SVC(),\n",
|
||||
" \"XGBoost\": XGBClassifier()\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"results_sensor1 = []\n",
|
||||
"for name, model in models_sensor1.items():\n",
|
||||
" res = train_and_evaluate_model(model, name, \"sensor1\", x_train1, y_train, x_test1, y_test)\n",
|
||||
" results_sensor1.append(res)\n",
|
||||
" print(f\"{name} on sensor1: Accuracy = {res['accuracy']:.2f}%\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"models_sensor2 = {\n",
|
||||
" # \"Random Forest\": RandomForestClassifier(),\n",
|
||||
" # \"Bagged Trees\": BaggingClassifier(estimator=DecisionTreeClassifier(), n_estimators=10),\n",
|
||||
" # \"Decision Tree\": DecisionTreeClassifier(),\n",
|
||||
" # \"KNN\": KNeighborsClassifier(),\n",
|
||||
" # \"LDA\": LinearDiscriminantAnalysis(),\n",
|
||||
" \"SVM\": SVC(),\n",
|
||||
" \"XGBoost\": XGBClassifier()\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"results_sensor2 = []\n",
|
||||
"for name, model in models_sensor2.items():\n",
|
||||
" res = train_and_evaluate_model(model, name, \"sensor2\", x_train2, y_train, x_test2, y_test)\n",
|
||||
" results_sensor2.append(res)\n",
|
||||
" print(f\"{name} on sensor2: Accuracy = {res['accuracy']:.2f}%\")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"all_results = {\n",
|
||||
" \"sensor1\": results_sensor1,\n",
|
||||
" \"sensor2\": results_sensor2\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"print(all_results)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -737,36 +702,48 @@
|
||||
"import numpy as np\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"\n",
|
||||
"models = [rf_model, bagged_model, dt_model, knn_model, lda_model, svm_model, xgboost_model]\n",
|
||||
"model_names = [\"Random Forest\", \"Bagged Trees\", \"Decision Tree\", \"KNN\", \"LDA\", \"SVM\", \"XGBoost\"]\n",
|
||||
"def prepare_plot_data(results_dict):\n",
|
||||
" # Gather unique model names\n",
|
||||
" models_set = {entry['model'] for sensor in results_dict.values() for entry in sensor}\n",
|
||||
" models = sorted(list(models_set))\n",
|
||||
" \n",
|
||||
" # Create dictionaries mapping sensor -> accuracy list ordered by model name\n",
|
||||
" sensor_accuracies = {}\n",
|
||||
" for sensor, entries in results_dict.items():\n",
|
||||
" # Build a mapping: model -> accuracy for the given sensor\n",
|
||||
" mapping = {entry['model']: entry['accuracy'] for entry in entries}\n",
|
||||
" # Order the accuracies consistent with the sorted model names\n",
|
||||
" sensor_accuracies[sensor] = [mapping.get(model, 0) for model in models]\n",
|
||||
" \n",
|
||||
" return models, sensor_accuracies\n",
|
||||
"\n",
|
||||
"bar_width = 0.35 # Width of each bar\n",
|
||||
"index = np.arange(len(model_names)) # Index for the bars\n",
|
||||
"def plot_accuracies(models, sensor_accuracies):\n",
|
||||
" bar_width = 0.35\n",
|
||||
" x = np.arange(len(models))\n",
|
||||
" sensors = list(sensor_accuracies.keys())\n",
|
||||
" \n",
|
||||
" plt.figure(figsize=(10, 6))\n",
|
||||
" # Assume two sensors for plotting grouped bars\n",
|
||||
" plt.bar(x - bar_width/2, sensor_accuracies[sensors[0]], width=bar_width, color='blue', label=sensors[0])\n",
|
||||
" plt.bar(x + bar_width/2, sensor_accuracies[sensors[1]], width=bar_width, color='orange', label=sensors[1])\n",
|
||||
" \n",
|
||||
" # Add text labels on top of bars\n",
|
||||
" for i, (a1, a2) in enumerate(zip(sensor_accuracies[sensors[0]], sensor_accuracies[sensors[1]])):\n",
|
||||
" plt.text(x[i] - bar_width/2, a1 + 0.1, f\"{a1:.2f}%\", ha='center', va='bottom', color='black')\n",
|
||||
" plt.text(x[i] + bar_width/2, a2 + 0.1, f\"{a2:.2f}%\", ha='center', va='bottom', color='black')\n",
|
||||
" \n",
|
||||
" plt.xlabel('Model Name')\n",
|
||||
" plt.ylabel('Accuracy (%)')\n",
|
||||
" plt.title('Accuracy of Classifiers for Each Sensor')\n",
|
||||
" plt.xticks(x, models)\n",
|
||||
" plt.legend()\n",
|
||||
" plt.ylim(0, 105)\n",
|
||||
" plt.tight_layout()\n",
|
||||
" plt.show()\n",
|
||||
"\n",
|
||||
"# Plotting the bar graph\n",
|
||||
"plt.figure(figsize=(14, 8))\n",
|
||||
"\n",
|
||||
"# Bar plot for Sensor 1\n",
|
||||
"plt.bar(index, accuracies1, width=bar_width, color='blue', label='Sensor 1')\n",
|
||||
"\n",
|
||||
"# Bar plot for Sensor 2\n",
|
||||
"plt.bar(index + bar_width, accuracies2, width=bar_width, color='orange', label='Sensor 2')\n",
|
||||
"\n",
|
||||
"# Add values on top of each bar\n",
|
||||
"for i, acc1, acc2 in zip(index, accuracies1, accuracies2):\n",
|
||||
" plt.text(i, acc1 + .1, f'{acc1:.2f}%', ha='center', va='bottom', color='black')\n",
|
||||
" plt.text(i + bar_width, acc2 + 1, f'{acc2:.2f}%', ha='center', va='bottom', color='black')\n",
|
||||
"\n",
|
||||
"# Customize the plot\n",
|
||||
"plt.xlabel('Model Name →')\n",
|
||||
"plt.ylabel('Accuracy →')\n",
|
||||
"plt.title('Accuracy of classifiers for Sensors 1 and 2 with 513 features')\n",
|
||||
"plt.xticks(index + bar_width / 2, model_names) # Set x-tick positions\n",
|
||||
"plt.legend()\n",
|
||||
"plt.ylim(0, 100)\n",
|
||||
"\n",
|
||||
"# Show the plot\n",
|
||||
"plt.show()\n"
|
||||
"# Use the functions\n",
|
||||
"models, sensor_accuracies = prepare_plot_data(all_results)\n",
|
||||
"plot_accuracies(models, sensor_accuracies)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -787,51 +764,10 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def spectograph(data_dir: str):\n",
|
||||
" # print(os.listdir(data_dir))\n",
|
||||
" for damage in os.listdir(data_dir):\n",
|
||||
" # print(damage)\n",
|
||||
" d = os.path.join(data_dir, damage)\n",
|
||||
" # print(d)\n",
|
||||
" for file in os.listdir(d):\n",
|
||||
" # print(file)\n",
|
||||
" f = os.path.join(d, file)\n",
|
||||
" print(f)\n",
|
||||
" # sensor1 = pd.read_csv(f, skiprows=1, sep=';')\n",
|
||||
" # sensor2 = pd.read_csv(f, skiprows=1, sep=';')\n",
|
||||
"from src.ml.model_selection import create_ready_data\n",
|
||||
"\n",
|
||||
" # df1 = pd.DataFrame()\n",
|
||||
"\n",
|
||||
" # df1['s1'] = sensor1[sensor1.columns[-1]]\n",
|
||||
" # df1['s2'] = sensor2[sensor2.columns[-1]]\n",
|
||||
"ed\n",
|
||||
" # # Combined Plot for sensor 1 and sensor 2 from data1 file in which motor is operated at 800 rpm\n",
|
||||
"\n",
|
||||
" # plt.plot(df1['s2'], label='sensor 2')\n",
|
||||
" # plt.plot(df1['s1'], label='sensor 1')\n",
|
||||
" # plt.xlabel(\"Number of samples\")\n",
|
||||
" # plt.ylabel(\"Amplitude\")\n",
|
||||
" # plt.title(\"Raw vibration signal\")\n",
|
||||
" # plt.legend()\n",
|
||||
" # plt.show()\n",
|
||||
"\n",
|
||||
" # from scipy import signal\n",
|
||||
" # from scipy.signal.windows import hann\n",
|
||||
"\n",
|
||||
" # vibration_data = df1['s1']\n",
|
||||
"\n",
|
||||
" # # Applying STFT\n",
|
||||
" # window_size = 1024\n",
|
||||
" # hop_size = 512\n",
|
||||
" # window = hann(window_size) # Creating a Hanning window\n",
|
||||
" # frequencies, times, Zxx = signal.stft(vibration_data, window=window, nperseg=window_size, noverlap=window_size - hop_size)\n",
|
||||
"\n",
|
||||
" # # Plotting the STFT Data\n",
|
||||
" # plt.pcolormesh(times, frequencies, np.abs(Zxx), shading='gouraud')\n",
|
||||
" # plt.title(f'STFT Magnitude for case 1 signal sensor 1 ')\n",
|
||||
" # plt.ylabel('Frequency [Hz]')\n",
|
||||
" # plt.xlabel('Time [sec]')\n",
|
||||
" # plt.show()"
|
||||
"X1b, y = create_ready_data('D:/thesis/data/converted/raw_B/sensor1')\n",
|
||||
"X2b, y = create_ready_data('D:/thesis/data/converted/raw_B/sensor2')"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -840,7 +776,115 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"spectograph('D:/thesis/data/converted/raw')"
|
||||
"y.shape"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.metrics import accuracy_score, classification_report\n",
|
||||
"# 4. Validate on Dataset B\n",
|
||||
"y_pred_svm = svm_model.predict(X1b)\n",
|
||||
"\n",
|
||||
"# 5. Evaluate\n",
|
||||
"print(\"Accuracy on Dataset B:\", accuracy_score(y, y_pred_svm))\n",
|
||||
"print(classification_report(y, y_pred_svm))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from sklearn.metrics import accuracy_score, classification_report\n",
|
||||
"# 4. Validate on Dataset B\n",
|
||||
"y_pred = rf_model2.predict(X2b)\n",
|
||||
"\n",
|
||||
"# 5. Evaluate\n",
|
||||
"print(\"Accuracy on Dataset B:\", accuracy_score(y, y_pred))\n",
|
||||
"print(classification_report(y, y_pred))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"y_predict = svm_model2.predict(X2b.iloc[[5312],:])\n",
|
||||
"print(y_predict)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"y[5312]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Confusion Matrix"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"cm = confusion_matrix(y, y_pred_svm) # -> ndarray\n",
|
||||
"\n",
|
||||
"# get the class labels\n",
|
||||
"labels = svm_model.classes_\n",
|
||||
"\n",
|
||||
"# Plot\n",
|
||||
"disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=labels)\n",
|
||||
"disp.plot(cmap=plt.cm.Blues) # You can change colormap\n",
|
||||
"plt.title(\"SVM Sensor1 CM Train w/ Dataset A Val w/ Dataset B\")\n",
|
||||
"plt.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Self-test CM"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# 1. Predict sensor 1 on Dataset A\n",
|
||||
"y_train_pred = svm_model.predict(x_train1)\n",
|
||||
"\n",
|
||||
"# 2. Import confusion matrix tools\n",
|
||||
"from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"\n",
|
||||
"# 3. Create and plot confusion matrix\n",
|
||||
"cm_train = confusion_matrix(y_train, y_train_pred)\n",
|
||||
"labels = svm_model.classes_\n",
|
||||
"\n",
|
||||
"disp = ConfusionMatrixDisplay(confusion_matrix=cm_train, display_labels=labels)\n",
|
||||
"disp.plot(cmap=plt.cm.Blues)\n",
|
||||
"plt.title(\"Confusion Matrix: Train & Test on Dataset A\")\n",
|
||||
"plt.show()\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
|
||||
0
code/src/ml/__init__.py
Normal file
0
code/src/ml/__init__.py
Normal file
57
code/src/ml/model_selection.py
Normal file
57
code/src/ml/model_selection.py
Normal file
@@ -0,0 +1,57 @@
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
import os
|
||||
from sklearn.model_selection import train_test_split as sklearn_split
|
||||
|
||||
|
||||
def create_ready_data(
|
||||
stft_data_path: str,
|
||||
stratify: np.ndarray = None,
|
||||
) -> tuple:
|
||||
"""
|
||||
Create a stratified train-test split from STFT data.
|
||||
|
||||
Parameters:
|
||||
-----------
|
||||
stft_data_path : str
|
||||
Path to the directory containing STFT data files (e.g. 'data/converted/raw/sensor1')
|
||||
stratify : np.ndarray, optional
|
||||
Labels to use for stratified sampling
|
||||
|
||||
Returns:
|
||||
--------
|
||||
tuple
|
||||
(X_train, X_test, y_train, y_test) - Split datasets
|
||||
"""
|
||||
ready_data = []
|
||||
for file in os.listdir(stft_data_path):
|
||||
ready_data.append(pd.read_csv(os.path.join(stft_data_path, file)))
|
||||
|
||||
y_data = [i for i in range(len(ready_data))]
|
||||
|
||||
# Combine all dataframes in ready_data into a single dataframe
|
||||
if ready_data: # Check if the list is not empty
|
||||
# Use pandas concat function instead of iterative concatenation
|
||||
combined_data = pd.concat(ready_data, axis=0, ignore_index=True)
|
||||
|
||||
print(f"Type of combined data: {type(combined_data)}")
|
||||
print(f"Shape of combined data: {combined_data.shape}")
|
||||
else:
|
||||
print("No data available in ready_data list")
|
||||
combined_data = pd.DataFrame()
|
||||
|
||||
# Store the result in x1a for compatibility with subsequent code
|
||||
X = combined_data
|
||||
|
||||
for i in range(len(y_data)):
|
||||
y_data[i] = [y_data[i]] * ready_data[i].shape[0]
|
||||
y_data[i] = np.array(y_data[i])
|
||||
|
||||
if y_data:
|
||||
# Use numpy concatenate function instead of iterative concatenation
|
||||
y = np.concatenate(y_data, axis=0)
|
||||
else:
|
||||
print("No labels available in y_data list")
|
||||
y = np.array([])
|
||||
|
||||
return X, y
|
||||
@@ -3,7 +3,7 @@ Alur keseluruhan penelitian ini dilakukan melalui tahapan-tahapan sebagai beriku
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\includegraphics[width=0.3\linewidth]{chapters/id/flow.png}
|
||||
\includegraphics[width=0.3\linewidth]{chapters/img/flow.png}
|
||||
\caption{Diagram alir tahapan penelitian}
|
||||
\label{fig:flowchart}
|
||||
\end{figure}
|
||||
|
||||
0
latex/frontmatter/acknowledgement.tex
Normal file
0
latex/frontmatter/acknowledgement.tex
Normal file
78
latex/frontmatter/glossaries.tex
Normal file
78
latex/frontmatter/glossaries.tex
Normal file
@@ -0,0 +1,78 @@
|
||||
% % A new command that enables us to enter bi-lingual (Slovene and English) terms
|
||||
% % syntax: \addterm[options]{label}{Slovene}{Slovene first use}{English}{Slovene
|
||||
% % description}
|
||||
% \newcommand{\addterm}[6][]{
|
||||
% \newglossaryentry{#2}{
|
||||
% name={#3 (angl.\ #5)},
|
||||
% first={#4 (\emph{#5})},
|
||||
% text={#3},
|
||||
% sort={#3},
|
||||
% description={#6},
|
||||
% #1 % pass additional options to \newglossaryentry
|
||||
% }
|
||||
% }
|
||||
|
||||
% % A new command that enables us to enter (English) acronyms with bi-lingual
|
||||
% % (Slovene and English) long versions
|
||||
% % syntax: \addacronym[options]{label}{abbreviation}{Slovene long}{Slovene first
|
||||
% % use long}{English long}{Slovene description}
|
||||
% \newcommand{\addacronym}[7][]{
|
||||
% % Create the main glossary entry with \newacronym
|
||||
% % \newacronym[key-val list]{label}{abbrv}{long}
|
||||
% \newacronym[
|
||||
% name={#4 (angl.\ #6,\ #3)},
|
||||
% first={\emph{#5} (angl.\ \emph{#6},\ \emph{#3})},
|
||||
% sort={#4},
|
||||
% description={#7},
|
||||
% #1 % pass additional options to \newglossaryentry
|
||||
% ]
|
||||
% {#2}{#3}{#4}
|
||||
% % Create a cross-reference from the abbreviation to the main glossary entry by
|
||||
% % creating an auxiliary glossary entry (note: we set the label of this entry
|
||||
% % to '<original label>_auxiliary' to avoid clashes)
|
||||
% \newglossaryentry{#2_auxiliary}{
|
||||
% name={#3},
|
||||
% sort={#3},
|
||||
% description={\makefirstuc{#6}},
|
||||
% see=[See:]{#2}
|
||||
% }
|
||||
% }
|
||||
|
||||
% % Change the text of the cross-reference links to the Slovene long version.
|
||||
% \renewcommand*{\glsseeitemformat}[1]{\emph{\acrlong{#1}}.}
|
||||
|
||||
% Define the Indonesian term and link it to the English term
|
||||
\newglossaryentry{jaringansaraf}{
|
||||
name=Jaringan Saraf,
|
||||
description={The Indonesian term for \gls{nn}}
|
||||
}
|
||||
% \newglossaryentry{pemelajaranmesin}{
|
||||
% name=Pemelajaran Mesin,
|
||||
% description={Lihat \gls{machinelearning}}
|
||||
% }
|
||||
|
||||
% Define the English term and link it to its acronym
|
||||
\newglossaryentry{neuralnetwork}{
|
||||
name=Neural Network,
|
||||
description={A computational model inspired by the human brain, see \gls{nn}}
|
||||
}
|
||||
|
||||
% \newglossaryentry{machinelearning}{
|
||||
% name=Machine Learning,
|
||||
% description={A program or system that trains a model from input data. The trained model can make useful predictions from new (never-before-seen) data drawn from the same distribution as the one used to train the model.}}
|
||||
% \newglossaryentry{pemelajaranmesin}{
|
||||
% name={pemelajaran mesin (angl.\ #5)},
|
||||
% first={pemelajaran mesin (\emph{machine learning})},
|
||||
% text={pemelajaran mesin},
|
||||
% sort={ },
|
||||
% description={#6},
|
||||
% #1 % pass additional options to \newglossaryentry
|
||||
% }
|
||||
\longnewglossaryentry{machinelearning}{name={machine learning}}
|
||||
{A program or system that trains a model from input data. The trained model can make useful predictions from new (never-before-seen) data drawn from the same distribution as the one used to train the model.}
|
||||
\newterm[see={machinelearning}]{pemelajaranmesin}
|
||||
% \newglossaryentry{pemelajaran mesin}{}
|
||||
% \addterm{machinelearning}{pemelajaran mesin}{pemelajaran mesin}{machine learning}{A program or system that trains a model from input data. The trained model can make useful predictions from new (never-before-seen) data drawn from the same distribution as the one used to train the model.}
|
||||
\newacronym
|
||||
[description={statistical pattern recognition technique}]
|
||||
{svm}{SVM}{support vector machine}
|
||||
@@ -1,31 +1,30 @@
|
||||
\begin{titlepage}
|
||||
\centering
|
||||
\vspace*{1cm}
|
||||
|
||||
{\fontsize{14pt}{16pt}\selectfont \textbf{\MakeUppercase{Tugas Akhir}}\par}
|
||||
\vspace{1.5cm}
|
||||
|
||||
{\fontsize{14pt}{16pt}\selectfont \textbf{\MakeUppercase{\thesistitle}}\par}
|
||||
\vspace{1.5cm}
|
||||
|
||||
\includegraphics[width=5cm]{frontmatter/img/logo.png}
|
||||
\vspace{1.5cm}
|
||||
|
||||
|
||||
\textbf{Disusun oleh:} \\
|
||||
{\fontsize{14pt}{16pt}\selectfont \textbf{\studentname}} \\
|
||||
{\fontsize{14pt}{16pt}\selectfont \textbf{\studentid}} \\
|
||||
|
||||
|
||||
\vfill
|
||||
|
||||
{\fontsize{12pt}{14pt}\selectfont
|
||||
\textbf{\program} \\
|
||||
\textbf{\faculty} \\
|
||||
\textbf{\university} \\
|
||||
\textbf{\yearofsubmission}
|
||||
}
|
||||
|
||||
\end{titlepage}%
|
||||
|
||||
|
||||
\centering
|
||||
\vspace*{1cm}
|
||||
|
||||
{\fontsize{14pt}{16pt}\selectfont \textbf{\MakeUppercase{Tugas Akhir}}\par}
|
||||
\vspace{1.5cm}
|
||||
|
||||
{\fontsize{14pt}{16pt}\selectfont \textbf{\MakeUppercase{\thetitle}}\par}
|
||||
\vspace{1.5cm}
|
||||
|
||||
\includegraphics[width=5cm]{frontmatter/img/logo.png}
|
||||
\vspace{1.5cm}
|
||||
|
||||
|
||||
\textbf{Disusun oleh:} \\
|
||||
{\fontsize{14pt}{16pt}\selectfont \textbf{\theauthor}} \\
|
||||
{\fontsize{14pt}{16pt}\selectfont \textbf{\studentid}} \\
|
||||
|
||||
|
||||
\vfill
|
||||
|
||||
{\fontsize{12pt}{14pt}\selectfont
|
||||
\textbf{\program} \\
|
||||
\textbf{\faculty} \\
|
||||
\textbf{\university} \\
|
||||
\textbf{\yearofsubmission}
|
||||
}
|
||||
|
||||
\end{titlepage}%
|
||||
|
||||
|
||||
29
latex/frontmatter/maketitle_secondary.tex
Normal file
29
latex/frontmatter/maketitle_secondary.tex
Normal file
@@ -0,0 +1,29 @@
|
||||
\begin{titlepage}
|
||||
\centering
|
||||
{\fontsize{14pt}{16pt}\selectfont \textbf{\MakeUppercase{Tugas Akhir}}\par}
|
||||
\vspace{1.5cm}
|
||||
|
||||
{\fontsize{14pt}{16pt}\selectfont \textbf{\MakeUppercase{\thetitle}}\par}
|
||||
\vspace{1cm}
|
||||
{\normalsize\selectfont Diajukan guna melengkapi persyaratan untuk memenuhi gelar Sarjana Teknik di Program Studi Teknik Sipil, Fakultas Teknik, Universitas Muhammadiyah Yogyakarta\par}
|
||||
\vspace{1.5cm}
|
||||
|
||||
\includegraphics[width=5cm]{frontmatter/img/logo.png}
|
||||
\vspace{1.5cm}
|
||||
|
||||
|
||||
\textbf{Disusun oleh:} \\
|
||||
{\fontsize{14pt}{16pt}\selectfont \textbf{\theauthor}} \\
|
||||
{\fontsize{14pt}{16pt}\selectfont \textbf{\studentid}} \\
|
||||
|
||||
|
||||
\vfill
|
||||
|
||||
{\fontsize{12pt}{14pt}\selectfont
|
||||
\textbf{\program} \\
|
||||
\textbf{\faculty} \\
|
||||
\textbf{\university} \\
|
||||
\textbf{\yearofsubmission}
|
||||
}
|
||||
|
||||
\end{titlepage}%
|
||||
@@ -1,14 +1,18 @@
|
||||
\documentclass[draftmark]{thesis}
|
||||
|
||||
% Title Information
|
||||
\setthesisinfo
|
||||
{Prediksi Lokasi Kerusakan dengan Machine Learning}
|
||||
{Rifqi Damar Panuluh}
|
||||
{20210110224}
|
||||
{PROGRAM STUDI TEKNIK SIPIL}
|
||||
{FAKULTAS TEKNIK}
|
||||
{UNIVERSITAS MUHAMMADIYAH YOGYAKARTA}
|
||||
{2025}
|
||||
% Metadata
|
||||
\title{Prediksi Lokasi Kerusakan dengan Machine Learning}
|
||||
\author{Rifqi Damar Panuluh}
|
||||
\date{\today}
|
||||
\authorid{20210110224}
|
||||
\firstadvisor{Ir. Muhammad Ibnu Syamsi, Ph.D.}
|
||||
\secondadvisor{}
|
||||
\headdepartement{Puji Harsanto, S.T., M.T., Ph.D.}
|
||||
\headdepartementid{19740607201404123064}
|
||||
\faculty{Fakultas Teknik}
|
||||
\program{Program Studi Teknik Sipil}
|
||||
\university{Universitas Muhammadiyah Yogyakarta}
|
||||
\yearofsubmission{2025}
|
||||
|
||||
% Input preamble
|
||||
\input{preamble/packages}
|
||||
@@ -16,22 +20,19 @@
|
||||
\input{preamble/macros}
|
||||
|
||||
\begin{document}
|
||||
|
||||
\maketitle
|
||||
% \input{frontmatter/maketitle}
|
||||
% \input{frontmatter/maketitle_secondary}
|
||||
\frontmatter
|
||||
\input{frontmatter/approval}\clearpage
|
||||
\input{frontmatter/originality}\clearpage
|
||||
\input{frontmatter/acknowledgement}\clearpage
|
||||
\tableofcontents
|
||||
% \input{frontmatter/approval}\clearpage
|
||||
% \input{frontmatter/originality}\clearpage
|
||||
% \input{frontmatter/acknowledgement}\clearpage
|
||||
% \tableofcontents
|
||||
\clearpage
|
||||
\mainmatter
|
||||
\pagestyle{fancyplain}
|
||||
% Include content
|
||||
\include{content/abstract}
|
||||
\include{content/introduction}
|
||||
\include{chapters/01_introduction}
|
||||
\include{content/chapter2}
|
||||
\include{content/conclusion}
|
||||
\include{chapters/id/02_literature_review/index}
|
||||
\include{chapters/id/03_methodology/index}
|
||||
|
||||
% Bibliography
|
||||
% \bibliographystyle{IEEEtran}
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
\newcommand{\studentname}{Rifqi Damar Panuluh}
|
||||
\newcommand{\studentid}{20210110224}
|
||||
\newcommand{\thesistitle}{Prediksi Lokasi Kerusakan dengan Machine Learning}
|
||||
\newcommand{\firstadvisor}{Ir. Muhammad Ibnu Syamsi, Ph.D.}
|
||||
\newcommand{\secondadvisor}{}
|
||||
\newcommand{\headdepartement}{Puji Harsanto, S.T. M.T., Ph.D.}
|
||||
\newcommand{\headdepartementid}{19740607201404123064}
|
||||
\newcommand{\faculty}{Fakultas Teknik}
|
||||
\newcommand{\program}{Teknik Sipil}
|
||||
\newcommand{\university}{Universitas Muhammadiyah Yogyakarta}
|
||||
\newcommand{\yearofsubmission}{2025}
|
||||
247
latex/thesis.cls
247
latex/thesis.cls
@@ -1,7 +1,7 @@
|
||||
\NeedsTeXFormat{LaTeX2e}
|
||||
\ProvidesClass{thesis}[2025/05/10 Bachelor Thesis Class]
|
||||
|
||||
\newif\if@draftmark
|
||||
\newif\if@draftmark \@draftmarkfalse
|
||||
\@draftmarkfalse
|
||||
|
||||
\DeclareOption{draftmark}{\@draftmarktrue}
|
||||
@@ -12,6 +12,7 @@
|
||||
\RequirePackage{polyglossia}
|
||||
\RequirePackage{fontspec}
|
||||
\RequirePackage{titlesec}
|
||||
\RequirePackage{titling}
|
||||
\RequirePackage{fancyhdr}
|
||||
\RequirePackage{geometry}
|
||||
\RequirePackage{setspace}
|
||||
@@ -21,25 +22,34 @@
|
||||
\RequirePackage{tocloft}
|
||||
\RequirePackage{tocbibind}
|
||||
\RequirePackage{amsmath,amsfonts,amssymb}
|
||||
|
||||
\RequirePackage{svg} % Allows including SVG images directly
|
||||
\RequirePackage{indentfirst} % Makes first paragraph after headings indented
|
||||
\RequirePackage{float} % Provides [H] option to force figure/table placement
|
||||
\RequirePackage[style=apa, backend=biber]{biblatex}
|
||||
\RequirePackage[acronym, nogroupskip, toc]{glossaries}
|
||||
% Polyglossia set language
|
||||
\setmainlanguage{bahasai}
|
||||
% \setotherlanguage{english}
|
||||
\setdefaultlanguage[variant=indonesian]{malay} % Proper Indonesian language setup
|
||||
\setotherlanguage{english} % Enables English as secondary language
|
||||
\DefineBibliographyStrings{english}{% % Customizes bibliography text
|
||||
andothers={dkk\adddot}, % Changes "et al." to "dkk."
|
||||
pages={hlm\adddot}, % Changes "pp." to "hlm."
|
||||
}
|
||||
|
||||
% Conditionally load the watermark package and settings
|
||||
\if@draftmark
|
||||
\RequirePackage{draftwatermark}
|
||||
\SetWatermarkText{Draft: \today [wip]}
|
||||
\SetWatermarkColor[gray]{0.7}
|
||||
\SetWatermarkFontSize{2cm}
|
||||
\SetWatermarkText{nuluh/thesis (wip) [draft: \today]}
|
||||
\SetWatermarkColor[gray]{0.8} % Opacity: 0.8 = 20% transparent
|
||||
\SetWatermarkFontSize{1.5cm}
|
||||
\SetWatermarkAngle{90}
|
||||
\SetWatermarkHorCenter{1.5cm}
|
||||
\RequirePackage[left]{lineno}
|
||||
\linenumbers
|
||||
\fi
|
||||
|
||||
% Page layout
|
||||
\geometry{left=3cm, top=3cm, right=3cm, bottom=3cm}
|
||||
\geometry{left=4cm, top=3cm, right=3cm, bottom=3cm}
|
||||
\setlength{\parskip}{0.5em}
|
||||
\setlength{\parindent}{0pt}
|
||||
\onehalfspacing
|
||||
|
||||
% Fonts
|
||||
@@ -48,19 +58,45 @@
|
||||
\setsansfont{Arial}
|
||||
\setmonofont{Courier New}
|
||||
|
||||
% Metadata commands
|
||||
\input{metadata}
|
||||
|
||||
\newcommand{\setthesisinfo}[7]{%
|
||||
\renewcommand{\thesistitle}{#1}%
|
||||
\renewcommand{\studentname}{#2}%
|
||||
\renewcommand{\studentid}{#3}%
|
||||
\renewcommand{\program}{#4}%
|
||||
\renewcommand{\faculty}{#5}%
|
||||
\renewcommand{\university}{#6}%
|
||||
\renewcommand{\yearofsubmission}{#7}%
|
||||
\makeatletter
|
||||
% Extracting the Year from \today
|
||||
\newcommand{\theyear}{%
|
||||
\expandafter\@car\expandafter\@gobble\the\year\@nil
|
||||
}
|
||||
|
||||
% Declare internal macros as initially empty
|
||||
\newcommand{\@authorid}{}
|
||||
\newcommand{\@firstadvisor}{}
|
||||
\newcommand{\@secondadvisor}{}
|
||||
\newcommand{\@headdepartement}{}
|
||||
\newcommand{\@headdepartementid}{}
|
||||
\newcommand{\@faculty}{}
|
||||
\newcommand{\@program}{}
|
||||
\newcommand{\@university}{}
|
||||
\newcommand{\@yearofsubmission}{}
|
||||
|
||||
% Define user commands to set these values.
|
||||
\newcommand{\authorid}[1]{\gdef\@authorid{#1}}
|
||||
\newcommand{\firstadvisor}[1]{\gdef\@firstadvisor{#1}}
|
||||
\newcommand{\secondadvisor}[1]{\gdef\@secondadvisor{#1}}
|
||||
\newcommand{\headdepartement}[1]{\gdef\@headdepartement{#1}}
|
||||
\newcommand{\headdepartementid}[1]{\gdef\@headdepartementid{#1}}
|
||||
\newcommand{\faculty}[1]{\gdef\@faculty{#1}}
|
||||
\newcommand{\program}[1]{\gdef\@program{#1}}
|
||||
\newcommand{\university}[1]{\gdef\@university{#1}}
|
||||
\newcommand{\yearofsubmission}[1]{\gdef\@yearofsubmission{#1}}
|
||||
|
||||
% Now expose robust "the-" getters to access the values
|
||||
\newcommand{\theauthorid}{\@authorid}
|
||||
\newcommand{\thefirstadvisor}{\@firstadvisor}
|
||||
\newcommand{\thesecondadvisor}{\@secondadvisor}
|
||||
\newcommand{\theheaddepartement}{\@headdepartement}
|
||||
\newcommand{\theheaddepartementid}{\@headdepartementid}
|
||||
\newcommand{\thefaculty}{\@faculty}
|
||||
\newcommand{\theprogram}{\@program}
|
||||
\newcommand{\theuniversity}{\@university}
|
||||
\newcommand{\theyearofsubmission}{\@yearofsubmission}
|
||||
\makeatother
|
||||
% % Header and footer
|
||||
\fancypagestyle{fancy}{%
|
||||
\fancyhf{}
|
||||
@@ -79,7 +115,10 @@
|
||||
}
|
||||
|
||||
% Chapter formatting
|
||||
\titlespacing{\chapter}{0pt}{0pt}{*1.5}
|
||||
\titlespacing{\chapter}{0pt}{0cm}{*1.5} % 0pt→0cm: same value, different unit
|
||||
% 0pt = no space above chapter title
|
||||
% *1.5 = 1.5× line spacing after title
|
||||
|
||||
\titleformat{\chapter}[display]
|
||||
{\normalsize\bfseries\centering}
|
||||
{BAB~\Roman{chapter}} % << display format
|
||||
@@ -91,15 +130,14 @@
|
||||
\titleformat{\subsection}
|
||||
{\normalsize\bfseries}{\thesubsection}{1em}{}
|
||||
|
||||
% Section numbering depth
|
||||
\setcounter{secnumdepth}{3} % Enables numbering for:
|
||||
% 1 = chapters, 2 = sections, 3 = subsections
|
||||
|
||||
% Ensure chapter reference in TOC matches
|
||||
\renewcommand{\cftchappresnum}{BAB~}
|
||||
\renewcommand{\cftchapaftersnum}{\quad}
|
||||
|
||||
% \titlespacing*{\chapter}{0pt}{-10pt}{20pt}
|
||||
|
||||
% Redefine \maketitle
|
||||
\renewcommand{\maketitle}{\input{frontmatter/maketitle}}
|
||||
|
||||
% Chapter & Section format
|
||||
\renewcommand{\cftchapfont}{\normalsize\MakeUppercase}
|
||||
% \renewcommand{\cftsecfont}{}
|
||||
@@ -108,18 +146,28 @@
|
||||
|
||||
|
||||
% Dot leaders, spacing, indentation
|
||||
\setlength{\cftbeforetoctitleskip}{0cm} % Space above "DAFTAR ISI" title
|
||||
\setlength{\cftbeforeloftitleskip}{0cm} % Space above "DAFTAR GAMBAR" title
|
||||
\setlength{\cftbeforelottitleskip}{0cm} % Space above "DAFTAR TABEL" title
|
||||
|
||||
\setlength{\cftbeforechapskip}{0em}
|
||||
\setlength{\cftchapindent}{0pt}
|
||||
\setlength{\cftsecindent}{0em}
|
||||
\setlength{\cftsubsecindent}{2.5em}
|
||||
\setlength{\cftsubsecindent}{2em}
|
||||
\setlength{\cftchapnumwidth}{3.5em}
|
||||
\setlength{\cftsecnumwidth}{3.5em}
|
||||
\setlength{\cftsecnumwidth}{2em}
|
||||
\setlength{\cftsubsecnumwidth}{2.5em}
|
||||
\setlength{\cftfignumwidth}{5em}
|
||||
\setlength{\cfttabnumwidth}{4em}
|
||||
\renewcommand \cftchapdotsep{4.5} % https://tex.stackexchange.com/a/273764
|
||||
\renewcommand \cftchapdotsep{1} % https://tex.stackexchange.com/a/273764
|
||||
\renewcommand \cftsecdotsep{1} % https://tex.stackexchange.com/a/273764
|
||||
\renewcommand \cftsubsecdotsep{1} % https://tex.stackexchange.com/a/273764
|
||||
\renewcommand \cftfigdotsep{1.5} % https://tex.stackexchange.com/a/273764
|
||||
\renewcommand \cfttabdotsep{1.5} % https://tex.stackexchange.com/a/273764
|
||||
\renewcommand{\cftchapleader}{\normalfont\cftdotfill{\cftsecdotsep}}
|
||||
\renewcommand{\cftchappagefont}{\normalfont}
|
||||
|
||||
% Add Prefix in the Lof and LoT entries
|
||||
\renewcommand{\cftfigpresnum}{\figurename~}
|
||||
\renewcommand{\cfttabpresnum}{\tablename~}
|
||||
|
||||
@@ -144,6 +192,147 @@
|
||||
% \renewcommand{\cfttoctitlefont}{\bfseries\MakeUppercase}
|
||||
% \renewcommand{\cftaftertoctitle}{\vskip 2em}
|
||||
|
||||
% Defines a new glossary called “notation”
|
||||
\newglossary[nlg]{notation}{not}{ntn}{Notation}
|
||||
|
||||
% Define the header for the location column
|
||||
\providecommand*{\locationname}{Location}
|
||||
|
||||
% Define the new glossary style called 'mylistalt' for main glossaries
|
||||
\makeatletter
|
||||
\newglossarystyle{mylistalt}{%
|
||||
% start the list, initializing glossaries internals
|
||||
\renewenvironment{theglossary}%
|
||||
{\glslistinit\begin{enumerate}}%
|
||||
{\end{enumerate}}%
|
||||
% suppress all headers/groupskips
|
||||
\renewcommand*{\glossaryheader}{}%
|
||||
\renewcommand*{\glsgroupheading}[1]{}%
|
||||
\renewcommand*{\glsgroupskip}{}%
|
||||
% main entries: let \item produce "1." etc., then break
|
||||
\renewcommand*{\glossentry}[2]{%
|
||||
\item \glstarget{##1}{\glossentryname{##1}}%
|
||||
\mbox{}\\
|
||||
\glossentrydesc{##1}\space
|
||||
[##2] % appears on page x
|
||||
}%
|
||||
% sub-entries as separate paragraphs, still aligned
|
||||
\renewcommand*{\subglossentry}[3]{%
|
||||
\par
|
||||
\glssubentryitem{##2}%
|
||||
\glstarget{##2}{\strut}\space
|
||||
\glossentrydesc{##2}\space ##3%
|
||||
}%
|
||||
}
|
||||
|
||||
|
||||
% Define the new glossary style 'altlong3customheader' for notation
|
||||
\newglossarystyle{altlong3customheader}{%
|
||||
% The glossary will be a longtable environment with three columns:
|
||||
% 1. Symbol (left-aligned)
|
||||
% 2. Description (paragraph, width \glsdescwidth)
|
||||
% 3. Location (paragraph, width \glspagelistwidth)
|
||||
\renewenvironment{theglossary}%
|
||||
{\begin{longtable}{lp{\glsdescwidth}p{\glspagelistwidth}}}%
|
||||
{\end{longtable}}%
|
||||
% Define the table header row
|
||||
\renewcommand*{\symbolname}{Simbol}
|
||||
\renewcommand*{\descriptionname}{Keterangan}
|
||||
\renewcommand*{\locationname}{Halaman}
|
||||
\renewcommand*{\glossaryheader}{%
|
||||
\bfseries\symbolname & \bfseries\descriptionname & \bfseries\locationname \tabularnewline\endhead}%
|
||||
% Suppress group headings (e.g., A, B, C...)
|
||||
\renewcommand*{\glsgroupheading}[1]{}%
|
||||
% Define how a main glossary entry is displayed
|
||||
% ##1 is the entry label
|
||||
% ##2 is the location list (page numbers)
|
||||
\renewcommand{\glossentry}[2]{%
|
||||
\glsentryitem{##1}% Inserts entry number if entrycounter option is used
|
||||
\glstarget{##1}{\glossentryname{##1}} & % Column 1: Symbol (with hyperlink target)
|
||||
\glossentrydesc{##1}\glspostdescription & % Column 2: Description (with post-description punctuation)
|
||||
##2\tabularnewline % Column 3: Location list
|
||||
}%
|
||||
% Define how a sub-entry is displayed
|
||||
% ##1 is the sub-entry level (e.g., 1 for first sub-level)
|
||||
% ##2 is the entry label
|
||||
% ##3 is the location list
|
||||
\renewcommand{\subglossentry}[3]{%
|
||||
& % Column 1 (Symbol) is left blank for sub-entries to create an indented look
|
||||
\glssubentryitem{##2}% Inserts sub-entry number if subentrycounter is used
|
||||
\glstarget{##2}{\strut}\glossentrydesc{##2}\glspostdescription & % Column 2: Description (target on strut for hyperlink)
|
||||
##3\tabularnewline % Column 3: Location list
|
||||
}%
|
||||
% Define the skip between letter groups (if group headings were enabled)
|
||||
% For 3 columns, we need 2 ampersands for a full blank row if not using \multicolumn
|
||||
\ifglsnogroupskip
|
||||
\renewcommand*{\glsgroupskip}{}%
|
||||
\else
|
||||
\renewcommand*{\glsgroupskip}{& & \tabularnewline}%
|
||||
\fi
|
||||
}
|
||||
|
||||
% Define a new style 'supercol' based on 'super' for acronyms glossaries
|
||||
\newglossarystyle{supercol}{%
|
||||
\setglossarystyle{super}% inherit everything from the original
|
||||
% override just the main-entry format:
|
||||
\renewcommand*{\glossentry}[2]{%
|
||||
\glsentryitem{##1}%
|
||||
\glstarget{##1}{\glossentryname{##1}}\space % <-- added colon here
|
||||
&: \glossentrydesc{##1}\glspostdescription\space ##2\tabularnewline
|
||||
}%
|
||||
% likewise for sub‐entries, if you want a colon there too:
|
||||
\renewcommand*{\subglossentry}[3]{%
|
||||
&:
|
||||
\glssubentryitem{##2}%
|
||||
\glstarget{##2}{\strut}\glossentryname{##2}\space % <-- and here
|
||||
\glossentrydesc{##2}\glspostdescription\space ##3\tabularnewline
|
||||
}%
|
||||
}
|
||||
\makeatother
|
||||
|
||||
% A new command that enables us to enter bi-lingual (Bahasa Indonesia and English) terms
|
||||
% syntax: \addterm[options]{label}{Bahasa Indonesia}{Bahasa Indonesia first use}{English}{Bahasa Indonesia
|
||||
% description}
|
||||
\newcommand{\addterm}[6][]{
|
||||
\newglossaryentry{#2}{
|
||||
name={#3 (angl.\ #5)},
|
||||
first={#4 (\emph{#5})},
|
||||
text={#3},
|
||||
sort={#3},
|
||||
description={#6},
|
||||
#1 % pass additional options to \newglossaryentry
|
||||
}
|
||||
}
|
||||
|
||||
% A new command that enables us to enter (English) acronyms with bi-lingual
|
||||
% (Bahasa Indonesia and English) long versions
|
||||
% syntax: \addacronym[options]{label}{abbreviation}{Bahasa Indonesia long}{Bahasa Indonesia first
|
||||
% use long}{English long}{Bahasa Indonesia description}
|
||||
\newcommand{\addacronym}[7][]{
|
||||
% Create the main glossary entry with \newacronym
|
||||
% \newacronym[key-val list]{label}{abbrv}{long}
|
||||
\newacronym[
|
||||
name={#4 (angl.\ #6,\ #3)},
|
||||
first={\emph{#5} (angl.\ \emph{#6},\ \emph{#3})},
|
||||
sort={#4},
|
||||
description={#7},
|
||||
#1 % pass additional options to \newglossaryentry
|
||||
]
|
||||
{#2}{#3}{#4}
|
||||
% Create a cross-reference from the abbreviation to the main glossary entry by
|
||||
% creating an auxiliary glossary entry (note: we set the label of this entry
|
||||
% to '<original label>_auxiliary' to avoid clashes)
|
||||
\newglossaryentry{#2_auxiliary}{
|
||||
name={#3},
|
||||
sort={#3},
|
||||
description={\makefirstuc{#6}},
|
||||
see=[See:]{#2}
|
||||
}
|
||||
}
|
||||
|
||||
% Change the text of the cross-reference links to the Bahasa Indonesia long version.
|
||||
\renewcommand*{\glsseeitemformat}[1]{\emph{\acrlong{#1}}.}
|
||||
|
||||
% % Apply a custom fancyhdr layout only on the first page of each \chapter, and use no header/footer elsewhere
|
||||
% % \let\oldchapter\chapter
|
||||
% % \renewcommand{\chapter}{%
|
||||
|
||||
Reference in New Issue
Block a user