Compare commits

..

17 Commits

Author SHA1 Message Date
nuluh
f5dada1b9c fix(latex): fix image path for flowchart in methodology section 2025-06-04 15:59:13 +07:00
nuluh
37c9a0765a fix(documentclass): remove language option from biblatex package 2025-06-04 15:53:57 +07:00
nuluh
8656289a1c chore(documentclass): comment out table of contents for temporary removal 2025-06-04 15:53:35 +07:00
nuluh
15fe8339ec feat(documentclass): add new glossary for notation 2025-06-04 15:31:00 +07:00
nuluh
44210ef372 chore(latex): comment out maketitle inputs temporarily 2025-06-04 11:27:56 +07:00
nuluh
9192d4c81c chore(documentclass): remove commented-out code for chapter formatting and header layout 2025-06-03 21:37:32 +07:00
nuluh
0373743ca7 fix(documentclass): enhance dot separation in ToC and add prefixes for figures and tables 2025-06-03 21:34:05 +07:00
nuluh
49d6395e6f fix(documentclass): add missing \RequirePackage{titling} for maketitle formatting 2025-06-03 21:16:34 +07:00
nuluh
bf9cca2d90 feat(documentclass): redefine metadata information to main.tex by consolidate internal command inside thesis.cls and remove metadata.tex
Closes #96
2025-06-03 21:13:28 +07:00
nuluh
08420296e6 fix(documentclass): add missing \makeatother command to properly close the @ symbol 2025-06-03 20:59:11 +07:00
nuluh
1540213eec feat(documentclass): add commands for bilingual terms and acronyms with custom glossary entries 2025-06-03 20:58:18 +07:00
nuluh
6fd4b7465e feat(documentclass): add new glossary style 'supercol' for enhanced acronym formatting
Closes #85
2025-06-03 20:55:26 +07:00
nuluh
85a0aebf36 feat(documentclass): add custom glossary style 'altlong3customheader' for notation with three-column layout
Closes #95
2025-06-03 20:54:45 +07:00
nuluh
8d1edfdbf7 feat(glossaries): add glossary support with custom style for main glossaries entry and location header
Closes #84
2025-06-03 20:52:54 +07:00
nuluh
ff862d9467 fix(documentclass): adjust page layout by increasing left margin to 4cm 2025-06-03 20:39:03 +07:00
nuluh
dfb64db1d8 feat(documentclass): add draft watermark and optional line numbering with 'draftmark' option 2025-06-03 20:37:29 +07:00
Rifqi D. Panuluh
3e3de577ba Merge pull request #94 from nuluh/latex/91-bug-expose-maketitle
Maketitle Replaced with \input for Flexibility when integrated with latexdiff-latexpand Workflow
2025-06-03 20:16:30 +07:00
11 changed files with 416 additions and 414 deletions

View File

@@ -1,52 +0,0 @@
name: LaTeX Lint
on:
push:
branches:
- main
- dev
paths:
- 'latex/**/*.tex'
- 'latex/main.tex'
workflow_dispatch:
jobs:
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Install chktex
run: |
sudo apt-get update
sudo apt-get install -y chktex
- name: Run chktex inside latex/
working-directory: latex
run: |
TEX_FILES=$(find . -type f -name "*.tex")
if [ -z "$TEX_FILES" ]; then
echo "No .tex files found in latex/. Skipping lint."
exit 0
fi
echo "🔍 Linting .tex files with chktex..."
FAIL=0
for f in $TEX_FILES; do
echo "▶ Checking $f"
# Run chktex and show output; capture error status
if ! chktex "$f"; then
echo "::warning file=$f::ChkTeX found issues in $f"
FAIL=1
fi
done
if [ $FAIL -ne 0 ]; then
echo "::error::❌ Lint errors or warnings were found in one or more .tex files above."
exit 1
else
echo "✅ All files passed chktex lint."
fi

View File

@@ -1,102 +0,0 @@
name: LaTeX Diff
on:
workflow_dispatch:
inputs:
base_branch:
description: 'Base branch (older version)'
required: true
compare_branch:
description: 'Compare branch (new version)'
required: true
jobs:
latexdiff:
runs-on: ubuntu-latest
container:
image: ghcr.io/xu-cheng/texlive-full:latest
options: --user root
steps:
- name: Install latexpand (Perl script)
run: |
tlmgr init-usertree
tlmgr install latexpand
- name: Checkout base branch
uses: actions/checkout@v4
with:
ref: ${{ github.event.inputs.base_branch }}
path: base
- name: Checkout compare branch
uses: actions/checkout@v4
with:
ref: ${{ github.event.inputs.compare_branch }}
path: compare
- name: Create output folder
run: mkdir -p diff_output
- name: Flatten base/main.tex (with latexpand)
run: |
cd base/latex
echo "📂 Listing files in base/latex:"
ls -R
echo "🔄 Flattening with latexpand..."
latexpand --verbose --keep-comments --output=../../diff_output/base_flat.tex main.tex
echo "✅ Preview of base_flat.tex:"
head -n 50 ../../diff_output/base_flat.tex
- name: Flatten compare/main.tex (with latexpand)
run: |
cd compare/latex
echo "📂 Listing files in compare/latex:"
ls -R
echo "🔄 Flattening with latexpand..."
latexpand --verbose --keep-comments --output=../../diff_output/compare_flat.tex main.tex
echo "✅ Preview of compare_flat.tex:"
head -n 50 ../../diff_output/compare_flat.tex
- name: Generate diff.tex using latexdiff
run: |
latexdiff diff_output/base_flat.tex diff_output/compare_flat.tex > diff_output/diff.tex
- name: Copy thesis.cls to diff_output
run: cp compare/latex/thesis.cls diff_output/
- name: Copy chapters/img into diff_output
run: |
# Create the same chapters/img path inside diff_output
mkdir -p diff_output/chapters/img
# Copy all images from compare branch into diff_output
cp -R compare/latex/chapters/img/* diff_output/chapters/img/
- name: Copy .bib files into diff_output
run: |
mkdir -p diff_output
cp compare/latex/*.bib diff_output/
- name: Override “\input{preamble/fonts}” in diff.tex
run: |
sed -i "/\\input{preamble\/fonts}/c % — replaced by CI: use TeX Gyre fonts instead of Times New Roman\/Arial\n\\\setmainfont{TeX Gyre Termes}\n\\\setsansfont{TeX Gyre Heros}\n\\\setmonofont{TeX Gyre Cursor}" diff_output/diff.tex
- name: Print preview of diff.tex (after font override)
run: |
echo "📄 Preview of diff_output/diff.tex after font override:"
head -n 50 diff_output/diff.tex
- name: Compile diff.tex to PDF
working-directory: diff_output
continue-on-error: true
run: |
xelatex -interaction=nonstopmode diff.tex
xelatex -interaction=nonstopmode diff.tex
- name: Upload diff output files
uses: actions/upload-artifact@v4
with:
name: latex-diff-output
path: diff_output/

View File

@@ -1,29 +0,0 @@
name: Render XeLaTeX on PR to dev
on:
pull_request:
branches:
- dev
jobs:
build-pdf:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Compile XeLaTeX
uses: dante-ev/latex-action@2021-A
with:
root_file: main.tex
working_directory: latex
compiler: xelatex
args: -interaction=nonstopmode -halt-on-error -file-line-error
extra_system_packages: "fonts-freefont-otf"
- name: Upload compiled PDF
uses: actions/upload-artifact@v4
with:
name: compiled-pdf
path: latex/main.pdf

View File

@@ -4,14 +4,20 @@ This repository contains the work related to my thesis, which focuses on damage
**Note:** This repository does not contain the secondary data used in the analysis. The code is designed to work with data from the [QUGS (Qatar University Grandstand Simulator)](https://www.structuralvibration.com/benchmark/qugs/) dataset, which is not included here.
The repository is private and access is restricted only to those who have been given explicit permission by the owner. Access is provided solely for the purpose of brief review or seeking technical guidance.
## Restrictions
- **No Derivative Works or Cloning:** Any form of copying, cloning, or creating derivative works based on this repository is strictly prohibited.
- **Limited Access:** Use beyond brief review or collaboration is not allowed without prior permission from the owner.
---
All contents of this repository, including the thesis idea, code, and associated data, are copyrighted © 2024 by Rifqi Panuluh. Unauthorized use or duplication is prohibited.
[LICENSE](https://github.com/nuluh/thesis?tab=License-1-ov-file#readme)
## How to Run `stft.ipynb`
1. run `pip install -e .` in root project first
2. run the notebook

View File

@@ -334,8 +334,9 @@
"metadata": {},
"outputs": [],
"source": [
"len(ready_data1a)\n",
"# plt.pcolormesh(ready_data1[0])"
"# len(ready_data1a)\n",
"# plt.pcolormesh(ready_data1[0])\n",
"ready_data1a[0].max().max()"
]
},
{
@@ -345,7 +346,8 @@
"outputs": [],
"source": [
"for i in range(6):\n",
" plt.pcolormesh(ready_data1a[i])\n",
" plt.pcolormesh(ready_data1a[i], cmap=\"jet\", vmax=0.03, vmin=0.0)\n",
" plt.colorbar() \n",
" plt.title(f'STFT Magnitude for case {i} sensor 1')\n",
" plt.xlabel(f'Frequency [Hz]')\n",
" plt.ylabel(f'Time [sec]')\n",
@@ -535,8 +537,8 @@
"metadata": {},
"outputs": [],
"source": [
"# len(y_data[0])\n",
"y_data"
"len(y_data[0])\n",
"# y_data"
]
},
{
@@ -619,137 +621,15 @@
"metadata": {},
"outputs": [],
"source": [
"accuracies1 = []\n",
"accuracies2 = []\n",
"\n",
"\n",
"# 1. Random Forest\n",
"rf_model1 = RandomForestClassifier()\n",
"rf_model1.fit(x_train1, y_train)\n",
"rf_pred1 = rf_model1.predict(x_test1)\n",
"acc1 = accuracy_score(y_test, rf_pred1) * 100\n",
"accuracies1.append(acc1)\n",
"# format with color coded if acc1 > 90\n",
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
"print(\"Random Forest Accuracy for sensor 1:\", acc1)\n",
"rf_model2 = RandomForestClassifier()\n",
"rf_model2.fit(x_train2, y_train)\n",
"rf_pred2 = rf_model2.predict(x_test2)\n",
"acc2 = accuracy_score(y_test, rf_pred2) * 100\n",
"accuracies2.append(acc2)\n",
"# format with color coded if acc2 > 90\n",
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
"print(\"Random Forest Accuracy for sensor 2:\", acc2)\n",
"# print(rf_pred)\n",
"# print(y_test)\n",
"\n",
"# 2. Bagged Trees\n",
"bagged_model1 = BaggingClassifier(estimator=DecisionTreeClassifier(), n_estimators=10)\n",
"bagged_model1.fit(x_train1, y_train)\n",
"bagged_pred1 = bagged_model1.predict(x_test1)\n",
"acc1 = accuracy_score(y_test, bagged_pred1) * 100\n",
"accuracies1.append(acc1)\n",
"# format with color coded if acc1 > 90\n",
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
"print(\"Bagged Trees Accuracy for sensor 1:\", acc1)\n",
"bagged_model2 = BaggingClassifier(estimator=DecisionTreeClassifier(), n_estimators=10)\n",
"bagged_model2.fit(x_train2, y_train)\n",
"bagged_pred2 = bagged_model2.predict(x_test2)\n",
"acc2 = accuracy_score(y_test, bagged_pred2) * 100\n",
"accuracies2.append(acc2)\n",
"# format with color coded if acc2 > 90\n",
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
"print(\"Bagged Trees Accuracy for sensor 2:\", acc2)\n",
"\n",
"# 3. Decision Tree\n",
"dt_model = DecisionTreeClassifier()\n",
"dt_model.fit(x_train1, y_train)\n",
"dt_pred1 = dt_model.predict(x_test1)\n",
"acc1 = accuracy_score(y_test, dt_pred1) * 100\n",
"accuracies1.append(acc1)\n",
"# format with color coded if acc1 > 90\n",
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
"print(\"Decision Tree Accuracy for sensor 1:\", acc1)\n",
"dt_model2 = DecisionTreeClassifier()\n",
"dt_model2.fit(x_train2, y_train)\n",
"dt_pred2 = dt_model2.predict(x_test2)\n",
"acc2 = accuracy_score(y_test, dt_pred2) * 100\n",
"accuracies2.append(acc2)\n",
"# format with color coded if acc2 > 90\n",
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
"print(\"Decision Tree Accuracy for sensor 2:\", acc2)\n",
"\n",
"# 4. KNeighbors\n",
"knn_model = KNeighborsClassifier()\n",
"knn_model.fit(x_train1, y_train)\n",
"knn_pred1 = knn_model.predict(x_test1)\n",
"acc1 = accuracy_score(y_test, knn_pred1) * 100\n",
"accuracies1.append(acc1)\n",
"# format with color coded if acc1 > 90\n",
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
"print(\"KNeighbors Accuracy for sensor 1:\", acc1)\n",
"knn_model2 = KNeighborsClassifier()\n",
"knn_model2.fit(x_train2, y_train)\n",
"knn_pred2 = knn_model2.predict(x_test2)\n",
"acc2 = accuracy_score(y_test, knn_pred2) * 100\n",
"accuracies2.append(acc2)\n",
"# format with color coded if acc2 > 90\n",
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
"print(\"KNeighbors Accuracy for sensor 2:\", acc2)\n",
"\n",
"# 5. Linear Discriminant Analysis\n",
"lda_model = LinearDiscriminantAnalysis()\n",
"lda_model.fit(x_train1, y_train)\n",
"lda_pred1 = lda_model.predict(x_test1)\n",
"acc1 = accuracy_score(y_test, lda_pred1) * 100\n",
"accuracies1.append(acc1)\n",
"# format with color coded if acc1 > 90\n",
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
"print(\"Linear Discriminant Analysis Accuracy for sensor 1:\", acc1)\n",
"lda_model2 = LinearDiscriminantAnalysis()\n",
"lda_model2.fit(x_train2, y_train)\n",
"lda_pred2 = lda_model2.predict(x_test2)\n",
"acc2 = accuracy_score(y_test, lda_pred2) * 100\n",
"accuracies2.append(acc2)\n",
"# format with color coded if acc2 > 90\n",
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
"print(\"Linear Discriminant Analysis Accuracy for sensor 2:\", acc2)\n",
"\n",
"# 6. Support Vector Machine\n",
"svm_model = SVC()\n",
"svm_model.fit(x_train1, y_train)\n",
"svm_pred1 = svm_model.predict(x_test1)\n",
"acc1 = accuracy_score(y_test, svm_pred1) * 100\n",
"accuracies1.append(acc1)\n",
"# format with color coded if acc1 > 90\n",
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
"print(\"Support Vector Machine Accuracy for sensor 1:\", acc1)\n",
"svm_model2 = SVC()\n",
"svm_model2.fit(x_train2, y_train)\n",
"svm_pred2 = svm_model2.predict(x_test2)\n",
"acc2 = accuracy_score(y_test, svm_pred2) * 100\n",
"accuracies2.append(acc2)\n",
"# format with color coded if acc2 > 90\n",
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
"print(\"Support Vector Machine Accuracy for sensor 2:\", acc2)\n",
"\n",
"# 7. XGBoost\n",
"xgboost_model = XGBClassifier()\n",
"xgboost_model.fit(x_train1, y_train)\n",
"xgboost_pred1 = xgboost_model.predict(x_test1)\n",
"acc1 = accuracy_score(y_test, xgboost_pred1) * 100\n",
"accuracies1.append(acc1)\n",
"# format with color coded if acc1 > 90\n",
"acc1 = f\"\\033[92m{acc1:.2f}\\033[00m\" if acc1 > 90 else f\"{acc1:.2f}\"\n",
"print(\"XGBoost Accuracy:\", acc1)\n",
"xgboost_model2 = XGBClassifier()\n",
"xgboost_model2.fit(x_train2, y_train)\n",
"xgboost_pred2 = xgboost_model2.predict(x_test2)\n",
"acc2 = accuracy_score(y_test, xgboost_pred2) * 100\n",
"accuracies2.append(acc2)\n",
"# format with color coded if acc2 > 90\n",
"acc2 = f\"\\033[92m{acc2:.2f}\\033[00m\" if acc2 > 90 else f\"{acc2:.2f}\"\n",
"print(\"XGBoost Accuracy:\", acc2)"
"def train_and_evaluate_model(model, model_name, sensor_label, x_train, y_train, x_test, y_test):\n",
" model.fit(x_train, y_train)\n",
" y_pred = model.predict(x_test)\n",
" accuracy = accuracy_score(y_test, y_pred) * 100\n",
" return {\n",
" \"model\": model_name,\n",
" \"sensor\": sensor_label,\n",
" \"accuracy\": accuracy\n",
" }"
]
},
{
@@ -758,8 +638,59 @@
"metadata": {},
"outputs": [],
"source": [
"print(accuracies1)\n",
"print(accuracies2)"
"# Define models for sensor1\n",
"models_sensor1 = {\n",
" # \"Random Forest\": RandomForestClassifier(),\n",
" # \"Bagged Trees\": BaggingClassifier(estimator=DecisionTreeClassifier(), n_estimators=10),\n",
" # \"Decision Tree\": DecisionTreeClassifier(),\n",
" # \"KNN\": KNeighborsClassifier(),\n",
" # \"LDA\": LinearDiscriminantAnalysis(),\n",
" \"SVM\": SVC(),\n",
" \"XGBoost\": XGBClassifier()\n",
"}\n",
"\n",
"results_sensor1 = []\n",
"for name, model in models_sensor1.items():\n",
" res = train_and_evaluate_model(model, name, \"sensor1\", x_train1, y_train, x_test1, y_test)\n",
" results_sensor1.append(res)\n",
" print(f\"{name} on sensor1: Accuracy = {res['accuracy']:.2f}%\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"models_sensor2 = {\n",
" # \"Random Forest\": RandomForestClassifier(),\n",
" # \"Bagged Trees\": BaggingClassifier(estimator=DecisionTreeClassifier(), n_estimators=10),\n",
" # \"Decision Tree\": DecisionTreeClassifier(),\n",
" # \"KNN\": KNeighborsClassifier(),\n",
" # \"LDA\": LinearDiscriminantAnalysis(),\n",
" \"SVM\": SVC(),\n",
" \"XGBoost\": XGBClassifier()\n",
"}\n",
"\n",
"results_sensor2 = []\n",
"for name, model in models_sensor2.items():\n",
" res = train_and_evaluate_model(model, name, \"sensor2\", x_train2, y_train, x_test2, y_test)\n",
" results_sensor2.append(res)\n",
" print(f\"{name} on sensor2: Accuracy = {res['accuracy']:.2f}%\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"all_results = {\n",
" \"sensor1\": results_sensor1,\n",
" \"sensor2\": results_sensor2\n",
"}\n",
"\n",
"print(all_results)"
]
},
{
@@ -771,36 +702,48 @@
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"\n",
"models = [rf_model, bagged_model, dt_model, knn_model, lda_model, svm_model, xgboost_model]\n",
"model_names = [\"Random Forest\", \"Bagged Trees\", \"Decision Tree\", \"KNN\", \"LDA\", \"SVM\", \"XGBoost\"]\n",
"def prepare_plot_data(results_dict):\n",
" # Gather unique model names\n",
" models_set = {entry['model'] for sensor in results_dict.values() for entry in sensor}\n",
" models = sorted(list(models_set))\n",
" \n",
"bar_width = 0.35 # Width of each bar\n",
"index = np.arange(len(model_names)) # Index for the bars\n",
" # Create dictionaries mapping sensor -> accuracy list ordered by model name\n",
" sensor_accuracies = {}\n",
" for sensor, entries in results_dict.items():\n",
" # Build a mapping: model -> accuracy for the given sensor\n",
" mapping = {entry['model']: entry['accuracy'] for entry in entries}\n",
" # Order the accuracies consistent with the sorted model names\n",
" sensor_accuracies[sensor] = [mapping.get(model, 0) for model in models]\n",
" \n",
"# Plotting the bar graph\n",
"plt.figure(figsize=(14, 8))\n",
" return models, sensor_accuracies\n",
"\n",
"# Bar plot for Sensor 1\n",
"plt.bar(index, accuracies1, width=bar_width, color='blue', label='Sensor 1')\n",
"def plot_accuracies(models, sensor_accuracies):\n",
" bar_width = 0.35\n",
" x = np.arange(len(models))\n",
" sensors = list(sensor_accuracies.keys())\n",
" \n",
"# Bar plot for Sensor 2\n",
"plt.bar(index + bar_width, accuracies2, width=bar_width, color='orange', label='Sensor 2')\n",
" plt.figure(figsize=(10, 6))\n",
" # Assume two sensors for plotting grouped bars\n",
" plt.bar(x - bar_width/2, sensor_accuracies[sensors[0]], width=bar_width, color='blue', label=sensors[0])\n",
" plt.bar(x + bar_width/2, sensor_accuracies[sensors[1]], width=bar_width, color='orange', label=sensors[1])\n",
" \n",
"# Add values on top of each bar\n",
"for i, acc1, acc2 in zip(index, accuracies1, accuracies2):\n",
" plt.text(i, acc1 + .1, f'{acc1:.2f}%', ha='center', va='bottom', color='black')\n",
" plt.text(i + bar_width, acc2 + 1, f'{acc2:.2f}%', ha='center', va='bottom', color='black')\n",
" # Add text labels on top of bars\n",
" for i, (a1, a2) in enumerate(zip(sensor_accuracies[sensors[0]], sensor_accuracies[sensors[1]])):\n",
" plt.text(x[i] - bar_width/2, a1 + 0.1, f\"{a1:.2f}%\", ha='center', va='bottom', color='black')\n",
" plt.text(x[i] + bar_width/2, a2 + 0.1, f\"{a2:.2f}%\", ha='center', va='bottom', color='black')\n",
" \n",
"# Customize the plot\n",
"plt.xlabel('Model Name →')\n",
"plt.ylabel('Accuracy →')\n",
"plt.title('Accuracy of classifiers for Sensors 1 and 2 with 513 features')\n",
"plt.xticks(index + bar_width / 2, model_names) # Set x-tick positions\n",
" plt.xlabel('Model Name')\n",
" plt.ylabel('Accuracy (%)')\n",
" plt.title('Accuracy of Classifiers for Each Sensor')\n",
" plt.xticks(x, models)\n",
" plt.legend()\n",
"plt.ylim(0, 100)\n",
" plt.ylim(0, 105)\n",
" plt.tight_layout()\n",
" plt.show()\n",
"\n",
"# Show the plot\n",
"plt.show()\n"
"# Use the functions\n",
"models, sensor_accuracies = prepare_plot_data(all_results)\n",
"plot_accuracies(models, sensor_accuracies)\n"
]
},
{

View File

@@ -3,7 +3,7 @@ Alur keseluruhan penelitian ini dilakukan melalui tahapan-tahapan sebagai beriku
\begin{figure}[H]
\centering
\includegraphics[width=0.3\linewidth]{chapters/id/flow.png}
\includegraphics[width=0.3\linewidth]{chapters/img/flow.png}
\caption{Diagram alir tahapan penelitian}
\label{fig:flowchart}
\end{figure}

View File

View File

@@ -0,0 +1,78 @@
% % A new command that enables us to enter bi-lingual (Slovene and English) terms
% % syntax: \addterm[options]{label}{Slovene}{Slovene first use}{English}{Slovene
% % description}
% \newcommand{\addterm}[6][]{
% \newglossaryentry{#2}{
% name={#3 (angl.\ #5)},
% first={#4 (\emph{#5})},
% text={#3},
% sort={#3},
% description={#6},
% #1 % pass additional options to \newglossaryentry
% }
% }
% % A new command that enables us to enter (English) acronyms with bi-lingual
% % (Slovene and English) long versions
% % syntax: \addacronym[options]{label}{abbreviation}{Slovene long}{Slovene first
% % use long}{English long}{Slovene description}
% \newcommand{\addacronym}[7][]{
% % Create the main glossary entry with \newacronym
% % \newacronym[key-val list]{label}{abbrv}{long}
% \newacronym[
% name={#4 (angl.\ #6,\ #3)},
% first={\emph{#5} (angl.\ \emph{#6},\ \emph{#3})},
% sort={#4},
% description={#7},
% #1 % pass additional options to \newglossaryentry
% ]
% {#2}{#3}{#4}
% % Create a cross-reference from the abbreviation to the main glossary entry by
% % creating an auxiliary glossary entry (note: we set the label of this entry
% % to '<original label>_auxiliary' to avoid clashes)
% \newglossaryentry{#2_auxiliary}{
% name={#3},
% sort={#3},
% description={\makefirstuc{#6}},
% see=[See:]{#2}
% }
% }
% % Change the text of the cross-reference links to the Slovene long version.
% \renewcommand*{\glsseeitemformat}[1]{\emph{\acrlong{#1}}.}
% Define the Indonesian term and link it to the English term
\newglossaryentry{jaringansaraf}{
name=Jaringan Saraf,
description={The Indonesian term for \gls{nn}}
}
% \newglossaryentry{pemelajaranmesin}{
% name=Pemelajaran Mesin,
% description={Lihat \gls{machinelearning}}
% }
% Define the English term and link it to its acronym
\newglossaryentry{neuralnetwork}{
name=Neural Network,
description={A computational model inspired by the human brain, see \gls{nn}}
}
% \newglossaryentry{machinelearning}{
% name=Machine Learning,
% description={A program or system that trains a model from input data. The trained model can make useful predictions from new (never-before-seen) data drawn from the same distribution as the one used to train the model.}}
% \newglossaryentry{pemelajaranmesin}{
% name={pemelajaran mesin (angl.\ #5)},
% first={pemelajaran mesin (\emph{machine learning})},
% text={pemelajaran mesin},
% sort={ },
% description={#6},
% #1 % pass additional options to \newglossaryentry
% }
\longnewglossaryentry{machinelearning}{name={machine learning}}
{A program or system that trains a model from input data. The trained model can make useful predictions from new (never-before-seen) data drawn from the same distribution as the one used to train the model.}
\newterm[see={machinelearning}]{pemelajaranmesin}
% \newglossaryentry{pemelajaran mesin}{}
% \addterm{machinelearning}{pemelajaran mesin}{pemelajaran mesin}{machine learning}{A program or system that trains a model from input data. The trained model can make useful predictions from new (never-before-seen) data drawn from the same distribution as the one used to train the model.}
\newacronym
[description={statistical pattern recognition technique}]
{svm}{SVM}{support vector machine}

View File

@@ -1,14 +1,18 @@
\documentclass[draftmark]{thesis}
% Title Information
\setthesisinfo
{Prediksi Lokasi Kerusakan dengan Machine Learning}
{Rifqi Damar Panuluh}
{20210110224}
{PROGRAM STUDI TEKNIK SIPIL}
{FAKULTAS TEKNIK}
{UNIVERSITAS MUHAMMADIYAH YOGYAKARTA}
{2025}
% Metadata
\title{Prediksi Lokasi Kerusakan dengan Machine Learning}
\author{Rifqi Damar Panuluh}
\date{\today}
\authorid{20210110224}
\firstadvisor{Ir. Muhammad Ibnu Syamsi, Ph.D.}
\secondadvisor{}
\headdepartement{Puji Harsanto, S.T., M.T., Ph.D.}
\headdepartementid{19740607201404123064}
\faculty{Fakultas Teknik}
\program{Program Studi Teknik Sipil}
\university{Universitas Muhammadiyah Yogyakarta}
\yearofsubmission{2025}
% Input preamble
\input{preamble/packages}
@@ -16,22 +20,19 @@
\input{preamble/macros}
\begin{document}
\maketitle
% \input{frontmatter/maketitle}
% \input{frontmatter/maketitle_secondary}
\frontmatter
\input{frontmatter/approval}\clearpage
\input{frontmatter/originality}\clearpage
\input{frontmatter/acknowledgement}\clearpage
\tableofcontents
% \input{frontmatter/approval}\clearpage
% \input{frontmatter/originality}\clearpage
% \input{frontmatter/acknowledgement}\clearpage
% \tableofcontents
\clearpage
\mainmatter
\pagestyle{fancyplain}
% Include content
\include{content/abstract}
\include{content/introduction}
\include{chapters/01_introduction}
\include{content/chapter2}
\include{content/conclusion}
\include{chapters/id/02_literature_review/index}
\include{chapters/id/03_methodology/index}
% Bibliography
% \bibliographystyle{IEEEtran}

View File

@@ -1,11 +0,0 @@
\newcommand{\studentname}{Rifqi Damar Panuluh}
\newcommand{\studentid}{20210110224}
\newcommand{\thesistitle}{Prediksi Lokasi Kerusakan dengan Machine Learning}
\newcommand{\firstadvisor}{Ir. Muhammad Ibnu Syamsi, Ph.D.}
\newcommand{\secondadvisor}{}
\newcommand{\headdepartement}{Puji Harsanto, S.T. M.T., Ph.D.}
\newcommand{\headdepartementid}{19740607201404123064}
\newcommand{\faculty}{Fakultas Teknik}
\newcommand{\program}{Teknik Sipil}
\newcommand{\university}{Universitas Muhammadiyah Yogyakarta}
\newcommand{\yearofsubmission}{2025}

View File

@@ -1,7 +1,7 @@
\NeedsTeXFormat{LaTeX2e}
\ProvidesClass{thesis}[2025/05/10 Bachelor Thesis Class]
\newif\if@draftmark
\newif\if@draftmark \@draftmarkfalse
\@draftmarkfalse
\DeclareOption{draftmark}{\@draftmarktrue}
@@ -12,6 +12,7 @@
\RequirePackage{polyglossia}
\RequirePackage{fontspec}
\RequirePackage{titlesec}
\RequirePackage{titling}
\RequirePackage{fancyhdr}
\RequirePackage{geometry}
\RequirePackage{setspace}
@@ -24,30 +25,31 @@
\RequirePackage{svg} % Allows including SVG images directly
\RequirePackage{indentfirst} % Makes first paragraph after headings indented
\RequirePackage{float} % Provides [H] option to force figure/table placement
\RequirePackage[style=apa, backend=biber]{biblatex}
\RequirePackage[acronym, nogroupskip, toc]{glossaries}
% Polyglossia set language
+ \setdefaultlanguage[variant=indonesian]{malay} % Proper Indonesian language setup
+ \setotherlanguage{english} % Enables English as secondary language
+ \DefineBibliographyStrings{english}{% % Customizes bibliography text
+ andothers={dkk\adddot}, % Changes "et al." to "dkk."
+ pages={hlm\adddot}, % Changes "pp." to "hlm."
+ }
\setdefaultlanguage[variant=indonesian]{malay} % Proper Indonesian language setup
\setotherlanguage{english} % Enables English as secondary language
\DefineBibliographyStrings{english}{% % Customizes bibliography text
andothers={dkk\adddot}, % Changes "et al." to "dkk."
pages={hlm\adddot}, % Changes "pp." to "hlm."
}
% Conditionally load the watermark package and settings
\if@draftmark
\RequirePackage{draftwatermark}
\SetWatermarkText{nuluh/thesis (wip) draft: \today}
\SetWatermarkText{nuluh/thesis (wip) [draft: \today]}
\SetWatermarkColor[gray]{0.8} % Opacity: 0.8 = 20% transparent
\SetWatermarkFontSize{1.5cm}
\SetWatermarkAngle{90}
\SetWatermarkHorCenter{1.5cm}
\RequirePackage[left]{lineno}
\linenumbers
\fi
% Page layout
\geometry{left=3cm, top=3cm, right=3cm, bottom=3cm}
\geometry{left=4cm, top=3cm, right=3cm, bottom=3cm}
\setlength{\parskip}{0.5em}
\setlength{\parindent}{0pt}
\onehalfspacing
% Fonts
@@ -56,19 +58,45 @@
\setsansfont{Arial}
\setmonofont{Courier New}
% Metadata commands
\input{metadata}
\newcommand{\setthesisinfo}[7]{%
\renewcommand{\thesistitle}{#1}%
\renewcommand{\studentname}{#2}%
\renewcommand{\studentid}{#3}%
\renewcommand{\program}{#4}%
\renewcommand{\faculty}{#5}%
\renewcommand{\university}{#6}%
\renewcommand{\yearofsubmission}{#7}%
\makeatletter
% Extracting the Year from \today
\newcommand{\theyear}{%
\expandafter\@car\expandafter\@gobble\the\year\@nil
}
% Declare internal macros as initially empty
\newcommand{\@authorid}{}
\newcommand{\@firstadvisor}{}
\newcommand{\@secondadvisor}{}
\newcommand{\@headdepartement}{}
\newcommand{\@headdepartementid}{}
\newcommand{\@faculty}{}
\newcommand{\@program}{}
\newcommand{\@university}{}
\newcommand{\@yearofsubmission}{}
% Define user commands to set these values.
\newcommand{\authorid}[1]{\gdef\@authorid{#1}}
\newcommand{\firstadvisor}[1]{\gdef\@firstadvisor{#1}}
\newcommand{\secondadvisor}[1]{\gdef\@secondadvisor{#1}}
\newcommand{\headdepartement}[1]{\gdef\@headdepartement{#1}}
\newcommand{\headdepartementid}[1]{\gdef\@headdepartementid{#1}}
\newcommand{\faculty}[1]{\gdef\@faculty{#1}}
\newcommand{\program}[1]{\gdef\@program{#1}}
\newcommand{\university}[1]{\gdef\@university{#1}}
\newcommand{\yearofsubmission}[1]{\gdef\@yearofsubmission{#1}}
% Now expose robust the getters to access the values
\newcommand{\theauthorid}{\@authorid}
\newcommand{\thefirstadvisor}{\@firstadvisor}
\newcommand{\thesecondadvisor}{\@secondadvisor}
\newcommand{\theheaddepartement}{\@headdepartement}
\newcommand{\theheaddepartementid}{\@headdepartementid}
\newcommand{\thefaculty}{\@faculty}
\newcommand{\theprogram}{\@program}
\newcommand{\theuniversity}{\@university}
\newcommand{\theyearofsubmission}{\@yearofsubmission}
\makeatother
% % Header and footer
\fancypagestyle{fancy}{%
\fancyhf{}
@@ -110,11 +138,6 @@
\renewcommand{\cftchappresnum}{BAB~}
\renewcommand{\cftchapaftersnum}{\quad}
% \titlespacing*{\chapter}{0pt}{-10pt}{20pt}
% Redefine \maketitle
\renewcommand{\maketitle}{\input{frontmatter/maketitle}}
% Chapter & Section format
\renewcommand{\cftchapfont}{\normalsize\MakeUppercase}
% \renewcommand{\cftsecfont}{}
@@ -136,11 +159,15 @@
\setlength{\cftsubsecnumwidth}{2.5em}
\setlength{\cftfignumwidth}{5em}
\setlength{\cfttabnumwidth}{4em}
\renewcommand \cftchapdotsep{1} % Denser dots (closer together) https://tex.stackexchange.com/a/273764
\renewcommand \cftsecdotsep{1} % Apply to sections too
\renewcommand \cftsubsecdotsep{1} % Apply to subsections too
\renewcommand \cftchapdotsep{1} % https://tex.stackexchange.com/a/273764
\renewcommand \cftsecdotsep{1} % https://tex.stackexchange.com/a/273764
\renewcommand \cftsubsecdotsep{1} % https://tex.stackexchange.com/a/273764
\renewcommand \cftfigdotsep{1.5} % https://tex.stackexchange.com/a/273764
\renewcommand \cfttabdotsep{1.5} % https://tex.stackexchange.com/a/273764
\renewcommand{\cftchapleader}{\normalfont\cftdotfill{\cftsecdotsep}}
\renewcommand{\cftchappagefont}{\normalfont}
% Add Prefix in the Lof and LoT entries
\renewcommand{\cftfigpresnum}{\figurename~}
\renewcommand{\cfttabpresnum}{\tablename~}
% -- (diff hunk boundary from the compare view; intervening lines not shown) --
% \renewcommand{\cfttoctitlefont}{\bfseries\MakeUppercase}
% \renewcommand{\cftaftertoctitle}{\vskip 2em}
% Defines a new glossary called notation
% (log ext .nlg, output ext .not, input ext .ntn, displayed title "Notation")
\newglossary[nlg]{notation}{not}{ntn}{Notation}
% Define the header for the location column
% (\providecommand so an existing package/babel definition is not clobbered)
\providecommand*{\locationname}{Location}
% Define the new glossary style called 'mylistalt' for main glossaries:
% entries render as a numbered list -- the term name on its own line,
% then the description followed by the location list in brackets.
\makeatletter
\newglossarystyle{mylistalt}{%
% start the list, initializing glossaries internals
\renewenvironment{theglossary}%
{\glslistinit\begin{enumerate}}%
{\end{enumerate}}%
% suppress all headers/groupskips
\renewcommand*{\glossaryheader}{}%
\renewcommand*{\glsgroupheading}[1]{}%
\renewcommand*{\glsgroupskip}{}%
% main entries: let \item produce "1." etc., then break
% (##1 = entry label, ##2 = location list)
\renewcommand*{\glossentry}[2]{%
\item \glstarget{##1}{\glossentryname{##1}}%
\mbox{}\\
\glossentrydesc{##1}\space
[##2] % appears on page x
}%
% sub-entries as separate paragraphs, still aligned
% (##1 = sub-entry level, ##2 = entry label, ##3 = location list)
\renewcommand*{\subglossentry}[3]{%
\par
\glssubentryitem{##2}%
\glstarget{##2}{\strut}\space
\glossentrydesc{##2}\space ##3%
}%
}
% Define the new glossary style 'altlong3customheader' for notation
% NOTE(review): \glsdescwidth and \glspagelistwidth are lengths provided by
% the glossaries long-table styles -- presumably sized elsewhere before this
% glossary is printed; confirm against the document setup.
\newglossarystyle{altlong3customheader}{%
% The glossary will be a longtable environment with three columns:
% 1. Symbol (left-aligned)
% 2. Description (paragraph, width \glsdescwidth)
% 3. Location (paragraph, width \glspagelistwidth)
\renewenvironment{theglossary}%
{\begin{longtable}{lp{\glsdescwidth}p{\glspagelistwidth}}}%
{\end{longtable}}%
% Define the table header row
% (Indonesian column titles; these redefinitions execute when the style is
% activated, so later uses of the same names see the Indonesian text too)
\renewcommand*{\symbolname}{Simbol}
\renewcommand*{\descriptionname}{Keterangan}
\renewcommand*{\locationname}{Halaman}
\renewcommand*{\glossaryheader}{%
\bfseries\symbolname & \bfseries\descriptionname & \bfseries\locationname \tabularnewline\endhead}%
% Suppress group headings (e.g., A, B, C...)
\renewcommand*{\glsgroupheading}[1]{}%
% Define how a main glossary entry is displayed
% ##1 is the entry label
% ##2 is the location list (page numbers)
\renewcommand{\glossentry}[2]{%
\glsentryitem{##1}% Inserts entry number if entrycounter option is used
\glstarget{##1}{\glossentryname{##1}} & % Column 1: Symbol (with hyperlink target)
\glossentrydesc{##1}\glspostdescription & % Column 2: Description (with post-description punctuation)
##2\tabularnewline % Column 3: Location list
}%
% Define how a sub-entry is displayed
% ##1 is the sub-entry level (e.g., 1 for first sub-level)
% ##2 is the entry label
% ##3 is the location list
\renewcommand{\subglossentry}[3]{%
& % Column 1 (Symbol) is left blank for sub-entries to create an indented look
\glssubentryitem{##2}% Inserts sub-entry number if subentrycounter is used
\glstarget{##2}{\strut}\glossentrydesc{##2}\glspostdescription & % Column 2: Description (target on strut for hyperlink)
##3\tabularnewline % Column 3: Location list
}%
% Define the skip between letter groups (if group headings were enabled)
% For 3 columns, we need 2 ampersands for a full blank row if not using \multicolumn
\ifglsnogroupskip
\renewcommand*{\glsgroupskip}{}%
\else
\renewcommand*{\glsgroupskip}{& & \tabularnewline}%
\fi
}
% Define a new style 'supercol' based on 'super' for acronyms glossaries:
% identical to 'super' except each description column is prefixed with ": ".
\newglossarystyle{supercol}{%
\setglossarystyle{super}% inherit everything from the original
% override just the main-entry format:
% (##1 = entry label, ##2 = location list)
\renewcommand*{\glossentry}[2]{%
\glsentryitem{##1}%
\glstarget{##1}{\glossentryname{##1}}\space % column 1: acronym name
&: \glossentrydesc{##1}\glspostdescription\space ##2\tabularnewline
}%
% likewise for subentries, if you want a colon there too:
% (##1 = sub-entry level, ##2 = entry label, ##3 = location list)
\renewcommand*{\subglossentry}[3]{%
&:
\glssubentryitem{##2}%
\glstarget{##2}{\strut}\glossentryname{##2}\space % name in description column
\glossentrydesc{##2}\glspostdescription\space ##3\tabularnewline
}%
}
% Close the @-catcode section opened by \makeatletter before 'mylistalt'
\makeatother
% A new command that enables us to enter bi-lingual (Bahasa Indonesia and English) terms
% syntax: \addterm[options]{label}{Bahasa Indonesia}{Bahasa Indonesia first use}{English}{Bahasa Indonesia
% description}
%   #1 optional key=value options forwarded to \newglossaryentry
%   #2 entry label, #3 Indonesian term, #4 Indonesian first-use form,
%   #5 English term, #6 Indonesian description
% NOTE(review): the "angl." abbreviation in the rendered name looks like a
% leftover from a non-Indonesian template -- confirm the intended wording.
\newcommand{\addterm}[6][]{
\newglossaryentry{#2}{
name={#3 (angl.\ #5)},
first={#4 (\emph{#5})},
text={#3},
sort={#3},
description={#6},
#1 % pass additional options to \newglossaryentry
}
}
% A new command that enables us to enter (English) acronyms with bi-lingual
% (Bahasa Indonesia and English) long versions
% syntax: \addacronym[options]{label}{abbreviation}{Bahasa Indonesia long}{Bahasa Indonesia first
% use long}{English long}{Bahasa Indonesia description}
%   #1 optional key=value options, #2 label, #3 abbreviation,
%   #4 Indonesian long form, #5 Indonesian first-use long form,
%   #6 English long form, #7 Indonesian description
\newcommand{\addacronym}[7][]{
% Create the main glossary entry with \newacronym
% \newacronym[key-val list]{label}{abbrv}{long}
\newacronym[
name={#4 (angl.\ #6,\ #3)},
first={\emph{#5} (angl.\ \emph{#6},\ \emph{#3})},
sort={#4},
description={#7},
#1 % pass additional options to \newglossaryentry
]
{#2}{#3}{#4}
% Create a cross-reference from the abbreviation to the main glossary entry by
% creating an auxiliary glossary entry (note: we set the label of this entry
% to '<original label>_auxiliary' to avoid clashes)
% \makefirstuc (mfirstuc) capitalises the first letter of the English long form
\newglossaryentry{#2_auxiliary}{
name={#3},
sort={#3},
description={\makefirstuc{#6}},
see=[See:]{#2}
}
}
% Change the text of the cross-reference links to the Bahasa Indonesia long version.
% NOTE(review): \acrlong is acronym-specific -- this assumes every "see"
% target is an acronym; a cross-reference to a plain glossary term would not
% render its name. Confirm that only \addacronym entries use 'see='.
\renewcommand*{\glsseeitemformat}[1]{\emph{\acrlong{#1}}.}
% % Apply a custom fancyhdr layout only on the first page of each \chapter, and use no header/footer elsewhere
% % \let\oldchapter\chapter
% % \renewcommand{\chapter}{%