commit 5d271ec87e270e389ae0f947d94daa558a777c7b Author: Flatlogic Bot Date: Wed May 6 17:53:23 2026 +0000 Initial import diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..1950276 --- /dev/null +++ b/.gitignore @@ -0,0 +1,19 @@ +# Python +__pycache__/ +*.pyc +*.pyo +*.egg-info/ +dist/ +build/ + +# PyInstaller +*.spec + +# IDE stuff +.vscode/ +.idea/ + +# OS junk +.DS_Store +Thumbs.db +/.venv-linux-build diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..2d02eba --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2026 TheBagelOfMan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 0000000..f3cef9b --- /dev/null +++ b/README.md @@ -0,0 +1,21 @@ +# Daniel + +A real-time rice difficulty calculator for 4k osu!mania must which must be run alongside [tosu](https://tosu.app). 
+ +**[Website](https://thebagelofman.github.io/Daniel/)** · **[Download](https://github.com/TheBagelOfMan/Daniel/releases/latest)** + +## Linux build + +Use `build_linux.sh` to create a Linux binary: + +```bash +./build_linux.sh +``` + +This script installs dependencies using python venv and outputs `dist/Daniel-linux`. If `src/msd` is missing, set `MSD_BIN_PATH` at runtime to a Linux-compatible `msd` executable. + +On Linux/macOS, it will use Wine if only `src/msd.exe` is available. + +## License + +[MIT](LICENSE) diff --git a/build_linux.sh b/build_linux.sh new file mode 100644 index 0000000..1a28ec8 --- /dev/null +++ b/build_linux.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +cd "$ROOT_DIR" + +if [[ "$(uname -s)" != "Linux" ]]; then + echo "This script is for Linux only." + exit 1 +fi + +if ! command -v python3 >/dev/null 2>&1; then + echo "python3 is required" + exit 1 +fi + +VENV_DIR=".venv-linux-build" +PYTHON_BIN="$VENV_DIR/bin/python" +PYINSTALLER_BIN="$VENV_DIR/bin/pyinstaller" + +if [[ ! -x "$PYTHON_BIN" ]]; then + rm -rf "$VENV_DIR" + python3 -m venv "$VENV_DIR" +fi + +if [[ ! -x "$PYTHON_BIN" ]]; then + echo "Failed to create virtualenv python at $PYTHON_BIN" + echo "Install the venv package for your distro (example: sudo apt install python3-venv) and rerun." + exit 1 +fi + +"$PYTHON_BIN" -m ensurepip --upgrade >/dev/null 2>&1 || true +"$PYTHON_BIN" -m pip install --upgrade pip +"$PYTHON_BIN" -m pip install pyinstaller numpy pillow websocket-client + +ADD_DATA_ARGS=() + +if [[ -f src/icon.png ]]; then + ADD_DATA_ARGS+=(--add-data "src/icon.png:.") +fi +if [[ -f src/icon.ico ]]; then + ADD_DATA_ARGS+=(--add-data "src/icon.ico:.") +fi +if [[ -f src/msd ]]; then + chmod +x src/msd + ADD_DATA_ARGS+=(--add-data "src/msd:.") +else + echo "Warning: src/msd not found. Build will succeed, but MSD will require MSD_BIN_PATH at runtime." 
+fi +if [[ -f src/msd.exe ]]; then + ADD_DATA_ARGS+=(--add-data "src/msd.exe:.") +fi + +"$PYINSTALLER_BIN" \ + --noconfirm \ + --clean \ + --onefile \ + --name Daniel-linux \ + --collect-all numpy \ + --collect-all PIL \ + --hidden-import websocket \ + "${ADD_DATA_ARGS[@]}" \ + src/daniel.py + +echo "Build complete: dist/Daniel-linux" diff --git a/docs/benchmark_report.html b/docs/benchmark_report.html new file mode 100644 index 0000000..6f14e63 --- /dev/null +++ b/docs/benchmark_report.html @@ -0,0 +1,1031 @@ + + + + + + +Daniel Benchmark Report + + + + +
+ + + + + +
+

Benchmark Report

+

145 maps · 0 errors · 2026-03-29 09:47

+ +
+
+ 52.4% +
Exact ±0.165
+
Predicted the correct dan level and tier
+
+
+ 37.2% +
Adjacent ±0.495
+
Off by one tier (e.g. Mid predicted as High)
+
+
+ 9.0% +
Within ±1.0
+
Within one full dan level of the expected tier
+
+
+ 1.4% +
Miss >1.0
+
More than one full dan level away from expected tier
+
+
+ 145 +
Total Maps
+
Number of beatmaps processed in this benchmark run
+
+
+ +
+
+ 0.215 +
MAE
+
Mean Absolute Error - average size of all errors ignoring direction
+
+
+ 0.150 +
Median AE
+
Median absolute error
+
+
+ 0.304 +
Std Dev
+
Standard deviation of errors
+
+
+ -0.003 +
Bias
+
Average signed error
+
+
+ +0.900 +
Max Overrate
+
Largest single overestimation (predicted too hard)
+
+
+ -1.380 +
Max Underrate
+
Largest single underestimation (predicted too easy)
+
+
+
+ + +
+

Charts — Overall

+
+ +
+
Accuracy breakdown
+
+
+ +
+
Per-dan accuracy % (stacked)
+
+
+ +
+
Δ numeric vs difficulty
+
+
+ +
+
Expected vs Predicted
+
+
+ +
+
Cumulative accuracy curve
+
+
+ +
+
Per-dan bias & MAE
+
+
+ +
+
+ + +
+

Charts — By Skillset

+
+ +
+
Per-skillset accuracy % (stacked)
+
+
+ +
+
Per-skillset bias & MAE
+
+
+ +
+
Scatter — Jack
+
+
+ +
+
Scatter — Tech
+
+
+ +
+
Scatter — Speed
+
+
+ +
+
Scatter — Stamina
+
+
+ +
+
+ + +
+

Per-Dan Breakdown

+
+ + + + + + + + + + + + + + +
DanNExact%Adjacent%Within%Miss%MAEBias
Alpha1070.0%20.0%10.0%0.0%0.185+0.091
Beta1241.7%50.0%8.3%0.0%0.264-0.104
Gamma2544.0%36.0%20.0%0.0%0.250+0.013
Delta3568.6%31.4%0.0%0.0%0.133-0.029
Epsilon3351.5%39.4%9.1%0.0%0.201+0.034
Zeta2231.8%45.5%13.6%9.1%0.333-0.039
Eta862.5%37.5%0.0%0.0%0.164+0.046
+
+
+ + +
+

Per-Skillset Breakdown

+
+ + + + + + + + + + + +
SkillsetNExact%Adjacent%Within%Miss%MAEBias
Jack6163.9%23.0%9.8%3.3%0.207-0.021
Tech2748.1%48.1%3.7%0.0%0.187+0.007
Speed3135.5%45.2%19.4%0.0%0.282+0.098
Stamina2650.0%50.0%0.0%0.0%0.182-0.092
+
+
+ + +
+

Map Results

+
+ + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Song ▾ExpectedGotSkillsetSRNumericΔFlag
Dark SambalandAlpha MidAlpha Midjack6.59211.58+0.08
Air (0.915x)Alpha MidBeta Midspeed6.93712.40+0.90
MakibaAlpha MidAlpha Midspeed6.56811.52+0.02
Babel (1.3x)Alpha MidAlpha Midstamina6.51411.38-0.12
Lazorbeamz (1.0x)Alpha MidAlpha Midstamina6.54211.45-0.05
Odoru (1.1x)Alpha MidAlpha Midtech6.55111.47-0.03
Yasashisa No RiyuuAlpha Mid/HighBeta Lowspeed6.78212.05+0.38~
Yell (0.975x)Alpha HighAlpha Highspeed6.68911.82-0.01
Angel'S SaladAlpha HighAlpha Midtech6.61511.64-0.19~
Bismuth NebulaAlpha HighAlpha Hightech6.66311.76-0.07
Paradigm ShiftBeta MidBeta Midjack6.93712.40-0.10
5Oul On D!SplayBeta MidBeta Lowspeed6.86112.23-0.27~
Amber StarlightBeta MidAlpha Highspeed6.72211.90-0.60
Hideho (0.875x)Beta MidBeta Midspeed6.94612.41-0.09
Uso No Hibana (0.9x)Beta MidBeta Lowspeed6.89712.31-0.19~
Cycle Hit (1.4x)Beta MidBeta Highstamina7.18612.95+0.45~
Time To Say Goodbye (1.0x)Beta MidBeta Midstamina7.04212.63+0.13
Blue PlanetBeta MidBeta Hightech7.12612.82+0.32~
Psychology (1.15x)Beta MidBeta Lowtech6.78612.06-0.44~
Pastel Subliminal (0.9x)Beta Mid/HighBeta Lowspeed6.87212.25-0.42~
Crystal World FractureBeta HighBeta Highspeed7.15912.89+0.06
Observation (1.2x)Beta HighBeta Highstamina7.08912.73-0.10
Hiasobi (1.1x)Gamma LowGamma Lowjack7.27913.14-0.03
Reflect (1.05x)Gamma LowGamma Lowjack7.21213.01-0.16
HustlerGamma LowGamma Lowjack7.27513.14-0.03
Anomaly (1.2x)Gamma LowBeta Highstamina7.18112.94-0.23~
TornadoGamma LowBeta Midtech6.99912.53-0.64
SunnyshinyringGamma Low/MidGamma Highspeed7.61513.83+0.50
Break (1.1x)Gamma MidGamma Highjack7.53913.67+0.17~
Captain Jack (1.2x)Gamma MidBeta Highjack7.11512.79-0.71
Do It To It (1.2x)Gamma MidGamma Midjack7.39413.38-0.12
Everything BlackGamma MidGamma Highjack7.60013.80+0.30~
Credens (0.9x)Gamma MidDelta Lowspeed7.76114.08+0.58
P8107 (1.4x)Gamma MidGamma Midspeed7.53513.67+0.17~
Reflec StreamzGamma MidGamma Highspeed7.56213.72+0.22~
Cyber Inductance (1.4x)Gamma MidGamma Midstamina7.50413.60+0.10
Phosphor (1.05x)Gamma MidGamma Lowstamina7.23913.06-0.44~
Shinbatsu O Tadori (1.05x)Gamma MidGamma Highstamina7.64913.90+0.40~
Icicle Stinger (1.1x)Gamma MidGamma Hightech7.55313.70+0.20~
Matusa Bomber (1.05x)Gamma MidGamma Midtech7.49413.58+0.08
ChildhoodfriendGamma Mid/HighDelta Lowspeed7.87214.21+0.54
Pureruby (1.08x)Gamma Mid/HighGamma Highspeed7.54813.69+0.02
Amen Katagiri Generation (1.05x)Gamma HighGamma Midstamina7.52813.65-0.18~
Livestream (1.2x)Gamma HighGamma Highstamina7.55013.70-0.13
Nuclear Star (1.3x)Gamma HighGamma Highstamina7.53813.67-0.16
NhelvGamma HighGamma Hightech7.60713.81-0.02
Playing With Ruby (1.2x)Gamma HighGamma Hightech7.56013.72-0.11
Dat PhonkDelta LowDelta Lowjack7.96214.32+0.15
Nijiiro Kekkai Signicial,Delta LowDelta Lowjack7.93014.28+0.11
Australia (1.3x)Delta LowDelta Lowspeed7.77514.09-0.08
White Hair Little Swords GirlDelta LowDelta Lowspeed7.95914.32+0.15
Fractal VertexDelta LowDelta Midtech7.97414.34+0.17~
KimiyoDelta LowDelta Lowtech7.89214.24+0.07
Shadows Of Cats (1.2x)Delta LowDelta Lowtech7.79514.12-0.05
Still Into You (1.2x)Delta LowDelta Lowtech7.95814.32+0.15
Nijuu (1.16x)Delta Low/MidDelta Midspeed7.97514.34+0.01
Drop It Like It'S HotDelta MidDelta Midjack8.05914.44-0.06
Aquaris (1.3x)Delta MidDelta Lowjack7.81014.14-0.36~
Cant Give It Up (1.1x)Delta MidDelta Midjack8.10714.50+0.00
Give It To DemDelta MidDelta Highjack8.26414.69+0.19~
Sakazuki (1.1x)Delta MidDelta Midjack8.14614.55+0.05
BookmakerDelta MidDelta Midjack8.03114.41-0.09
Darling DanceDelta MidDelta Midjack8.13214.53+0.03
Enough (1.1x)Delta MidDelta Midjack8.19614.61+0.11
Land Of Fire (1.1x)Delta MidDelta Midjack7.99314.36-0.14
The Island Of AlbatrossDelta MidDelta Midjack8.18014.59+0.09
VolcanicDelta MidDelta Lowspeed7.75614.07-0.43~
Excuse My Rudeness (1.05x)Delta MidDelta Lowstamina7.95214.31-0.19~
Future Dominators (1.3x)Delta MidDelta Midstamina8.05714.44-0.06
Pacific Girls (1.4x)Delta MidDelta Lowstamina7.96014.32-0.18~
The Fool (1.3x)Delta MidDelta Midstamina8.02014.39-0.11
Villain Virus (1.05x)Delta MidDelta Lowstamina7.94714.30-0.20~
Walk This Way (1.1x)Delta MidDelta Lowstamina7.87914.22-0.28~
Crescent Moon Island (1.05x)Delta MidDelta Midtech8.17314.58+0.08
Gottasadae (1.25x)Delta MidDelta Midtech8.15214.55+0.05
Thinkofyou (1.15x)Delta Mid/HighDelta Highspeed8.51114.99+0.32~
Heart Chrome (1.3x)Delta HighDelta Highjack8.38414.84+0.01
Please ChiDelta HighDelta Highjack8.32614.77-0.06
ApoplexyDelta HighDelta Highspeed8.26814.70-0.13
K.Y.A.F.A (1.25x)Delta HighDelta Midstamina8.19514.61-0.22~
Bismuth SupernovaDelta HighDelta Midtech8.22414.64-0.19~
Dimension WarsDelta HighDelta Hightech8.43814.90+0.07
Ghost NovaEpsilon LowEpsilon Lowjack8.68915.22+0.05
UnmeironEpsilon LowEpsilon Lowjack8.61815.13-0.04
Paradigmshift (1.15x)Epsilon LowEpsilon Lowspeed8.72515.27+0.10
Quadraphinix (1.6x)Epsilon LowEpsilon Lowstamina8.63215.15-0.02
Bismuth SingularityEpsilon LowEpsilon Midtech8.78515.35+0.18~
Looking For Edge Of Ground (1.15x)Epsilon LowDelta Hightech8.42414.89-0.28~
The Poetic EddaEpsilon LowEpsilon Midtech8.89615.49+0.32~
Cyber Inductance (1.1x)Epsilon Low/MidEpsilon Midspeed8.90615.51+0.18~
Darling Dance (1.1x)Epsilon MidEpsilon Midjack8.95315.57+0.07
GoodtekEpsilon MidEpsilon Lowjack8.52015.00-0.50
Rose Quartz (1.3x)Epsilon MidEpsilon Lowjack8.76715.33-0.17~
Satellite (1.1x)Epsilon MidEpsilon Midjack8.87115.46-0.04
Bad CycleEpsilon MidEpsilon Midjack8.81215.39-0.11
Break (1.3x)Epsilon MidEpsilon Highjack9.05515.70+0.20~
Gengaozo Noize Of NocentEpsilon MidEpsilon Lowjack8.74115.29-0.21~
I Love It (1.3x)Epsilon MidEpsilon Midjack8.99915.63+0.13
Pam (1.3x)Epsilon MidEpsilon Highjack9.22115.92+0.42~
Psychology (1.1x)Epsilon MidEpsilon Midjack8.95915.58+0.08
Rigid Paradise (1.05x)Epsilon MidEpsilon Midjack8.97715.60+0.10
Sakazuki (1.2x)Epsilon MidEpsilon Midjack8.93715.55+0.05
StrongerEpsilon MidDelta Highjack8.43114.89-0.61
Trouble Kuroneko (1.5x)Epsilon MidEpsilon Midjack8.88115.47-0.03
Full ThrottleEpsilon MidZeta Midspeed9.47416.36+0.86
Mario PaintEpsilon MidEpsilon Highspeed9.15815.84+0.34~
The Empress (1.15x)Epsilon MidEpsilon Midstamina8.88115.47-0.03
The Fate Of Remix (1.1x)Epsilon MidEpsilon Midstamina8.96715.59+0.09
The Hypocrisy (1.2x)Epsilon MidEpsilon Lowstamina8.70315.24-0.26~
Yuudachi No Ribbon (1.2x)Epsilon MidEpsilon Midstamina8.79515.36-0.14
Forgotten (1.4x)Epsilon MidEpsilon Hightech9.21815.91+0.41~
Celestial ExploringEpsilon HighEpsilon Highjack9.10815.77-0.06
Quevedo (1.3x)Epsilon HighEpsilon Highjack9.19315.88+0.05
Sentimental Crisis (1.55x)Epsilon HighEpsilon Midspeed8.95815.58-0.25~
Dancer In The Smoke (1.3x)Epsilon HighZeta Lowtech9.32416.08+0.25~
Angel DustZeta LowZeta Lowjack9.35216.13-0.04
I Love It (1.4x)Zeta MidZeta Midjack9.60916.61+0.11
MutwaZeta MidZeta Highjack9.76716.91+0.41~
Observation (0.95x)Zeta MidEpsilon Highjack9.16315.84-0.66
Promise (1.22x)Zeta MidEta Lowjack9.82917.02+0.52
The Island Of Albatross (1.2x)Zeta MidEta Lowjack9.84917.05+0.55
Truth Never Spoken (1.3x)Zeta MidEpsilon Midjack8.85515.44-1.06
Vertex BetaZeta MidEpsilon Lowjack8.61115.12-1.38
Cg901B (1.3x)Zeta MidZeta Highjack9.64816.68+0.18~
Dark Sambaland Omega (1.2x)Zeta MidZeta Midjack9.51516.43-0.07
Exile (1.1x)Zeta MidZeta Highjack9.64616.68+0.18~
]-[34#!Zeta MidZeta Lowspeed9.42416.26-0.24~
FinixeZeta MidZeta Highspeed9.65416.69+0.19~
Nijuu (1.0x)Zeta MidZeta Midstamina9.46216.33-0.17~
Apocalyptic Premonition (1.05x)Zeta MidZeta Midtech9.56716.53+0.03
Weird Autumn (1.4x)Zeta MidZeta Lowtech9.33216.09-0.41~
Twilightofthegods (1.3x)Zeta Mid/HighEta Lowspeed9.83417.03+0.36~
Anhedonia GeminiZeta HighZeta Highjack9.79316.95+0.12
Darling Dance (1.2x)Zeta HighZeta Highjack9.69316.77-0.06
Day By DayZeta HighZeta Highjack9.75616.88+0.05
Decoy OmegaZeta HighEta Lowjack9.87617.09+0.26~
Melody Blooming In The Earth (1.5x)Zeta HighEta Lowjack9.88117.10+0.27~
Deep Down (1.35x)Eta LowEta Lowjack10.03417.33+0.16
Sendan Life Frawog (1.5x)Eta LowEta Lowjack9.97417.24+0.07
Code Name BravoEta LowEta Lowtech9.98017.25+0.08
Torment PaybackEta MidEta Highjack10.38217.86+0.36~
Intersect ThunderboltEta MidEta Midspeed10.05317.36-0.14
Collapse Of EgoEta MidEta Lowstamina9.95917.22-0.28~
ExitiumEta MidEta Hightech10.25517.67+0.17~
Rip Me ApartEta HighEta Highjack10.33117.78-0.05
+
+
+ + + + +
+

Daniel Benchmark Report

+

2026-03-29 09:47

+
+ +
+ + + + \ No newline at end of file diff --git a/docs/images/compact.png b/docs/images/compact.png new file mode 100644 index 0000000..39fe9e3 Binary files /dev/null and b/docs/images/compact.png differ diff --git a/docs/images/graph.png b/docs/images/graph.png new file mode 100644 index 0000000..d6de91d Binary files /dev/null and b/docs/images/graph.png differ diff --git a/docs/images/hero-screenshot.png b/docs/images/hero-screenshot.png new file mode 100644 index 0000000..8a7dfd1 Binary files /dev/null and b/docs/images/hero-screenshot.png differ diff --git a/docs/images/icon.ico b/docs/images/icon.ico new file mode 100644 index 0000000..5f02c65 Binary files /dev/null and b/docs/images/icon.ico differ diff --git a/docs/images/statistics.png b/docs/images/statistics.png new file mode 100644 index 0000000..56d1b18 Binary files /dev/null and b/docs/images/statistics.png differ diff --git a/docs/index.html b/docs/index.html new file mode 100644 index 0000000..435595d --- /dev/null +++ b/docs/index.html @@ -0,0 +1,638 @@ + + + + + + + + + + + + + +Daniel + + + +
+ + + + + +
+
+

Daniel

+

+ Difficulty Analysis of Notechart Intensity for Estimated Levels +

+

+ A real-time rice difficulty calculator for 4k osu!mania. Daniel links up to tosu to read + your currently selected beatmap, displaying its dan-referenced level and its tier (Low/Mid/High) from Alpha through to Theta. It breaks it down further using a live strain graph and individual MSD skillsets. +

+ +
+
+ 52.4% +
Within Exact Tier
+
+
+ 89.6% +
Within Adjacent Tiers
+
+
+ 98.7% +
Within one full dan Level
+
+
+ 145 +
Beatmaps tested
+
+
+
+ + +
+

How it works!

+
+

Real-time Detection

+

Daniel reads map data in real-time using tosu. This means that whenever you change beatmaps, Daniel will update instantly!

+
+
+

Modified Sunny Rework Algorithm

+

The numerical dan rating is calculated using a modified version of the Sunny Rework algorithm which is optimised for high-level rice. Long notes and OD are not factored into the calculation. The numerical dan ratings are based on individual maps, which leads to marathons usually displaying the 'High' tier within that dan level.

+
+
+

Skillset Detection via MSD

+

Individual skillsets are calculated using MSD (MinaCalc Skill Difficulty). Overall rating is also calculated, and jackspeed is used to determine whether a beatmap is primarily vibro-based.

+
+
+

Mod Support

+

HT and DT are both supported as the rating adjusts automatically based on the selected mod.

+
+ +

Overlay views

+

You can press Tab to cycle between the overlay views.

+
+
+
+

Compact

+

A minimalistic display showing only the estimated dan plus the numerical rating.

+
+ Compact view +
+
+
+

Statistics

+

The compact view with added MSD skillset calculations for skillsets and overall MSD rating.

+
+ Statistics view +
+
+
+

Graph

+

The statistics view with an added straintime graph that + updates in real-time based on drain time in the beatmap. Red vertical lines indicate pauses in gameplay.

+
+ Graph view +
+
+
+ + +
+

Download

+
+
+

Daniel.exe

+
Loading...
+

Requirements:

+
+ Windows 10 / 11 + osu! / osu!(stable) + tosu +
+
+
+ ↓ Download .exe + ... downloads +
+
+ +
+
+
01
+
+

Install tosu

+

Download and install tosu.

+
+
+
+
02
+
+

Download Daniel

+

Save Daniel.exe anywhere on your system.

+
+
+
+
03
+
+

Launch tosu and Daniel

+

Run both tosu and Daniel alongside your osu! client.

+
+
+
+
04
+
+

Have fun danning with Daniel

+

Boy do I love me some dans.

+
+
+
+
+ + +
+

Changelog

+
+
Loading...
+
+
+ + +
+

FAQ

+ +
+ +

It will refuse to calculate vibro maps altogether and will just list them as 'VIBRO'. To my knowledge this doesn't affect any maps other than just straight vibro.

+
+ +
+ +

All calculations are done when the beatmap is first selected so any performance hits would only be in the song select menu.

+
+ +
+ +

The graph is based on the same modified Sunny Rework algorithm that's used for the numerical dan values. This means that it represents the actual difficulty of that point in the beatmap rather than the density.

+
+ +
+ +

Source code is available here!

+
+ +
+ +

It's bad at quite a few different skillsets in particular. It heavily underrates speedjack (Vertex Beta Zeta) and awkward speed (Volcanic). It overrates anchorjack in epsilon+ and may also overrate some high bpm speed (Finixe Zeta). There are more patterns it struggles with occasionally but these are the most consistent ones.

+
+ +
+ +

No. It used to in testing versions but all LNs are converted to rice for the calculations now.

+
+ +
+ +

No. It used to in testing versions but now all maps are calculated as if they are OD9.

+
+ +
+ +

Maybe but not confirmed.

+
+
+ + +
+

Credits

+
+
Developer
+
+

TheBagelOfMan

+
+
+
+ +
+

[Crz]sunnyxxy, Natelytle, vernonlim, ChlorieHCl, Imperial Wolf

+
+
+
+ +
+

MinaciousGrace

+
+
+
+ +
+

KotRik, Cherry

+
+
+
+ + +
+

Daniel Daniel Daniel Daniel Daniel

+ +
+ +
+ + + + + \ No newline at end of file diff --git a/src/algorithm.py b/src/algorithm.py new file mode 100644 index 0000000..729e424 --- /dev/null +++ b/src/algorithm.py @@ -0,0 +1,506 @@ +import math +from collections import defaultdict + +import numpy as np +import osu_file_parser as osu_parser + +# --- Constants --- + +BREAK_ZERO_THRESHOLD_MS = 400 +GRAPH_RESAMPLE_INTERVAL_MS = 100 +SMOOTH_SIGMA_MS = 800 + + +# --- Helper Functions --- + +def gaussian_filter1d(data, sigma, mode="constant", cval=0.0): + kernel_radius = int(4 * sigma + 0.5) + x = np.arange(-kernel_radius, kernel_radius + 1) + kernel = np.exp(-0.5 * (x / sigma) ** 2) + kernel /= kernel.sum() + if mode == "constant": + padded = np.pad(data, kernel_radius, mode="constant", constant_values=cval) + else: + padded = np.pad(data, kernel_radius, mode=mode) + return np.convolve(padded, kernel, mode="valid") + + +def cumulative_sum(x, f): + """Vectorised exact cumulative integral of piecewise-constant f on sorted x.""" + F = np.zeros(len(x)) + F[1:] = np.cumsum(f[:-1] * np.diff(x)) + return F + + +def smooth_on_corners(x, f, window, scale=1.0, mode="sum"): + """Vectorised sliding-window integral of piecewise-constant f.""" + x = np.asarray(x, dtype=float) + f = np.asarray(f, dtype=float) + F = cumulative_sum(x, f) + + a = np.clip(x - window, x[0], x[-1]) + b = np.clip(x + window, x[0], x[-1]) + + def _query_vec(q_arr): + idx = np.searchsorted(x, q_arr) - 1 + idx = np.clip(idx, 0, len(x) - 2) + return F[idx] + f[idx] * (q_arr - x[idx]) + + val = _query_vec(b) - _query_vec(a) + + if mode == "avg": + span = b - a + return np.where(span > 0, val / span, 0.0) + return scale * val + + +def interp_values(new_x, old_x, old_vals): + return np.interp(new_x, old_x, old_vals) + + +def step_interp(new_x, old_x, old_vals): + indices = np.searchsorted(old_x, new_x, side="right") - 1 + indices = np.clip(indices, 0, len(old_vals) - 1) + return old_vals[indices] + + +def rescale_high(sr): + if sr <= 9: + return sr + return 9 + 
(sr - 9) / 1.2 + + +# --- Preprocessing --- + +def preprocess_file(file_path, mod): + p_obj = osu_parser.parser(file_path) + p_obj.process() + p = p_obj.get_parsed_data() + + note_seq = [] + for i in range(len(p[1])): + k = p[1][i] + h = p[2][i] + if mod == "DT": + h = int(math.floor(h * 2 / 3)) + elif mod == "HT": + h = int(math.floor(h * 4 / 3)) + note_seq.append((k, h)) + + x = 0.3 * ((64.5 - math.ceil(p[5] * 3)) / 500) ** 0.5 + x = min(x, 0.6 * (x - 0.09) + 0.09) + note_seq.sort(key=lambda tup: (tup[1], tup[0])) + + note_dict = defaultdict(list) + for tup in note_seq: + note_dict[tup[0]].append(tup) + note_seq_by_column = sorted(note_dict.values(), key=lambda lst: lst[0][0]) + + K = p[0] + T = max(n[1] for n in note_seq) + 1 + + return x, K, T, note_seq, note_seq_by_column + + +# --- Corner Computation --- + +def get_corners(T, note_seq): + corners_base = set() + for _, h in note_seq: + corners_base.update([h, h + 501, h - 499, h + 1]) + corners_base.update([0, T]) + corners_base = sorted(s for s in corners_base if 0 <= s <= T) + + corners_A = set() + for _, h in note_seq: + corners_A.update([h, h + 1000, h - 1000]) + corners_A.update([0, T]) + corners_A = sorted(s for s in corners_A if 0 <= s <= T) + + all_corners = sorted(set(corners_base) | set(corners_A)) + return ( + np.array(all_corners, dtype=float), + np.array(corners_base, dtype=float), + np.array(corners_A, dtype=float), + ) + + +# --- Key Usage --- + +def get_key_usage(K, T, note_seq, base_corners): + key_usage = {k: np.zeros(len(base_corners), dtype=bool) for k in range(K)} + for k, h in note_seq: + start = max(h - 150, 0) + end = min(h + 150, T - 1) + li = np.searchsorted(base_corners, start, side="left") + ri = np.searchsorted(base_corners, end, side="left") + key_usage[k][li:ri] = True + return key_usage + + +def get_key_usage_400(K, T, note_seq, base_corners): + key_usage_400 = {k: np.zeros(len(base_corners), dtype=float) for k in range(K)} + for k, h in note_seq: + start = max(h, 0) + li = 
np.searchsorted(base_corners, start - 400, side="left") + ri = np.searchsorted(base_corners, start + 400, side="left") + mid = np.searchsorted(base_corners, start, side="left") + + key_usage_400[k][mid] += 3.75 + for idx_range in [np.arange(li, mid), np.arange(mid + 1, ri)]: + key_usage_400[k][idx_range] += 3.75 - 3.75 / 400 ** 2 * (base_corners[idx_range] - start) ** 2 + return key_usage_400 + + +# --- Difficulty Components --- + +def compute_anchor(K, key_usage_400, base_corners): + counts = np.stack([key_usage_400[k] for k in range(K)], axis=1) + counts = np.sort(counts, axis=1)[:, ::-1] + + nonzero_mask = counts > 0 + n_nz = nonzero_mask.sum(axis=1) + + c0 = counts[:, :-1] + c1 = counts[:, 1:] + safe_c0 = np.where(c0 > 0, c0, 1.0) + ratio = np.where(c0 > 0, c1 / safe_c0, 0.0) + weight = 1 - 4 * (0.5 - ratio) ** 2 + + pair_valid = nonzero_mask[:, :-1] & nonzero_mask[:, 1:] + walk = np.sum(np.where(pair_valid, c0 * weight, 0.0), axis=1) + max_walk = np.sum(np.where(pair_valid, c0, 0.0), axis=1) + + raw_anchor = np.where(n_nz > 1, walk / np.maximum(max_walk, 1e-9), 0.0) + return 1 + np.minimum(raw_anchor - 0.18, 5 * (raw_anchor - 0.22) ** 3) + + +def compute_Jbar(K, T, x, note_seq_by_column, base_corners): + def jack_nerfer(delta): + return 1 - 7e-5 * (0.15 + np.abs(delta - 0.08)) ** (-4) + + J_ks = {k: np.zeros(len(base_corners)) for k in range(K)} + delta_ks = {k: np.full(len(base_corners), 1e9) for k in range(K)} + + for k in range(K): + notes = note_seq_by_column[k] + if len(notes) < 2: + continue + starts = np.array([n[1] for n in notes[:-1]], dtype=float) + ends = np.array([n[1] for n in notes[1:]], dtype=float) + deltas = 0.001 * (ends - starts) + vals = deltas ** -1 * (deltas + 0.11 * x ** 0.25) ** -1 * jack_nerfer(deltas) + + for start, end, delta, val in zip(starts, ends, deltas, vals): + li = np.searchsorted(base_corners, start, side="left") + ri = np.searchsorted(base_corners, end, side="left") + if ri > li: + J_ks[k][li:ri] = val + delta_ks[k][li:ri] 
= delta + + Jbar_ks = { + k: smooth_on_corners(base_corners, J_ks[k], window=500, scale=0.001, mode="sum") + for k in range(K) + } + + Jbar_stack = np.stack([Jbar_ks[k] for k in range(K)], axis=0) + delta_stack = np.stack([delta_ks[k] for k in range(K)], axis=0) + weights = 1.0 / delta_stack + num = np.sum(np.maximum(Jbar_stack, 0) ** 5 * weights, axis=0) + den = np.sum(weights, axis=0) + Jbar = (num / np.maximum(den, 1e-9)) ** 0.2 + + return delta_ks, Jbar + + +def compute_Xbar(K, T, x, note_seq_by_column, active_columns, base_corners): + cross_matrix = [ + [-1], + [0.075, 0.075], + [0.125, 0.05, 0.125], + [0.125, 0.125, 0.125, 0.125], + [0.175, 0.25, 0.05, 0.25, 0.175], + [0.175, 0.25, 0.175, 0.175, 0.25, 0.175], + [0.225, 0.35, 0.25, 0.05, 0.25, 0.35, 0.225], + [0.225, 0.35, 0.25, 0.225, 0.225, 0.25, 0.35, 0.225], + [0.275, 0.45, 0.35, 0.25, 0.05, 0.25, 0.35, 0.45, 0.275], + [0.275, 0.45, 0.35, 0.25, 0.275, 0.275, 0.25, 0.35, 0.45, 0.275], + [0.325, 0.55, 0.45, 0.35, 0.25, 0.05, 0.25, 0.35, 0.45, 0.55, 0.325], + ] + cross_coeff = cross_matrix[K] + X_ks = {k: np.zeros(len(base_corners)) for k in range(K + 1)} + fast_cross = {k: np.zeros(len(base_corners)) for k in range(K + 1)} + + for k in range(K + 1): + if k == 0: + notes_in_pair = note_seq_by_column[0] + elif k == K: + notes_in_pair = note_seq_by_column[K - 1] + else: + notes_in_pair = sorted( + note_seq_by_column[k - 1] + note_seq_by_column[k], key=lambda t: t[1] + ) + + for i in range(1, len(notes_in_pair)): + start = notes_in_pair[i - 1][1] + end = notes_in_pair[i][1] + li = np.searchsorted(base_corners, start, side="left") + ri = np.searchsorted(base_corners, end, side="left") + if ri <= li: + continue + + delta = 0.001 * (notes_in_pair[i][1] - notes_in_pair[i - 1][1]) + val = 0.16 * max(x, delta) ** -2 + + left_inactive = (k - 1) not in active_columns[li] and (k - 1) not in active_columns[ri] + right_inactive = k not in active_columns[li] and k not in active_columns[ri] + if left_inactive or 
right_inactive: + val *= 1 - cross_coeff[k] + + X_ks[k][li:ri] = val + fast_cross[k][li:ri] = max(0, 0.4 * max(delta, 0.06, 0.75 * x) ** -2 - 80) + + X_base = np.array([ + sum(X_ks[k][i] * cross_coeff[k] for k in range(K + 1)) + + sum( + np.sqrt(fast_cross[k][i] * cross_coeff[k] * fast_cross[k + 1][i] * cross_coeff[k + 1]) + for k in range(K) + ) + for i in range(len(base_corners)) + ]) + + return smooth_on_corners(base_corners, X_base, window=500, scale=0.001, mode="sum") + + +def compute_Pbar(K, T, x, note_seq, anchor, base_corners): + def stream_booster(delta): + bpm = np.clip(7.5 / delta, 0, 420) + primary = 0.10 / (1 + np.exp(-0.06 * (bpm - 175))) + secondary = np.where( + (bpm >= 200) & (bpm <= 350), + 0.30 * (1 - np.exp(-0.02 * (bpm - 200))), + 0.0, + ) + return 1 + primary + secondary + + P_step = np.zeros(len(base_corners)) + + for i in range(len(note_seq) - 1): + h_l = note_seq[i][1] + h_r = note_seq[i + 1][1] + delta_time = h_r - h_l + + if delta_time < 1e-9: + spike = 1000 * (0.02 * (4 / x - 24)) ** 0.25 + li = np.searchsorted(base_corners, h_l, side="left") + ri = np.searchsorted(base_corners, h_l, side="right") + if ri > li: + P_step[li:ri] += spike + continue + + li = np.searchsorted(base_corners, h_l, side="left") + ri = np.searchsorted(base_corners, h_r, side="left") + if ri <= li: + continue + + delta = 0.001 * delta_time + b_val = stream_booster(delta) + base_inc = (0.08 * x ** -1 * (1 - 24 * x ** -1 * (x / 6) ** 2)) ** 0.25 + + if delta < 2 * x / 3: + inc = delta ** -1 * (0.08 * x ** -1 * (1 - 24 * x ** -1 * (delta - x / 2) ** 2)) ** 0.25 * max(b_val, 1) + else: + inc = delta ** -1 * base_inc * max(b_val, 1) + + seg_anchor = anchor[li:ri] + P_step[li:ri] += np.minimum(inc * seg_anchor, np.maximum(inc, inc * 2 - 10)) + + return smooth_on_corners(base_corners, P_step, window=500, scale=0.001, mode="sum") + + +def compute_Abar(K, T, x, note_seq_by_column, active_columns, delta_ks, A_corners, base_corners): + dks = {k: np.zeros(len(base_corners)) 
for k in range(K - 1)} + for i in range(len(base_corners)): + cols = active_columns[i] + for j in range(len(cols) - 1): + k0, k1 = cols[j], cols[j + 1] + dks[k0][i] = abs(delta_ks[k0][i] - delta_ks[k1][i]) + 0.4 * max( + 0, max(delta_ks[k0][i], delta_ks[k1][i]) - 0.11 + ) + + A_step = np.ones(len(A_corners)) + bc_idx = np.clip(np.searchsorted(base_corners, A_corners), 0, len(base_corners) - 1) + + for i in range(len(A_corners)): + idx = bc_idx[i] + cols = active_columns[idx] + for j in range(len(cols) - 1): + k0, k1 = cols[j], cols[j + 1] + d_val = dks[k0][idx] + dk0, dk1 = delta_ks[k0][idx], delta_ks[k1][idx] + if d_val < 0.02: + A_step[i] *= min(0.75 + 0.5 * max(dk0, dk1), 1) + elif d_val < 0.07: + A_step[i] *= min(0.65 + 5 * d_val + 0.5 * max(dk0, dk1), 1) + + return smooth_on_corners(A_corners, A_step, window=250, mode="avg") + + +def compute_C_and_Ks(K, T, note_seq, key_usage, base_corners): + note_hit_times = np.array(sorted(n[1] for n in note_seq), dtype=float) + + lo = np.searchsorted(note_hit_times, base_corners - 500, side="left") + hi = np.searchsorted(note_hit_times, base_corners + 500, side="left") + C_step = (hi - lo).astype(float) + + Ks_step = np.maximum( + np.stack([key_usage[k] for k in range(K)], axis=0).sum(axis=0), 1 + ).astype(float) + + return C_step, Ks_step + + +# --- Graph Post-Processing --- + +def _apply_proximity_envelope(all_corners, D_all, note_seq): + if not note_seq: + return D_all.copy() + + note_times = np.sort(np.array([float(h) for _, h in note_seq])) + PROXIMITY_FADE_MS = 500.0 + + idx = np.searchsorted(note_times, all_corners) + d_after = np.abs(note_times[np.clip(idx, 0, len(note_times) - 1)] - all_corners) + d_before = np.abs(note_times[np.clip(idx - 1, 0, len(note_times) - 1)] - all_corners) + d = np.minimum(d_after, d_before) + + envelope = 0.5 * (1.0 + np.cos(np.pi * np.clip(d / PROXIMITY_FADE_MS, 0.0, 1.0))) + return D_all * envelope + + +def smooth_D_for_graph(all_corners, D_all, note_seq): + note_times = 
np.array(sorted(float(h) for _, h in note_seq), dtype=float) + + t_start = float(all_corners[0]) + t_end = float(all_corners[-1]) + uniform_t = np.arange(t_start, t_end + GRAPH_RESAMPLE_INTERVAL_MS, GRAPH_RESAMPLE_INTERVAL_MS, dtype=float) + + if len(note_times) > 0: + idx = np.searchsorted(note_times, uniform_t) + idx_after = np.clip(idx, 0, len(note_times) - 1) + idx_before = np.clip(idx - 1, 0, len(note_times) - 1) + dist = np.minimum(np.abs(uniform_t - note_times[idx_before]), np.abs(uniform_t - note_times[idx_after])) + break_mask = dist > BREAK_ZERO_THRESHOLD_MS + else: + break_mask = np.zeros(len(uniform_t), dtype=bool) + + uniform_D = np.interp(uniform_t, all_corners, D_all) + uniform_D[break_mask] = 0.0 + + sigma_samples = SMOOTH_SIGMA_MS / GRAPH_RESAMPLE_INTERVAL_MS + uniform_result = gaussian_filter1d(uniform_D, sigma=sigma_samples, mode="constant", cval=0.0) + uniform_result[break_mask] = 0.0 + + return np.interp(all_corners, uniform_t, uniform_result) + + +# --- Main Entry Points --- + +def calculate(file_path, mod): + x, K, T, note_seq, note_seq_by_column = preprocess_file(file_path, mod) + all_corners, base_corners, A_corners = get_corners(T, note_seq) + + key_usage = get_key_usage(K, T, note_seq, base_corners) + active_columns = [[k for k in range(K) if key_usage[k][i]] for i in range(len(base_corners))] + key_usage_400 = get_key_usage_400(K, T, note_seq, base_corners) + anchor = compute_anchor(K, key_usage_400, base_corners) + + delta_ks, Jbar = compute_Jbar(K, T, x, note_seq_by_column, base_corners) + Jbar = interp_values(all_corners, base_corners, Jbar) + + Xbar = compute_Xbar(K, T, x, note_seq_by_column, active_columns, base_corners) + Xbar = interp_values(all_corners, base_corners, Xbar) + + Pbar = compute_Pbar(K, T, x, note_seq, anchor, base_corners) + Pbar = interp_values(all_corners, base_corners, Pbar) + + Abar = compute_Abar(K, T, x, note_seq_by_column, active_columns, delta_ks, A_corners, base_corners) + Abar = interp_values(all_corners, 
A_corners, Abar) + + C_step, Ks_step = compute_C_and_Ks(K, T, note_seq, key_usage, base_corners) + C_arr = step_interp(all_corners, base_corners, C_step) + Ks_arr = step_interp(all_corners, base_corners, Ks_step) + + S_all = ( + (0.4 * (Abar ** (3 / Ks_arr) * np.minimum(Jbar, 8 + 0.85 * Jbar)) ** 1.5) + + (0.6 * (Abar ** (2 / 3) * (0.8 * Pbar)) ** 1.5) + ) ** (2 / 3) + T_all = (Abar ** (3 / Ks_arr) * Xbar) / (Xbar + S_all + 1) + D_all = 2.7 * (S_all ** 0.5) * (T_all ** 1.5) + S_all * 0.27 + + gaps = np.empty_like(all_corners, dtype=float) + gaps[0] = (all_corners[1] - all_corners[0]) / 2.0 + gaps[-1] = (all_corners[-1] - all_corners[-2]) / 2.0 + gaps[1:-1] = (all_corners[2:] - all_corners[:-2]) / 2.0 + + effective_weights = C_arr * gaps + sorted_indices = np.argsort(D_all) + D_sorted = D_all[sorted_indices] + w_sorted = effective_weights[sorted_indices] + + cum_weights = np.cumsum(w_sorted) + norm_cum_weights = cum_weights / cum_weights[-1] + + target_percentiles = np.array([0.945, 0.935, 0.925, 0.915, 0.845, 0.835, 0.825, 0.815]) + indices = np.searchsorted(norm_cum_weights, target_percentiles, side="left") + + percentile_93 = np.mean(D_sorted[indices[:4]]) + percentile_83 = np.mean(D_sorted[indices[4:8]]) + weighted_mean = (np.sum(D_sorted ** 5 * w_sorted) / np.sum(w_sorted)) ** 0.2 + + SR = 0.88 * percentile_93 * 0.25 + 0.94 * percentile_83 * 0.2 + weighted_mean * 0.55 + total_notes = len(note_seq) + SR *= total_notes / (total_notes + 60) + SR = rescale_high(SR) * 0.975 + + D_pre = _apply_proximity_envelope(all_corners, D_all, note_seq) + D_graph = smooth_D_for_graph(all_corners, D_pre, note_seq) + + return ( + SR, + all_corners, + D_graph, + { + "Pressing Intensity": Pbar, + "Unevenness": Abar, + "Same-Column Pressure": Jbar, + "Cross-Column Pressure": Xbar, + }, + ) + + +def factor_averages(times, factors): + times = np.asarray(times, dtype=float) + names = list(factors.keys()) + matrix = np.stack([factors[n] for n in names], axis=0) + integrals = 
np.trapezoid(matrix, times, axis=1) + duration = times[-1] - times[0] + return {n: float(integrals[i] / duration) for i, n in enumerate(names)} + + +def parse_hitobjects(file_path, mod="NM"): + p_obj = osu_parser.parser(file_path) + p_obj.process() + p = p_obj.get_parsed_data() + + hitobjects = [] + for i in range(len(p[1])): + x = p[1][i] + time = p[2][i] + if mod == "DT": + time *= 2 / 3 + elif mod == "HT": + time *= 4 / 3 + hitobjects.append({"x": x, "time": time}) + + return hitobjects \ No newline at end of file diff --git a/src/daniel.py b/src/daniel.py new file mode 100644 index 0000000..0330128 --- /dev/null +++ b/src/daniel.py @@ -0,0 +1,841 @@ +import os +import sys +import json +import time +import threading +import ctypes +import tkinter as tk + +import numpy as np +import websocket + +import algorithm +import msd_converter +from graph_fast import FastGraph + + +def resource_path(relative_path): + """Get absolute path to resource — works for dev and when compiled with PyInstaller.""" + base_path = getattr(sys, "_MEIPASS", os.path.dirname(os.path.abspath(__file__))) + return os.path.join(base_path, relative_path) + + +# --- Constants --- + +TOSU_WS = "ws://localhost:24050/ws" + +BREAK_ZERO_THRESHOLD_MS = 400 +TIME_JUMP_THRESHOLD_MS = 2000 +_OSU_TIMEOUT = 1.0 + +MODE_COMPACT = 0 +MODE_STATISTICS = 1 +MODE_FULL = 2 +MODE_NAMES = ["compact", "statistics", "full"] + +GRAPH_HEIGHT = 250 +BAR_HEIGHT = 120 +WINDOW_WIDTH = 650 + +COMPACT_HEIGHT = 65 +STATISTICS_HEIGHT = 120 +FULL_HEIGHT = GRAPH_HEIGHT + BAR_HEIGHT + +COMPACT_WIDTH = 550 +STATISTICS_WIDTH = 650 +FULL_WIDTH = 650 + +MODE_HEIGHTS = { + MODE_COMPACT: COMPACT_HEIGHT, + MODE_STATISTICS: STATISTICS_HEIGHT, + MODE_FULL: FULL_HEIGHT, +} +MODE_WIDTHS = { + MODE_COMPACT: COMPACT_WIDTH, + MODE_STATISTICS: STATISTICS_WIDTH, + MODE_FULL: FULL_WIDTH, +} + +BG_COLOR = "#000000" +PREFIX_FILL = "#FFFFFF" +DOT_RED = "#FF3B3B" +DOT_GREEN = "#00E676" + +FONT_SCALE = float(os.environ.get("DANIEL_FONT_SCALE", "1.0" if 
os.name == "nt" else "0.67")) + + +def _font_size(size): + return max(8, int(round(size * FONT_SCALE))) + + +FONT_PREFIX = ("Segoe UI Semibold", _font_size(30)) +FONT_DAN = ("Segoe UI Bold", _font_size(45)) +FONT_MSD_SKILL = ("Segoe UI Semibold", _font_size(29)) +FONT_CONNECTION = ("Segoe UI Semibold", _font_size(18)) + +PREFIX_Y_OFFSET = 4.1 +MSD_RELEVANCE_FRACTION = 0.15 +VIBRO_JACKSPEED_THRESHOLD = 0.90 + +DAN_COLORS = { + "Alpha": "#ff5a5a", + "Beta": "#ffd84d", + "Gamma": "#00ffd5", + "Delta": "#ff7b00", + "Epsilon": "#ff7a9e", + "Zeta": "#D7F7FF", + "Eta": "#ff2b2b", + "Theta": "#CC00FF", +} + +DAN_MEANS = { + "Alpha": 6.562, + "Beta": 6.957, + "Gamma": 7.459, + "Delta": 7.939, + "Epsilon": 9.095, + "Zeta": 9.473, + "Eta": 10.162, + "Theta": 10.782, +} +ORDER = list(DAN_MEANS.keys()) +DAN_ORDER_START = 11 + + +# --- State --- + +lock = threading.Lock() + +current_map = None +current_mod = "NM" +last_state = None +current_song_time_ms = 0 + +_ws_receive_time = 0.0 +_ws_song_time_ms = 0 +_prev_song_time_ms = 0 +_prev_receive_time = 0.0 +_last_message_time = 0.0 + +_paused = False +_pause_time_ms = 0 +_frozen_interp_ms = 0.0 + +loading = False +loading_step = 0 +_last_loading_dot = 0.0 + +current_strain_data = None +current_msd_data = None +connection_phase = "connecting" + +_last_dan_label = "." 
+_last_dan_numeric = "" +current_mode = MODE_FULL + + +# --- Window setup --- + +if os.name == "nt" and hasattr(ctypes, "windll"): + try: + ctypes.windll.user32.SetProcessDPIAware() + except Exception: + pass + +root = tk.Tk() +root.tk.call("tk", "scaling", 1.0) +root.title("Daniel by TheBagelOfMan") +root.geometry(f"{WINDOW_WIDTH}x{FULL_HEIGHT}") +root.resizable(False, False) +root.configure(bg=BG_COLOR) +root.attributes("-topmost", True) + + +def _set_dark_title_bar(window): + try: + hwnd = ctypes.windll.user32.GetParent(window.winfo_id()) + value = ctypes.c_int(1) + ctypes.windll.dwmapi.DwmSetWindowAttribute(hwnd, 20, ctypes.byref(value), ctypes.sizeof(value)) + except Exception: + pass + try: + hwnd = ctypes.windll.user32.GetParent(window.winfo_id()) + value = ctypes.c_int(1) + ctypes.windll.dwmapi.DwmSetWindowAttribute(hwnd, 19, ctypes.byref(value), ctypes.sizeof(value)) + except Exception: + pass + + +root.update_idletasks() +_set_dark_title_bar(root) + +_icon_path = resource_path("icon.ico") +if os.path.exists(_icon_path): + try: + root.iconbitmap(_icon_path) + except Exception: + pass + +_icon_png_path = resource_path("icon.png") +if os.path.exists(_icon_png_path): + try: + _icon_img = tk.PhotoImage(file=_icon_png_path) + root.iconphoto(True, _icon_img) + except Exception: + pass + +canvas = tk.Canvas(root, width=WINDOW_WIDTH, height=FULL_HEIGHT, bg=BG_COLOR, highlightthickness=0) +canvas.pack(expand=True, fill="both") + +graph = FastGraph(canvas, GRAPH_HEIGHT, WINDOW_WIDTH) + +text_items = [] +msd_items = [] +accent_bar = None +current_bar_color = "#333333" +_connection_items = [] +_pulse_job = None + + +# --- Drawing helpers --- + +def rgb(hex_color): + r, g, b = root.winfo_rgb(hex_color) + return r // 256, g // 256, b // 256 + + +def lerp_color(c1, c2, t): + r1, g1, b1 = rgb(c1) + r2, g2, b2 = rgb(c2) + r = int(r1 + (r2 - r1) * t) + g = int(g1 + (g2 - g1) * t) + b = int(b1 + (b2 - b1) * t) + return f"#{r:02x}{g:02x}{b:02x}" + + +def draw_text(x, y, text, 
fill, font, anchor="w"): + return [canvas.create_text(x, y, text=text, fill=fill, font=font, anchor=anchor)] + + +def draw_outline_text(x, y, text, fill, outline, font): + items = [] + for ox, oy in [(-1, 0), (1, 0), (0, -1), (0, 1), (-1, -1), (-1, 1), (1, -1), (1, 1)]: + items.append(canvas.create_text(x + ox, y + oy, text=text, fill=outline, font=font, anchor="w")) + items.append(canvas.create_text(x, y, text=text, fill=fill, font=font, anchor="w")) + return items + + +def is_loading_text(text): + return text in (".", "..", "...") + + +def _get_text_y_offset(): + return GRAPH_HEIGHT if current_mode == MODE_FULL else 0 + + +# --- Connection screen --- + +def _draw_connection_screen(): + global _connection_items, _pulse_job + + if _pulse_job is not None: + root.after_cancel(_pulse_job) + _pulse_job = None + + for item in _connection_items: + canvas.delete(item) + _connection_items.clear() + + if connection_phase == "ready": + return + + cy = MODE_HEIGHTS[current_mode] // 2 + + if connection_phase == "connecting": + label = "Waiting for tosu connection..." 
+ dot_color = DOT_RED + else: + label = "tosu connected - waiting for map data" + dot_color = DOT_GREEN + + dot_r = 6 + dot_cx = 20 + inner = canvas.create_oval( + dot_cx - dot_r, cy - dot_r, + dot_cx + dot_r, cy + dot_r, + fill=dot_color, outline="", + ) + title = canvas.create_text( + dot_cx + dot_r + 10, cy, + text=label, fill="#AAAAAA", + font=FONT_CONNECTION, anchor="w", + ) + _connection_items += [inner, title] + _pulse_connection(inner, dot_color, 0) + + +def _pulse_connection(inner, dot_color, step): + global _pulse_job + + if connection_phase == "ready": + _pulse_job = None + return + + phase = (step % 40) / 40 + alpha = 0.4 + 0.6 * abs(1 - 2 * phase) + pulsed = lerp_color("#000000", dot_color, alpha) + canvas.itemconfig(inner, fill=pulsed) + _pulse_job = root.after(50, lambda: _pulse_connection(inner, dot_color, step + 1)) + + +def _clear_connection_screen(): + global _connection_items, _pulse_job + + if _pulse_job is not None: + root.after_cancel(_pulse_job) + _pulse_job = None + + for item in _connection_items: + canvas.delete(item) + _connection_items.clear() + + if current_mode == MODE_FULL and _last_dan_label not in ("Invalid Beatmap", ".", "..", "..."): + graph.show() + + +def _clear_normal_ui(): + global text_items, msd_items, accent_bar + + for item in text_items: + canvas.delete(item) + text_items.clear() + + for item in msd_items: + canvas.delete(item) + msd_items.clear() + + if accent_bar: + canvas.delete(accent_bar) + accent_bar = None + + graph.hide() + + +def _clear_invalid_ui(): + global msd_items + for item in msd_items: + canvas.delete(item) + msd_items.clear() + graph.hide() + + +# --- UI components --- + +def get_relevant_skillsets(msd_result): + overall = msd_result.get("overall", 0) + threshold = overall * MSD_RELEVANCE_FRACTION + relevant = {k: v for k, v in msd_result.items() if k != "overall" and (overall - v) <= threshold} + top3 = sorted(relevant.items(), key=lambda x: x[1], reverse=True)[:3] + jackspeed = 
msd_result.get("jackspeed", 0) + is_vibro = (overall > 0) and (jackspeed / overall >= VIBRO_JACKSPEED_THRESHOLD) + return overall, top3, is_vibro + + +def draw_msd(msd_result, color): + global msd_items + for item in msd_items: + canvas.delete(item) + msd_items.clear() + + if current_mode == MODE_COMPACT: + return + + if msd_result is None: + y = _get_text_y_offset() + 80 + msd_items += draw_text(14, y, "MSD Error", "#FF4444", FONT_MSD_SKILL) + return + + overall, top3, _ = get_relevant_skillsets(msd_result) + if not top3: + return + + skillset_str = ", ".join(key.capitalize() for key, _ in top3) + y = _get_text_y_offset() + 80 + msd_items += draw_text(14, y, f"{skillset_str} {overall:.2f}MSD", "#FFFFFF", FONT_MSD_SKILL) + + +def draw_accent_bar(): + global accent_bar + if accent_bar: + canvas.delete(accent_bar) + h = MODE_HEIGHTS[current_mode] + accent_bar = canvas.create_rectangle(0, 0, 6, h, fill=current_bar_color, outline="") + return accent_bar + + +def fade_items(text_item, bar_item, start_color, end_color, steps=14): + global current_bar_color + + def _step(i): + global current_bar_color + if i < steps: + color = lerp_color(start_color, end_color, i / steps) + canvas.itemconfig(text_item, fill=color) + canvas.itemconfig(bar_item, fill=color) + current_bar_color = color + root.after(15, lambda: _step(i + 1)) + else: + canvas.itemconfig(text_item, fill=end_color) + canvas.itemconfig(bar_item, fill=end_color) + current_bar_color = end_color + if current_mode == MODE_FULL: + graph.set_color(end_color) + + _step(0) + + +def update_dan_text(dan_label, dan_numeric): + global text_items, current_bar_color + + if connection_phase != "ready": + return + + for item in text_items: + canvas.delete(item) + text_items.clear() + + if is_loading_text(dan_label): + fill = "#888888" + new_bar_color = "#333333" + else: + if dan_label.startswith("<"): + fill = "#7DF0FF" + new_bar_color = fill + else: + base = dan_label.split()[0] + fill = DAN_COLORS.get(base, "#FFFFFF") + 
new_bar_color = fill + + bar = draw_accent_bar() + y_off = _get_text_y_offset() + y = y_off + 28 + prefix_y = y + PREFIX_Y_OFFSET + + prefix = draw_text(14, prefix_y, "Est. Dan:", PREFIX_FILL, FONT_PREFIX) + text_items.extend(prefix) + + bbox = canvas.bbox(prefix[-1]) + pw = bbox[2] - bbox[0] if bbox else 0 + xpos = 14 + pw + 8 + + is_vibro = False + if ( + not is_loading_text(dan_label) + and dan_label not in ("Invalid Beatmap", "? ? ? ? ?") + and current_msd_data is not None + ): + _, _, is_vibro = get_relevant_skillsets(current_msd_data) + + if dan_label == "? ? ? ? ?": + dan_items = draw_outline_text(xpos, y, dan_label, fill="#000000", outline="#FFFFFF", font=FONT_DAN) + new_bar_color = "#FFFFFF" + elif is_vibro: + dan_items = draw_text(xpos, y, "VIBRO", "#FFFFFF", FONT_DAN) + new_bar_color = "#FFFFFF" + else: + dan_items = draw_text(xpos, y, dan_label, current_bar_color, FONT_DAN) + + text_items.extend(dan_items) + + if not is_loading_text(dan_label) and dan_numeric: + dan_bbox = canvas.bbox(dan_items[-1]) + numeric_x = (dan_bbox[2] if dan_bbox else xpos) + 10 + display_numeric = "N/A" if is_vibro else f"({dan_numeric})" + text_items.extend(draw_text(numeric_x, prefix_y, display_numeric, "#FFFFFF", FONT_PREFIX)) + + if current_mode != MODE_COMPACT: + draw_msd(current_msd_data, new_bar_color if not is_loading_text(dan_label) else "#333333") + else: + for item in msd_items: + canvas.delete(item) + msd_items.clear() + + if current_mode == MODE_FULL: + graph.set_color( + new_bar_color if (is_loading_text(dan_label) or dan_label == "? ? ? ? ?") else current_bar_color + ) + + if not is_loading_text(dan_label) and dan_label != "? ? ? ? ?": + fade_items(dan_items[-1], bar, current_bar_color, new_bar_color) + else: + canvas.itemconfig(bar, fill=new_bar_color) + current_bar_color = new_bar_color + if dan_label != "? ? ? ? 
?": + canvas.itemconfig(dan_items[-1], fill=fill) + + +def set_dan_text(label, numeric): + root.after(0, lambda: update_dan_text(label, numeric)) + + +# --- Mode switching --- + +def _apply_mode(): + h = MODE_HEIGHTS[current_mode] + w = MODE_WIDTHS[current_mode] + root.geometry(f"{w}x{h}") + canvas.configure(width=w, height=h) + + if current_mode == MODE_FULL: + graph.show() + else: + graph.hide() + + if connection_phase != "ready": + _draw_connection_screen() + else: + update_dan_text(_last_dan_label, _last_dan_numeric) + + +def cycle_mode(event=None): + global current_mode + current_mode = (current_mode + 1) % 3 + _apply_mode() + print(f"[Mode] Switched to {MODE_NAMES[current_mode]}") + + +root.bind("", cycle_mode) + + +# --- Tick loop --- + +def _tick(): + global loading_step, _last_loading_dot + + if connection_phase != "ready": + root.after(16, _tick) + return + + now = time.monotonic() + + if loading and now - _last_loading_dot >= 0.4: + dots = [".", "..", "..."] + update_dan_text(dots[loading_step % 3], "") + loading_step += 1 + _last_loading_dot = now + + if current_mode == MODE_FULL: + with lock: + ws_time = _ws_song_time_ms + ws_recv = _ws_receive_time + prev_time = _prev_song_time_ms + prev_recv = _prev_receive_time + md = current_mod + paused = _paused + frozen_ms = _frozen_interp_ms + + if paused: + graph.update_position(frozen_ms, md) + else: + real_dt = ws_recv - prev_recv + rate = (ws_time - prev_time) / real_dt if real_dt > 0.01 and ws_time > prev_time else 1000.0 + rate = max(0.0, min(rate, 5000.0)) + interpolated_ms = ws_time + rate * (now - ws_recv) + graph.update_position(interpolated_ms, md) + + root.after(16, _tick) + + +# --- WebSocket callbacks --- + +def on_open(ws_app): + global connection_phase, last_state + print("[WS] Connected to tosu.") + last_state = None + connection_phase = "waiting_map" + root.after(0, _draw_connection_screen) + + +def on_message(ws_app, msg): + global current_map, current_mod, current_song_time_ms + global 
_ws_receive_time, _ws_song_time_ms, _prev_song_time_ms, _prev_receive_time + global connection_phase, last_state, _last_message_time + global _paused, _pause_time_ms, _frozen_interp_ms + + _last_message_time = time.monotonic() + + try: + d = json.loads(msg) + bm = d.get("menu", {}).get("bm") + if not bm: + return + + folder = bm["path"]["folder"] + file = bm["path"]["file"] + + if not folder or not file: + if connection_phase == "ready": + print("[WS] osu closed — no map data.") + last_state = None + connection_phase = "waiting_map" + root.after(0, _clear_normal_ui) + root.after(0, _draw_connection_screen) + return + + songs = d["settings"]["folders"]["songs"] + new_map = os.path.join(songs, folder, file) + new_mod = get_rate_mod(read_mods(d)) + new_time = bm.get("time", {}).get("current", 0) + now = time.monotonic() + + with lock: + prev_ws_time = _ws_song_time_ms + current_map = new_map + current_mod = new_mod + current_song_time_ms = new_time + _prev_song_time_ms = _ws_song_time_ms + _prev_receive_time = _ws_receive_time + _ws_song_time_ms = new_time + _ws_receive_time = now + + sd = current_strain_data + t_max_ms = float(sd[0][-1]) if (sd is not None and len(sd[0]) > 0) else None + at_end = (t_max_ms is not None) and (new_time >= t_max_ms - 500) + + time_delta = new_time - prev_ws_time + jumped = abs(time_delta) > TIME_JUMP_THRESHOLD_MS and not (0 < time_delta < TIME_JUMP_THRESHOLD_MS) + + if jumped and not at_end: + if _paused: + _paused = False + _pause_time_ms = 0 + print(f"[Jump] Time jumped {time_delta:+.0f} ms — clearing pause markers") + root.after(0, graph.clear_all_pause_markers) + + elif new_time == prev_ws_time and not at_end: + if not _paused: + _paused = True + _pause_time_ms = new_time + _frozen_interp_ms = float(new_time) + print(f"[Pause] Detected at {new_time} ms") + root.after(0, lambda t=new_time, m=new_mod: graph.add_pause_marker(t, m)) + + else: + if _paused: + _paused = False + _pause_time_ms = 0 + print(f"[Pause] Resumed at {new_time} 
ms") + + if connection_phase != "ready": + connection_phase = "ready" + print("[WS] Map data received. Entering normal operation.") + root.after(0, _clear_connection_screen) + + except Exception: + pass + + +def on_close(ws_app, close_status_code, close_msg): + global connection_phase, last_state, _paused + print(f"[WS] Disconnected from tosu (code={close_status_code}).") + last_state = None + _paused = False + connection_phase = "connecting" + root.after(0, _clear_normal_ui) + root.after(0, _draw_connection_screen) + + +def on_error(ws_app, error): + print(f"[WS] Error: {error}") + + +def read_mods(d): + return ( + d.get("gameplay", {}).get("mods", {}).get("str") + or d.get("menu", {}).get("mods", {}).get("str") + or "" + ) + + +def get_rate_mod(m): + if "DT" in m or "NC" in m: + return "DT" + if "HT" in m: + return "HT" + return "NM" + + +# --- Calculation loop --- + +def calculation_loop(): + global last_state, loading, loading_step + global current_strain_data, current_msd_data + global _last_dan_label, _last_dan_numeric + + while True: + if connection_phase != "ready": + time.sleep(0.1) + continue + + with lock: + state = (current_map, current_mod) + + mp, mod = state + + if not mp or not os.path.exists(mp): + time.sleep(0.1) + continue + + if state == last_state: + time.sleep(0.1) + continue + + try: + loading = True + loading_step = 0 + + import osu_file_parser as osu_parser + _p = osu_parser.parser(mp) + _p.process() + if _p.get_parsed_data()[0] != 4: + raise ValueError(f"Not a 4k map (keycount={_p.get_parsed_data()[0]})") + + SR, times, strain, factors = algorithm.calculate(mp, mod) + + t_arr = np.asarray(times, dtype=float) + d_arr = np.asarray(strain, dtype=float) + current_strain_data = (t_arr, d_arr) + + try: + hitobjects = msd_converter.parse_hitobjects(mp, mod) + etterna_rows = msd_converter.osu_to_etterna_rows(hitobjects) + msd_result = msd_converter.calculate_msd(etterna_rows) + print("\n[MSD Skillsets]") + for k, v in msd_result.items(): + 
print(f"{k:<10}: {v:.2f}") + except Exception as msd_e: + print(f"[MSD] Error calculating MSD, skipping: {msd_e}") + msd_result = None + + with lock: + current_msd_data = msd_result + + averages = algorithm.factor_averages(times, factors) + dan_label, dan_numeric = get_dan_from_diff(SR) + + _last_dan_label = dan_label + _last_dan_numeric = dan_numeric + + print(f"\n[Map Factors] {os.path.basename(mp)} [{mod}]") + for k, v in averages.items(): + print(f"{k:<6}: {v:.4f}") + print(f"SR : {SR:.4f}★") + print(f"Dan : {dan_label} ({dan_numeric})\n") + + loading = False + last_state = state + + if current_mode == MODE_FULL: + root.after(0, lambda _t=t_arr, _d=d_arr: graph.set_data(_t, _d)) + root.after(0, lambda: graph.set_color(current_bar_color)) + root.after(0, graph.show) + set_dan_text(dan_label, dan_numeric) + + except Exception as e: + loading = False + last_state = state + print("Calculation error:", e) + _last_dan_label = "Invalid Beatmap" + _last_dan_numeric = "" + with lock: + current_msd_data = None + current_strain_data = None + root.after(0, _clear_invalid_ui) + set_dan_text("Invalid Beatmap", "") + + time.sleep(0.1) + + +# --- Dan boundary tables --- + +def _precompute_dan_boundaries(): + means = [DAN_MEANS[d] for d in ORDER] + boundaries = [] + for i in range(len(ORDER)): + mean = means[i] + lower = (means[i - 1] + mean) / 2 if i > 0 else mean - ((means[1] + mean) / 2 - mean) + upper = (mean + means[i + 1]) / 2 if i < len(means) - 1 else mean + (mean - means[i - 1]) / 2 + boundaries.append((lower, upper)) + return boundaries + + +_DAN_BOUNDARIES = _precompute_dan_boundaries() + + +def get_dan_from_diff(diff): + if diff < _DAN_BOUNDARIES[0][0]: + return f"<{ORDER[0]} Low", "N/A" + if diff >= _DAN_BOUNDARIES[-1][1]: + return "? ? ? ? 
?", "N/A" + + for i, dan in enumerate(ORDER): + lower, upper = _DAN_BOUNDARIES[i] + if lower <= diff < upper: + t = max(0.0, min((diff - lower) / (upper - lower), 1.0)) + numeric = round(DAN_ORDER_START + i + t, 2) + if t < 1 / 3: + label = f"{dan} Low" + elif t < 2 / 3: + label = f"{dan} Mid" + else: + label = f"{dan} High" + return label, numeric + + return "? ? ? ? ?", "N/A" + + +# --- Boot --- + +root.after(100, _draw_connection_screen) + + +def _ws_loop(): + global connection_phase + while True: + print("[WS] Attempting to connect to tosu...") + ws = websocket.WebSocketApp( + TOSU_WS, + on_open=on_open, + on_message=on_message, + on_close=on_close, + on_error=on_error, + ) + ws.run_forever() + if connection_phase == "ready": + connection_phase = "connecting" + root.after(0, _draw_connection_screen) + print("[WS] Retrying in 3 seconds...") + time.sleep(3) + + +def _message_timeout_watcher(): + global connection_phase, last_state + while True: + time.sleep(1) + if connection_phase != "ready": + continue + elapsed = time.monotonic() - _last_message_time + if elapsed > _OSU_TIMEOUT: + print(f"[Watcher] No message for {elapsed:.1f}s — osu likely closed.") + last_state = None + connection_phase = "waiting_map" + root.after(0, _clear_normal_ui) + root.after(0, _draw_connection_screen) + + +threading.Thread(target=calculation_loop, daemon=True).start() +threading.Thread(target=_ws_loop, daemon=True).start() +threading.Thread(target=_message_timeout_watcher, daemon=True).start() + +root.after(16, _tick) +root.mainloop() \ No newline at end of file diff --git a/src/graph_fast.py b/src/graph_fast.py new file mode 100644 index 0000000..7754e45 --- /dev/null +++ b/src/graph_fast.py @@ -0,0 +1,250 @@ +import tkinter as tk + +import numpy as np +from PIL import Image, ImageDraw + +PAD_X = 6 +PAD_Y_TOP = 8 +PAD_Y_BOT = 6 +BG_COLOR_RGB = (0, 0, 0) +UNPLAYED_FILL_RGB = (17, 17, 17) +UNPLAYED_STROKE_RGB = (42, 42, 42) +LINE_WIDTH = 5 # Width at 2x render resolution 
+LINE_BASELINE_INSET = 4 +MAX_GRAPH_POINTS = 300 # Lower cap for smoother curves +SUPERSAMPLE = 2 # Render at 2x then downscale for anti-aliased lines +MIN_BREAK_MS = 2000 # Breaks shorter than this get interpolated through + +PAUSE_LINE_COLOR = "#FF3B3B" +PAUSE_LINE_WIDTH = 2 + + +def _hex_to_rgb(h): + h = h.lstrip("#") + return (int(h[0:2], 16), int(h[2:4], 16), int(h[4:6], 16)) + + +def _lerp_rgb(c1, c2, t): + return tuple(int(c1[i] + (c2[i] - c1[i]) * t) for i in range(3)) + + +class FastGraph: + def __init__(self, canvas, graph_height, window_width): + self.canvas = canvas + self.graph_height = graph_height + self.window_width = window_width + + self._w = window_width + self._h = graph_height + self._plot_w = self._w - PAD_X + self._plot_h = self._h - PAD_Y_TOP - PAD_Y_BOT + self._bottom_y = self._h - PAD_Y_BOT + self._poly_bottom_y = self._bottom_y + 1 + + self._times = None + self._strain = None + self._t_min = 0.0 + self._t_max = 1.0 + self._poly_data = None + + self._played_rgb = None + self._unplayed_rgb = None + self._composite_rgb = None + + self._color_rgb = (255, 90, 90) + self._played_fill_rgb = _lerp_rgb(BG_COLOR_RGB, (255, 90, 90), 0.45) + self._played_stroke_rgb = _lerp_rgb(BG_COLOR_RGB, (255, 90, 90), 0.85) + + self._ppm_header = b"P6\n%d %d\n255\n" % (self._w, self._h) + self._tk_photo = tk.PhotoImage(width=self._w, height=self._h) + self._tk_photo.put("#000000", to=(0, 0, self._w, self._h)) + + self._canvas_item = self.canvas.create_image(0, 0, image=self._tk_photo, anchor="nw") + self.canvas.tag_lower(self._canvas_item) + + self._last_split_px = -1 + self._visible = True + self._pause_line_items = [] + + # --- Public API --- + + def set_data(self, times, strain): + self._times = np.asarray(times, dtype=float) + self._strain = np.asarray(strain, dtype=float) + + nonzero = np.where(self._strain > 0)[0] + if len(nonzero) == 0: + nonzero = np.arange(len(self._times)) + crop_start = max(nonzero[0] - 1, 0) + crop_end = min(nonzero[-1] + 2, 
len(self._times)) + self._times = self._times[crop_start:crop_end] + self._strain = self._strain[crop_start:crop_end] + + if len(self._times) < 2: + self._poly_data = None + return + + self._t_min = float(self._times[0]) + self._t_max = float(self._times[-1]) + self._poly_data = self._build_polygon() + self._rebuild_images() + self._last_split_px = -1 + self.clear_all_pause_markers() + + def set_color(self, hex_color): + self._color_rgb = _hex_to_rgb(hex_color) + self._played_fill_rgb = _lerp_rgb(BG_COLOR_RGB, self._color_rgb, 0.45) + self._played_stroke_rgb = _lerp_rgb(BG_COLOR_RGB, self._color_rgb, 0.85) + + if self._poly_data is not None: + self._rebuild_images() + self._last_split_px = -1 + + def hide(self): + if self._visible and self._canvas_item is not None: + self.canvas.itemconfigure(self._canvas_item, state="hidden") + self._visible = False + self.clear_all_pause_markers() + + def show(self): + if not self._visible and self._canvas_item is not None: + self.canvas.itemconfigure(self._canvas_item, state="normal") + self._visible = True + self._last_split_px = -1 + + def update_position(self, song_time_ms, mod="NM"): + if not self._visible or self._played_rgb is None or self._unplayed_rgb is None: + return + + scale = {"DT": 2 / 3, "HT": 4 / 3}.get(mod, 1.0) + adj_time = song_time_ms * scale + duration = self._t_max - self._t_min + frac = max(0.0, min((adj_time - self._t_min) / duration, 1.0)) if duration > 0 else 0.0 + split_px = max(0, min(round(PAD_X + frac * self._plot_w), self._w)) + + if split_px == self._last_split_px: + return + self._last_split_px = split_px + + buf = self._composite_rgb + if split_px > 0: + buf[:, :split_px, :] = self._played_rgb[:, :split_px, :] + if split_px < self._w: + buf[:, split_px:, :] = self._unplayed_rgb[:, split_px:, :] + + self._tk_photo.configure(data=self._ppm_header + buf.tobytes()) + + def add_pause_marker(self, song_time_ms, mod="NM"): + """Add a red vertical line at the given song time. 
Call from main thread only.""" + if not self._visible or self._t_max <= self._t_min: + return + + scale = {"DT": 2 / 3, "HT": 4 / 3}.get(mod, 1.0) + adj_time = song_time_ms * scale + duration = self._t_max - self._t_min + frac = max(0.0, min((adj_time - self._t_min) / duration, 1.0)) + x = max(PAD_X, min(round(PAD_X + frac * self._plot_w), self._w - 1)) + + if x <= PAD_X: + return + + hw = max(1, PAUSE_LINE_WIDTH // 2) + item = self.canvas.create_rectangle( + x - hw, PAD_Y_TOP, x + hw, self._bottom_y, + fill=PAUSE_LINE_COLOR, outline="", tags="pause_marker", + ) + self.canvas.tag_raise(item, self._canvas_item) + self._pause_line_items.append(item) + + def clear_all_pause_markers(self): + """Remove every pause marker line. Call from main thread only.""" + for item in self._pause_line_items: + self.canvas.delete(item) + self._pause_line_items.clear() + + def destroy(self): + self.clear_all_pause_markers() + if self._canvas_item is not None: + self.canvas.delete(self._canvas_item) + self._canvas_item = None + self._tk_photo = None + self._played_rgb = None + self._unplayed_rgb = None + self._composite_rgb = None + self._poly_data = None + self._last_split_px = -1 + + # --- Internal --- + + def _build_polygon(self): + t = self._times.copy() + d = self._strain.copy() + + is_zero = (d == 0).astype(np.int8) + transitions = np.diff(is_zero, prepend=0, append=0) + gap_starts = np.where(transitions == 1)[0] + gap_ends = np.where(transitions == -1)[0] + + for gs, ge in zip(gap_starts, gap_ends): + gap_duration = t[min(ge, len(t) - 1)] - t[max(gs - 1, 0)] + if gap_duration < MIN_BREAK_MS and gs > 0 and ge < len(d): + val_before = d[gs - 1] + val_after = d[ge] if ge < len(d) else 0 + n_gap = ge - gs + if n_gap > 0: + d[gs:ge] = np.linspace(val_before, val_after, n_gap + 2)[1:-1] + + d_max = max(d.max(), 1.0) + px_x = PAD_X + (t - self._t_min) / (self._t_max - self._t_min) * self._plot_w + px_y = self._h - PAD_Y_BOT - d / d_max * self._plot_h + + n = len(d) + if n > 
MAX_GRAPH_POINTS: + is_zero = (d == 0).astype(np.int8) + transitions = np.abs(np.diff(is_zero)) + critical = set(np.where(transitions == 1)[0].tolist()) + critical |= set((np.where(transitions == 1)[0] + 1).tolist()) + critical = {i for i in critical if 0 <= i < n} + base = set(np.round(np.linspace(0, n - 1, MAX_GRAPH_POINTS)).astype(int).tolist()) + keep = sorted(base | critical) + idx = np.array(keep) + px_x = px_x[idx] + px_y = px_y[idx] + + x0 = float(px_x[0]) + x1 = float(px_x[-1]) + poly_bottom = float(self._poly_bottom_y) + line_bottom = float(self._bottom_y - LINE_BASELINE_INSET) + + pts = [(float(x), float(y)) for x, y in zip(px_x, px_y)] + poly = [(x0, poly_bottom)] + pts + [(x1, poly_bottom)] + line = [(x0, line_bottom)] + pts + [(x1, line_bottom)] + + return [poly], [line] + + def _rebuild_images(self): + if self._poly_data is None: + return + self._unplayed_rgb = self._render_to_numpy(UNPLAYED_FILL_RGB, UNPLAYED_STROKE_RGB) + self._played_rgb = self._render_to_numpy(self._played_fill_rgb, self._played_stroke_rgb) + self._composite_rgb = np.empty_like(self._unplayed_rgb) + + def _render_to_numpy(self, fill_rgb, stroke_rgb): + ss = SUPERSAMPLE + sw, sh = self._w * ss, self._h * ss + + img = Image.new("RGB", (sw, sh), BG_COLOR_RGB) + draw = ImageDraw.Draw(img) + + polys, lines = self._poly_data + + for seg_poly in polys: + if len(seg_poly) >= 3: + draw.polygon([(x * ss, y * ss) for x, y in seg_poly], fill=fill_rgb) + + for seg_line in lines: + if len(seg_line) >= 2: + draw.line([(x * ss, y * ss) for x, y in seg_line], fill=stroke_rgb, width=LINE_WIDTH) + + img = img.resize((self._w, self._h), Image.LANCZOS) + return np.frombuffer(img.tobytes(), dtype=np.uint8).reshape((self._h, self._w, 3)).copy() \ No newline at end of file diff --git a/src/msd.exe b/src/msd.exe new file mode 100644 index 0000000..1210907 Binary files /dev/null and b/src/msd.exe differ diff --git a/src/msd_converter.py b/src/msd_converter.py new file mode 100644 index 0000000..0f67d6f 
# --- /dev/null -> b/src/msd_converter.py (new file, @@ -0,0 +1,108 @@) ---
import json
import os
import shutil
import subprocess
import sys

BASE_DIR = os.path.dirname(__file__)


def _resolve_msd_command():
    """Work out how to invoke the MSD calculator binary.

    Preference order:
      1. MSD_BIN_PATH env var (run through Wine when it is a ``.exe`` on a
         non-Windows host and Wine is available),
      2. bundled native ``msd`` next to this file,
      3. bundled ``msd.exe`` via Wine on non-Windows hosts.

    Returns:
        (cmd, path): ``cmd`` is the argv list to launch; ``path`` is the
        binary whose existence callers should check before launching.
    """
    env_path = os.environ.get("MSD_BIN_PATH")
    if env_path:
        if os.name != "nt" and env_path.lower().endswith(".exe"):
            wine = shutil.which("wine64") or shutil.which("wine")
            if wine:
                return [wine, env_path], env_path
        return [env_path], env_path

    windows_msd = os.path.join(BASE_DIR, "msd.exe")
    native_msd = os.path.join(BASE_DIR, "msd")

    if os.name == "nt":
        return [windows_msd], windows_msd

    if os.path.exists(native_msd):
        return [native_msd], native_msd

    if os.path.exists(windows_msd):
        wine = shutil.which("wine64") or shutil.which("wine")
        if wine:
            return [wine, windows_msd], windows_msd

    # Fall through with the (missing) native path so the caller raises a
    # useful FileNotFoundError.
    return [native_msd], native_msd


def parse_hitobjects(osu_file, mod="NM"):
    """Parse the [HitObjects] section of a .osu file.

    Args:
        osu_file: path to the .osu beatmap file.
        mod: "NM", "DT" (times compressed to 2/3) or "HT" (stretched to 4/3).

    Returns:
        list of ``{"x": int, "time": int (ms), "type": int}`` dicts.
    """
    hitobjects = []
    in_section = False

    with open(osu_file, "r", encoding="utf8") as f:
        for line in f:
            line = line.strip()

            if line == "[HitObjects]":
                in_section = True
                continue

            if not in_section or not line:
                continue

            # [HitObjects] is conventionally the last section; stop cleanly
            # if another section header appears anyway.
            if line.startswith("["):
                break

            parts = line.split(",")
            if len(parts) < 4:
                continue  # malformed row — skip instead of crashing

            x = int(parts[0])
            time = int(parts[2])
            obj_type = int(parts[3])

            if mod == "DT":
                time = int(time * 2 / 3)
            elif mod == "HT":
                time = int(time * 4 / 3)

            hitobjects.append({"x": x, "time": time, "type": obj_type})

    return hitobjects


def osu_to_etterna_rows(hitobjects, keycount=4):
    """Convert osu!mania hit objects into Etterna-style bitmask note rows.

    Notes landing on the same (rounded) second are OR'd into one row.

    Returns:
        list of ``{"notes": bitmask, "time": seconds}`` sorted by time.
    """
    rows = {}
    column_width = 512 / keycount

    for obj in hitobjects:
        time = round(obj["time"] / 1000.0, 4)
        # Clamp so x == 512 (or a stray negative x) cannot set a bit outside
        # the keycount range.
        column = min(max(int(obj["x"] // column_width), 0), keycount - 1)
        rows[time] = rows.get(time, 0) | (1 << column)
        # LN releases are intentionally ignored (obj_type & 128)

    return [{"notes": rows[t], "time": t} for t in sorted(rows)]


def calculate_msd(notes):
    """Run the external MSD binary on Etterna-style note rows.

    Args:
        notes: list of row dicts as produced by :func:`osu_to_etterna_rows`.

    Returns:
        The parsed JSON the binary writes to stdout.

    Raises:
        FileNotFoundError: no usable MSD binary was resolved.
        RuntimeError: msd.exe found without Wine, or the binary exited
            non-zero.
    """
    cmd, msd_path = _resolve_msd_command()

    if not os.path.exists(msd_path):
        raise FileNotFoundError(
            f"MSD binary not found at '{msd_path}'. Set MSD_BIN_PATH or add a compatible executable to src/."
        )

    if os.name != "nt" and msd_path.lower().endswith(".exe") and len(cmd) == 1:
        raise RuntimeError(
            "Found msd.exe on Linux/macOS, but Wine is not installed. Install Wine or provide a native msd via MSD_BIN_PATH."
        )

    popen_kwargs = {
        "stdin": subprocess.PIPE,
        "stdout": subprocess.PIPE,
        "stderr": subprocess.PIPE,
        "text": True,
    }

    if os.name == "nt":
        # Keep a console window from flashing up for the child process.
        popen_kwargs["creationflags"] = subprocess.CREATE_NO_WINDOW

    p = subprocess.Popen(cmd, **popen_kwargs)
    output, err = p.communicate(json.dumps(notes))

    if err:
        print("MSD ERROR:", err, file=sys.stderr)

    # Fail loudly on a non-zero exit instead of surfacing a confusing
    # JSONDecodeError from the (likely empty) stdout.
    if p.returncode != 0:
        raise RuntimeError(f"MSD binary exited with code {p.returncode}: {err.strip()}")

    return json.loads(output)


# --- diff --git a/src/osu_file_parser.py b/src/osu_file_parser.py
#     (new file mode 100644, index 0000000..f08a8ab, @@ -0,0 +1,66 @@) ---
class parser:
    """Streaming parser for osu!mania .osu files (original public API kept)."""

    def __init__(self, file_path):
        self.file_path = file_path
        self.od = -1              # overall difficulty; process() currently forces 9
        self.column_count = -1    # mania key count; -1 until CircleSize is seen
        self.columns = []         # per-note column index
        self.note_starts = []     # per-note start time (ms)
        self.note_ends = []       # per-note end time (ms); 0 for non-hold notes
        self.note_types = []      # per-note raw osu type bitmask

    def process(self):
        """Read the file, populating column_count, od and the note arrays."""
        with open(self.file_path, "r", encoding="utf-8") as f:
            try:
                for line in f:
                    self.read_metadata(f, line)

                    cc = self._read_column_count(line)
                    if cc != -1:
                        self.column_count = cc

                    # NOTE(review): OD is hardcoded rather than read from
                    # [Difficulty] OverallDifficulty — confirm intentional.
                    self.od = 9

                    if self.column_count != -1:
                        self._read_notes(f, line, self.column_count)

            except StopIteration:
                # A helper's next(f) ran past EOF — parsing is complete.
                pass

    def read_metadata(self, f, line):
        """Skip ahead through the [Metadata] section until the Source: tag."""
        if "[Metadata]" in line:
            while "Source:" not in line:
                line = next(f)

    def _read_column_count(self, line):
        """Return the key count from a CircleSize line, or -1 if not one.

        Parses the full value after the colon (the previous last-character
        hack misread e.g. "CircleSize:14" as 4). CircleSize 0 maps to 10K,
        preserving the original convention.
        """
        if "CircleSize:" not in line:
            return -1
        keys = int(float(line.split(":", 1)[1]))
        return 10 if keys == 0 else keys

    def _read_notes(self, f, line, column_count):
        """After the [HitObjects] header, consume note lines until EOF."""
        if "[HitObjects]" not in line:
            return
        line = next(f)
        while line is not None:  # loop exits via StopIteration at EOF
            self._parse_hit_object(line, column_count)
            line = next(f)

    def _parse_hit_object(self, line, column_count):
        """Append one hit object's column/start/type/end; skip malformed rows.

        All four lists are appended atomically (values computed first) so a
        bad field cannot leave them desynchronized.
        """
        line = line.strip()
        if not line:
            return
        params = line.split(",")
        if len(params) < 4:
            return  # malformed row — previously crashed on int('')

        column_width = 512 // column_count
        # Clamp so x == 512 cannot produce an out-of-range column.
        column = min(int(float(params[0])) // column_width, column_count - 1)
        start_time = int(params[2])
        note_type = int(params[3])

        # End time exists only for hold notes; non-holds carry a hit sample
        # here whose first field is "0". Fall back to 0 as before.
        end_field = params[5].split(":")[0] if len(params) > 5 else "0"
        try:
            end_time = int(end_field)
        except ValueError:
            end_time = 0

        self.columns.append(column)
        self.note_starts.append(start_time)
        self.note_types.append(note_type)
        self.note_ends.append(end_time)

    def get_parsed_data(self):
        """Return [column_count, columns, note_starts, note_ends, note_types, od]."""
        return [
            self.column_count,
            self.columns,
            self.note_starts,
            self.note_ends,
            self.note_types,
            self.od,
        ]
# (diff residue:) \ No newline at end of file