Initial import
This commit is contained in:
commit
5d271ec87e
19
.gitignore
vendored
Normal file
19
.gitignore
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
# Python
|
||||
__pycache__/
|
||||
*.pyc
|
||||
*.pyo
|
||||
*.egg-info/
|
||||
dist/
|
||||
build/
|
||||
|
||||
# PyInstaller
|
||||
*.spec
|
||||
|
||||
# IDE stuff
|
||||
.vscode/
|
||||
.idea/
|
||||
|
||||
# OS junk
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
/.venv-linux-build
|
||||
21
LICENSE
Normal file
21
LICENSE
Normal file
@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2026 TheBagelOfMan
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
21
README.md
Normal file
21
README.md
Normal file
@ -0,0 +1,21 @@
|
||||
# Daniel
|
||||
|
||||
A real-time rice difficulty calculator for 4k osu!mania which must be run alongside [tosu](https://tosu.app).
|
||||
|
||||
**[Website](https://thebagelofman.github.io/Daniel/)** · **[Download](https://github.com/TheBagelOfMan/Daniel/releases/latest)**
|
||||
|
||||
## Linux build
|
||||
|
||||
Use `build_linux.sh` to create a Linux binary:
|
||||
|
||||
```bash
|
||||
./build_linux.sh
|
||||
```
|
||||
|
||||
This script installs dependencies using python venv and outputs `dist/Daniel-linux`. If `src/msd` is missing, set `MSD_BIN_PATH` at runtime to a Linux-compatible `msd` executable.
|
||||
|
||||
On Linux/macOS, it will use Wine if only `src/msd.exe` is available.
|
||||
|
||||
## License
|
||||
|
||||
[MIT](LICENSE)
|
||||
65
build_linux.sh
Normal file
65
build_linux.sh
Normal file
@ -0,0 +1,65 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
cd "$ROOT_DIR"
|
||||
|
||||
if [[ "$(uname -s)" != "Linux" ]]; then
|
||||
echo "This script is for Linux only."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -v python3 >/dev/null 2>&1; then
|
||||
echo "python3 is required"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
VENV_DIR=".venv-linux-build"
|
||||
PYTHON_BIN="$VENV_DIR/bin/python"
|
||||
PYINSTALLER_BIN="$VENV_DIR/bin/pyinstaller"
|
||||
|
||||
if [[ ! -x "$PYTHON_BIN" ]]; then
|
||||
rm -rf "$VENV_DIR"
|
||||
python3 -m venv "$VENV_DIR"
|
||||
fi
|
||||
|
||||
if [[ ! -x "$PYTHON_BIN" ]]; then
|
||||
echo "Failed to create virtualenv python at $PYTHON_BIN"
|
||||
echo "Install the venv package for your distro (example: sudo apt install python3-venv) and rerun."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
"$PYTHON_BIN" -m ensurepip --upgrade >/dev/null 2>&1 || true
|
||||
"$PYTHON_BIN" -m pip install --upgrade pip
|
||||
"$PYTHON_BIN" -m pip install pyinstaller numpy pillow websocket-client
|
||||
|
||||
ADD_DATA_ARGS=()
|
||||
|
||||
if [[ -f src/icon.png ]]; then
|
||||
ADD_DATA_ARGS+=(--add-data "src/icon.png:.")
|
||||
fi
|
||||
if [[ -f src/icon.ico ]]; then
|
||||
ADD_DATA_ARGS+=(--add-data "src/icon.ico:.")
|
||||
fi
|
||||
if [[ -f src/msd ]]; then
|
||||
chmod +x src/msd
|
||||
ADD_DATA_ARGS+=(--add-data "src/msd:.")
|
||||
else
|
||||
echo "Warning: src/msd not found. Build will succeed, but MSD will require MSD_BIN_PATH at runtime."
|
||||
fi
|
||||
if [[ -f src/msd.exe ]]; then
|
||||
ADD_DATA_ARGS+=(--add-data "src/msd.exe:.")
|
||||
fi
|
||||
|
||||
"$PYINSTALLER_BIN" \
|
||||
--noconfirm \
|
||||
--clean \
|
||||
--onefile \
|
||||
--name Daniel-linux \
|
||||
--collect-all numpy \
|
||||
--collect-all PIL \
|
||||
--hidden-import websocket \
|
||||
"${ADD_DATA_ARGS[@]}" \
|
||||
src/daniel.py
|
||||
|
||||
echo "Build complete: dist/Daniel-linux"
|
||||
1031
docs/benchmark_report.html
Normal file
1031
docs/benchmark_report.html
Normal file
File diff suppressed because one or more lines are too long
BIN
docs/images/compact.png
Normal file
BIN
docs/images/compact.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 10 KiB |
BIN
docs/images/graph.png
Normal file
BIN
docs/images/graph.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 34 KiB |
BIN
docs/images/hero-screenshot.png
Normal file
BIN
docs/images/hero-screenshot.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 37 KiB |
BIN
docs/images/icon.ico
Normal file
BIN
docs/images/icon.ico
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 11 KiB |
BIN
docs/images/statistics.png
Normal file
BIN
docs/images/statistics.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 16 KiB |
638
docs/index.html
Normal file
638
docs/index.html
Normal file
@ -0,0 +1,638 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<link rel="icon" href="images/icon.ico" type="image/x-icon">
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<meta property="og:title" content="Daniel">
|
||||
<meta property="og:description" content="A real-time rice difficulty calculator for 4k osu!mania. Displays dan level and tier from Alpha through to Theta with live strain graphs and MSD skillset breakdowns.">
|
||||
<meta property="og:image" content="https://thebagelofman.github.io/Daniel/images/hero-screenshot.png">
|
||||
<meta property="og:url" content="https://thebagelofman.github.io/Daniel">
|
||||
<meta property="og:type" content="website">
|
||||
<meta name="twitter:card" content="summary_large_image">
|
||||
<meta name="description" content="A real-time rice difficulty calculator for 4k osu!mania. Displays dan level and tier from Alpha through to Theta with live strain graphs and MSD skillset breakdowns.">
|
||||
<title>Daniel</title>
|
||||
<style>
|
||||
@import url('https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@300;400;500;700&display=swap');
|
||||
|
||||
:root {
|
||||
--bg: #0e0f14;
|
||||
--surface: #16181f;
|
||||
--border: #2a2d3a;
|
||||
--text: #dde1f5;
|
||||
--muted: #626880;
|
||||
--exact: #7dd3fc;
|
||||
--adjacent: #4ade80;
|
||||
--within: #fb923c;
|
||||
--miss: #ef4444;
|
||||
--accent: #818cf8;
|
||||
}
|
||||
|
||||
*, *::before, *::after { box-sizing: border-box; margin: 0; padding: 0; }
|
||||
html { scroll-behavior: smooth; }
|
||||
|
||||
body {
|
||||
background: var(--bg);
|
||||
color: var(--text);
|
||||
font-family: 'JetBrains Mono', monospace;
|
||||
font-size: 14px;
|
||||
line-height: 1.8;
|
||||
}
|
||||
|
||||
a { color: var(--exact); text-decoration: none; }
|
||||
a:hover { text-decoration: underline; }
|
||||
|
||||
.page {
|
||||
max-width: 1100px;
|
||||
margin: 0 auto;
|
||||
padding: 0 32px;
|
||||
}
|
||||
|
||||
/* ── Nav ─────────────────────────────────────────────────────────── */
|
||||
nav {
|
||||
border-bottom: 1px solid var(--border);
|
||||
padding: 20px 0;
|
||||
margin-bottom: 80px;
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
}
|
||||
.nav-logo { font-size: 16px; font-weight: 700; color: var(--text); }
|
||||
.nav-logo span { color: var(--accent); }
|
||||
.nav-links { display: flex; gap: 24px; list-style: none; }
|
||||
.nav-links a {
|
||||
font-size: 12px; color: var(--muted);
|
||||
letter-spacing: 0.08em; text-transform: uppercase;
|
||||
}
|
||||
.nav-links a:hover { color: var(--text); text-decoration: none; }
|
||||
|
||||
/* ── Sections ────────────────────────────────────────────────────── */
|
||||
section { margin-bottom: 80px; }
|
||||
|
||||
.section-tag {
|
||||
font-size: 11px; color: var(--muted);
|
||||
letter-spacing: 0.15em; text-transform: uppercase; margin-bottom: 8px;
|
||||
}
|
||||
|
||||
h1 {
|
||||
font-size: 52px; font-weight: 700; color: var(--text);
|
||||
line-height: 1.05; letter-spacing: -0.02em; margin-bottom: 16px;
|
||||
}
|
||||
h1 span { color: var(--accent); }
|
||||
|
||||
h2 {
|
||||
font-size: 20px; font-weight: 700; color: var(--text);
|
||||
margin-bottom: 20px; padding-bottom: 10px; border-bottom: 1px solid var(--border);
|
||||
}
|
||||
|
||||
h3 { font-size: 14px; font-weight: 500; color: var(--text); margin-bottom: 4px; }
|
||||
|
||||
p { color: var(--muted); margin-bottom: 16px; max-width: 600px; }
|
||||
|
||||
/* ── Buttons ─────────────────────────────────────────────────────── */
|
||||
.btn {
|
||||
display: inline-flex; align-items: center; gap: 8px;
|
||||
padding: 10px 20px; font-family: 'JetBrains Mono', monospace;
|
||||
font-size: 13px; cursor: pointer; border: 1px solid var(--border);
|
||||
background: var(--surface); color: var(--text); text-decoration: none;
|
||||
transition: border-color 0.15s, color 0.15s;
|
||||
}
|
||||
.btn:hover { border-color: var(--exact); color: var(--exact); text-decoration: none; }
|
||||
.btn-primary { background: var(--exact); color: #0a0b0f; border-color: var(--exact); font-weight: 700; }
|
||||
.btn-primary:hover { background: #a5e3fd; border-color: #a5e3fd; color: #0a0b0f; }
|
||||
|
||||
/* ── Hero ────────────────────────────────────────────────────────── */
|
||||
.hero-desc { font-size: 15px; color: var(--muted); margin-bottom: 32px; max-width: 540px; }
|
||||
.hero-actions { display: flex; gap: 12px; flex-wrap: wrap; margin-bottom: 80px; }
|
||||
|
||||
#hero {
|
||||
position: relative;
|
||||
overflow: hidden;
|
||||
}
|
||||
.hero-bg {
|
||||
position: absolute;
|
||||
right: 0;
|
||||
top: 0;
|
||||
bottom: auto;
|
||||
height: 420px;
|
||||
width: 800px;
|
||||
background-image: url('./images/hero-screenshot.png');
|
||||
background-size: cover;
|
||||
background-position: right top;
|
||||
opacity: 0.75;
|
||||
pointer-events: none;
|
||||
z-index: 0;
|
||||
-webkit-mask-image: linear-gradient(to right, transparent 30%, black 70%);
|
||||
mask-image: linear-gradient(to right, transparent 30%, black 70%);
|
||||
}
|
||||
#hero > *:not(.hero-bg) {
|
||||
position: relative;
|
||||
z-index: 1;
|
||||
}
|
||||
|
||||
/* ── tab hint ────────────────────────────────────────────────────── */
|
||||
.tab-hint {
|
||||
font-size: 11px;
|
||||
color: var(--muted);
|
||||
letter-spacing: 0.08em;
|
||||
margin-bottom: 12px;
|
||||
}
|
||||
.tab-hint code { font-size: 11px; }
|
||||
|
||||
/* ── Stat row ────────────────────────────────────────────────────── */
|
||||
.stats-row {
|
||||
display: flex; border: 1px solid var(--border); background: var(--surface);
|
||||
}
|
||||
.stat { flex: 1; padding: 20px 24px; border-right: 1px solid var(--border); }
|
||||
.stat:last-child { border-right: none; }
|
||||
.stat-val { font-size: 26px; font-weight: 700; display: block; line-height: 1; margin-bottom: 6px; }
|
||||
.stat-lbl { font-size: 11px; color: var(--muted); letter-spacing: 0.08em; text-transform: uppercase; }
|
||||
.c-exact { color: var(--exact); }
|
||||
.c-adjacent { color: var(--adjacent); }
|
||||
.c-within { color: var(--within); }
|
||||
.c-muted { color: var(--muted); }
|
||||
|
||||
/* ── Steps list ──────────────────────────────────────────────────── */
|
||||
.steps { border: 1px solid var(--border); }
|
||||
.step { display: grid; grid-template-columns: 48px 1fr; border-bottom: 1px solid var(--border); }
|
||||
.step:last-child { border-bottom: none; }
|
||||
.step-num { font-size: 12px; color: var(--muted); padding: 20px 0 20px 16px; border-right: 1px solid var(--border); }
|
||||
.step-body { padding: 20px 24px; }
|
||||
.step-body p { font-size: 13px; margin-bottom: 0; max-width: 100%; }
|
||||
|
||||
code {
|
||||
color: var(--exact); background: var(--bg);
|
||||
border: 1px solid var(--border); padding: 1px 6px;
|
||||
font-family: 'JetBrains Mono', monospace;
|
||||
}
|
||||
|
||||
/* ── Feature list ────────────────────────────────────────────────── */
|
||||
.features {
|
||||
border: 1px solid var(--border);
|
||||
margin-bottom: 32px;
|
||||
}
|
||||
.feature {
|
||||
padding: 18px 20px;
|
||||
border-bottom: 1px solid var(--border);
|
||||
}
|
||||
.feature:last-child { border-bottom: none; }
|
||||
.feature h3 { font-size: 14px; margin-bottom: 2px; }
|
||||
.feature p { font-size: 13px; margin-bottom: 0; max-width: 100%; }
|
||||
|
||||
/* ── Download block ──────────────────────────────────────────────── */
|
||||
.download-block {
|
||||
background: var(--surface); border: 1px solid var(--border);
|
||||
padding: 28px; margin-bottom: 24px;
|
||||
}
|
||||
.dl-version {
|
||||
font-size: 11px; color: var(--accent); letter-spacing: 0.1em;
|
||||
background: rgba(129,140,248,0.1); border: 1px solid rgba(129,140,248,0.25);
|
||||
padding: 2px 8px; display: inline-block; margin-bottom: 10px;
|
||||
}
|
||||
.dl-info p { font-size: 13px; margin-bottom: 0; }
|
||||
.req-tags { display: flex; gap: 8px; flex-wrap: wrap; margin-top: 14px; }
|
||||
.req-tag {
|
||||
font-size: 12px; color: var(--muted);
|
||||
background: var(--bg); border: 1px solid var(--border); padding: 2px 10px;
|
||||
}
|
||||
.dl-btn-row {
|
||||
margin-top: 20px;
|
||||
padding-top: 20px;
|
||||
border-top: 1px solid var(--border);
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 16px;
|
||||
}
|
||||
.dl-count {
|
||||
font-size: 12px;
|
||||
color: var(--muted);
|
||||
}
|
||||
.dl-count span {
|
||||
color: var(--text);
|
||||
}
|
||||
|
||||
/* ── Changelog ───────────────────────────────────────────────────── */
|
||||
.changelog-entry {
|
||||
display: grid; grid-template-columns: 140px 1fr;
|
||||
gap: 24px; padding: 24px 0; border-bottom: 1px solid var(--border);
|
||||
}
|
||||
.changelog-entry:last-child { border-bottom: none; }
|
||||
.cl-ver { font-size: 13px; color: var(--accent); }
|
||||
.cl-date { font-size: 11px; color: var(--muted); margin-top: 4px; }
|
||||
.cl-body ul { list-style: none; display: flex; flex-direction: column; gap: 4px; margin-top: 8px; }
|
||||
.cl-body ul li { font-size: 13px; color: var(--muted); padding-left: 16px; position: relative; }
|
||||
.cl-body ul li::before { content: '–'; position: absolute; left: 0; color: var(--border); }
|
||||
|
||||
.tag {
|
||||
display: inline-block; font-size: 10px; letter-spacing: 0.08em;
|
||||
padding: 1px 7px; margin-left: 8px; vertical-align: middle; text-transform: uppercase;
|
||||
}
|
||||
.tag.new { background: rgba(74,222,128,0.1); color: var(--adjacent); border: 1px solid rgba(74,222,128,0.2); }
|
||||
.tag.fix { background: rgba(125,211,252,0.1); color: var(--exact); border: 1px solid rgba(125,211,252,0.2); }
|
||||
.tag.breaking { background: rgba(239,68,68,0.1); color: var(--miss); border: 1px solid rgba(239,68,68,0.2); }
|
||||
|
||||
/* ── FAQ ─────────────────────────────────────────────────────────── */
|
||||
.faq-item { border-bottom: 1px solid var(--border); }
|
||||
.faq-item:first-child { border-top: 1px solid var(--border); }
|
||||
.faq-q {
|
||||
width: 100%; background: none; border: none; padding: 16px 0;
|
||||
display: flex; justify-content: space-between; align-items: center;
|
||||
cursor: pointer; font-family: 'JetBrains Mono', monospace;
|
||||
font-size: 13px; color: var(--text); text-align: left; gap: 16px;
|
||||
}
|
||||
.faq-q:hover { color: var(--exact); }
|
||||
.faq-arrow { font-size: 11px; color: var(--muted); flex-shrink: 0; transition: transform 0.2s; }
|
||||
.faq-item.open .faq-arrow { transform: rotate(90deg); }
|
||||
.faq-a { max-height: 0; overflow: hidden; transition: max-height 0.3s ease; }
|
||||
.faq-item.open .faq-a { max-height: 300px; }
|
||||
.faq-a p { font-size: 13px; padding-bottom: 16px; max-width: 100%; }
|
||||
|
||||
/* ── Stats CTA ───────────────────────────────────────────────────── */
|
||||
.stats-cta {
|
||||
background: var(--surface); border: 1px solid var(--border); padding: 28px;
|
||||
display: flex; justify-content: space-between; align-items: center;
|
||||
gap: 24px; flex-wrap: wrap; margin-bottom: 80px;
|
||||
}
|
||||
.stats-cta h3 { font-size: 15px; margin-bottom: 4px; }
|
||||
.stats-cta p { font-size: 13px; margin-bottom: 0; }
|
||||
|
||||
/* ── Credits ─────────────────────────────────────────────────────── */
|
||||
.credit-row {
|
||||
display: grid; grid-template-columns: 160px 1fr;
|
||||
gap: 24px; padding: 18px 0; border-bottom: 1px solid var(--border);
|
||||
}
|
||||
.credit-row:first-child { border-top: 1px solid var(--border); }
|
||||
.credit-role { font-size: 11px; color: var(--muted); letter-spacing: 0.1em; text-transform: uppercase; padding-top: 2px; }
|
||||
.credit-info h3 { font-size: 14px; margin-bottom: 2px; }
|
||||
.credit-info p { font-size: 13px; margin-bottom: 0; }
|
||||
|
||||
/* ── Overlay screenshots ─────────────────────────────────────────── */
|
||||
.overlay-screenshot {
|
||||
width: 75%;
|
||||
display: block;
|
||||
margin: 12px auto 0;
|
||||
}
|
||||
|
||||
/* ── Footer ──────────────────────────────────────────────────────── */
|
||||
footer {
|
||||
border-top: 1px solid var(--border); padding: 24px 0 48px;
|
||||
display: flex; justify-content: space-between;
|
||||
}
|
||||
footer p { font-size: 12px; color: var(--muted); margin: 0; max-width: 100%; }
|
||||
|
||||
/* ── Responsive ──────────────────────────────────────────────────── */
|
||||
@media (max-width: 600px) {
|
||||
h1 { font-size: 36px; }
|
||||
.stats-row { flex-direction: column; }
|
||||
.stat { border-right: none; border-bottom: 1px solid var(--border); }
|
||||
.changelog-entry, .credit-row { grid-template-columns: 1fr; gap: 8px; }
|
||||
.stats-cta, .download-block { flex-direction: column; }
|
||||
.nav-links { display: none; }
|
||||
.dl-btn-row { flex-direction: column; align-items: flex-start; }
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="page">
|
||||
|
||||
<!-- Nav -->
|
||||
<nav>
|
||||
<div style="display:flex; align-items:center; gap:14px;">
|
||||
<div class="nav-logo">Daniel</div>
|
||||
<a href="https://www.youtube.com/@TheBagelOfMan/featured" target="_blank" title="YouTube" style="display:flex; align-items:center; color:var(--muted); transition:color 0.15s;" onmouseover="this.style.color='var(--text)'" onmouseout="this.style.color='var(--muted)'">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M23.498 6.186a3.016 3.016 0 0 0-2.122-2.136C19.505 3.545 12 3.545 12 3.545s-7.505 0-9.377.505A3.017 3.017 0 0 0 .502 6.186C0 8.07 0 12 0 12s0 3.93.502 5.814a3.016 3.016 0 0 0 2.122 2.136c1.871.505 9.376.505 9.376.505s7.505 0 9.377-.505a3.015 3.015 0 0 0 2.122-2.136C24 15.93 24 12 24 12s0-3.93-.502-5.814zM9.545 15.568V8.432L15.818 12l-6.273 3.568z"/>
|
||||
</svg>
|
||||
</a>
|
||||
<a href="https://github.com/TheBagelOfMan/Daniel" target="_blank" title="GitHub" style="display:flex; align-items:center; color:var(--muted); transition:color 0.15s;" onmouseover="this.style.color='var(--text)'" onmouseout="this.style.color='var(--muted)'">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 24 24" fill="currentColor">
|
||||
<path d="M12 0C5.374 0 0 5.373 0 12c0 5.302 3.438 9.8 8.207 11.387.599.111.793-.261.793-.577v-2.234c-3.338.726-4.033-1.416-4.033-1.416-.546-1.387-1.333-1.756-1.333-1.756-1.089-.745.083-.729.083-.729 1.205.084 1.839 1.237 1.839 1.237 1.07 1.834 2.807 1.304 3.492.997.107-.775.418-1.305.762-1.604-2.665-.305-5.467-1.334-5.467-5.931 0-1.311.469-2.381 1.236-3.221-.124-.303-.535-1.524.117-3.176 0 0 1.008-.322 3.301 1.23A11.509 11.509 0 0 1 12 5.803c1.02.005 2.047.138 3.006.404 2.291-1.552 3.297-1.23 3.297-1.23.653 1.653.242 2.874.118 3.176.77.84 1.235 1.911 1.235 3.221 0 4.609-2.807 5.624-5.479 5.921.43.372.823 1.102.823 2.222v3.293c0 .319.192.694.801.576C20.566 21.797 24 17.3 24 12c0-6.627-5.373-12-12-12z"/>
|
||||
</svg>
|
||||
</a>
|
||||
</div>
|
||||
<ul class="nav-links">
|
||||
<li><a href="#how-it-works">How it works</a></li>
|
||||
<li><a href="#download">Download</a></li>
|
||||
<li><a href="#changelog">Changelog</a></li>
|
||||
<li><a href="#faq">FAQ</a></li>
|
||||
<li><a href="#credits">Credits</a></li>
|
||||
</ul>
|
||||
</nav>
|
||||
|
||||
<!-- Hero -->
|
||||
<section id="hero">
|
||||
<div class="hero-bg"></div>
|
||||
<h1>Daniel</h1>
|
||||
<p style="font-size:11px; color:var(--muted); letter-spacing:0.12em; text-transform:uppercase; margin-bottom:12px;">
|
||||
Difficulty Analysis of Notechart Intensity for Estimated Levels
|
||||
</p>
|
||||
<p class="hero-desc">
|
||||
A real-time rice difficulty calculator for 4k osu!mania. Daniel links up to <a href="https://github.com/tosuapp/tosu" target="_blank">tosu</a> to read
|
||||
your currently selected beatmap, displaying its dan-referenced level and its tier (Low/Mid/High) from Alpha through to Theta. It breaks it down further using a live strain graph and individual MSD skillsets.
|
||||
</p>
|
||||
<div class="hero-actions">
|
||||
<a href="#download" class="btn btn-primary">↓ Download for Windows</a>
|
||||
<a href="benchmark_report.html" class="btn">◈ Benchmark stats</a>
|
||||
</div>
|
||||
<div class="stats-row">
|
||||
<div class="stat">
|
||||
<span class="stat-val c-exact" id="stat-exact">52.4%</span>
|
||||
<div class="stat-lbl">Within Exact Tier</div>
|
||||
</div>
|
||||
<div class="stat">
|
||||
<span class="stat-val c-adjacent" id="stat-adjacent">89.6%</span>
|
||||
<div class="stat-lbl">Within Adjacent Tiers</div>
|
||||
</div>
|
||||
<div class="stat">
|
||||
<span class="stat-val c-within" id="stat-within">98.7%</span>
|
||||
<div class="stat-lbl">Within one full dan Level</div>
|
||||
</div>
|
||||
<div class="stat">
|
||||
<span class="stat-val c-muted" id="stat-maps">145</span>
|
||||
<div class="stat-lbl">Beatmaps tested</div>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<!-- How it works -->
|
||||
<section id="how-it-works">
|
||||
<h2>How it works!</h2>
|
||||
<div class="feature">
|
||||
<h3>Real-time Detection</h3>
|
||||
<p>Daniel reads map data live in real-time using tosu. This means that whenever you change beatmap Daniel will update instantly!</p>
|
||||
</div>
|
||||
<div class="feature">
|
||||
<h3>Modified Sunny Rework Algorithm</h3>
|
||||
<p>The numerical dan rating is calculated using a modified version of the Sunny Rework algorithm which is optimised for high-level rice. Long notes and OD are not factored into the calculation. The numerical dan ratings are based on individual maps, which leads to marathons usually displaying the 'High' tier within that dan level.</p>
|
||||
</div>
|
||||
<div class="feature">
|
||||
<h3>Skillset Detection via MSD</h3>
|
||||
<p>Individual skillsets are calculated using MSD (MinaCalc Skill Difficulty). Overall rating is also calculated and jackspeed is used to determine whether a beatmap is primarily vibro based.</p>
|
||||
</div>
|
||||
<div class="feature">
|
||||
<h3>Mod Support</h3>
|
||||
<p>HT and DT are both supported as the rating adjusts automatically based on the selected mod.</p>
|
||||
</div>
|
||||
|
||||
<h3 style="margin: 32px 0 6px; font-size:13px; color:var(--muted); letter-spacing:0.1em; text-transform:uppercase;">Overlay views</h3>
|
||||
<p class="tab-hint">You can press <code>Tab</code> to cycle between the overlay views.</p>
|
||||
<div class="steps">
|
||||
<div class="step" style="display:block; padding:0;">
|
||||
<div class="step-body" style="padding:20px 24px 0">
|
||||
<h3>Compact</h3>
|
||||
<p style="margin-bottom:0">A minimalistic display showing only the estimated dan plus the numerical rating.</p>
|
||||
</div>
|
||||
<img src="./images/compact.png" alt="Compact view" class="overlay-screenshot">
|
||||
</div>
|
||||
<div class="step" style="display:block; padding:0; border-top:1px solid var(--border)">
|
||||
<div class="step-body" style="padding:20px 24px 0">
|
||||
<h3>Statistics</h3>
|
||||
<p style="margin-bottom:0">The compact view with added MSD skillset calculations for skillsets and overall MSD rating.</p>
|
||||
</div>
|
||||
<img src="./images/statistics.png" alt="Statistics view" class="overlay-screenshot">
|
||||
</div>
|
||||
<div class="step" style="display:block; padding:0; border-top:1px solid var(--border)">
|
||||
<div class="step-body" style="padding:20px 24px 0">
|
||||
<h3>Graph</h3>
|
||||
<p style="margin-bottom:0">The statistics view with an added straintime graph that
|
||||
updates in real-time based on drain time in the beatmap. Red vertical lines indicate pauses in gameplay.</p>
|
||||
</div>
|
||||
<img src="./images/graph.png" alt="Graph view" class="overlay-screenshot">
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<!-- Download -->
|
||||
<section id="download">
|
||||
<h2>Download</h2>
|
||||
<div class="download-block">
|
||||
<div class="dl-info">
|
||||
<h3 id="dl-filename">Daniel.exe</h3>
|
||||
<div class="dl-version" id="dl-version">Loading...</div>
|
||||
<p>Requirements:</p>
|
||||
<div class="req-tags">
|
||||
<span class="req-tag">Windows 10 / 11</span>
|
||||
<span class="req-tag">osu! / osu!(stable)</span>
|
||||
<span class="req-tag">tosu</span>
|
||||
</div>
|
||||
</div>
|
||||
<div class="dl-btn-row">
|
||||
<a href="#" id="dl-btn" class="btn btn-primary">↓ Download .exe</a>
|
||||
<span class="dl-count"><span id="download-count">...</span> downloads</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="steps">
|
||||
<div class="step">
|
||||
<div class="step-num">01</div>
|
||||
<div class="step-body">
|
||||
<h3>Install tosu</h3>
|
||||
<p> Download and install <a href="https://github.com/tosuapp/tosu" target="_blank">tosu</a>.</p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="step">
|
||||
<div class="step-num">02</div>
|
||||
<div class="step-body">
|
||||
<h3>Download Daniel</h3>
|
||||
<p>Save <code id="dl-filename-step">Daniel.exe</code> anywhere on your system.</p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="step">
|
||||
<div class="step-num">03</div>
|
||||
<div class="step-body">
|
||||
<h3>Launch tosu and Daniel</h3>
|
||||
<p>Run both tosu and Daniel alongside your osu! client.</p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="step">
|
||||
<div class="step-num">04</div>
|
||||
<div class="step-body">
|
||||
<h3>Have fun danning with Daniel</h3>
|
||||
<p>Boy do I love me some dans.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<!-- Changelog -->
|
||||
<section id="changelog">
|
||||
<h2>Changelog</h2>
|
||||
<div id="changelog-entries">
|
||||
<div style="color:var(--muted); font-size:13px;">Loading...</div>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<!-- FAQ -->
|
||||
<section id="faq">
|
||||
<h2>FAQ</h2>
|
||||
|
||||
<div class="faq-item">
|
||||
<button class="faq-q">How well does it calculate vibro? <span class="faq-arrow">▶</span></button>
|
||||
<div class="faq-a"><p>It will refuse to calculate vibro maps altogether and will just list them as 'VIBRO'. To my knowledge this doesn't affect any maps other than just straight vibro.</p></div>
|
||||
</div>
|
||||
|
||||
<div class="faq-item">
|
||||
<button class="faq-q">Does it affect in game performance? <span class="faq-arrow">▶</span></button>
|
||||
<div class="faq-a"><p>All calculations are done when the beatmap is first selected so any performance hits would only be in the song select menu.</p></div>
|
||||
</div>
|
||||
|
||||
<div class="faq-item">
|
||||
<button class="faq-q">How does the straintime graph work? <span class="faq-arrow">▶</span></button>
|
||||
<div class="faq-a"><p>The graph is based on the same modified Sunny Rework algorithm that's used for the numerical dan values. This means that it represents that actual difficulty of that point in the beatmap rather than the density.</p></div>
|
||||
</div>
|
||||
|
||||
<div class="faq-item">
|
||||
<button class="faq-q">Is the source code available? <span class="faq-arrow">▶</span></button>
|
||||
<div class="faq-a"><p>Source code is available <a href="https://github.com/TheBagelOfMan/Daniel" target="_blank">here!</a></p></div>
|
||||
</div>
|
||||
|
||||
<div class="faq-item">
|
||||
<button class="faq-q">What patterns is it bad at calculating? <span class="faq-arrow">▶</span></button>
|
||||
<div class="faq-a"><p>It's bad at quite a few different skillsets in particular. It heavily underrates speedjack (Vertex Beta Zeta) and awkward speed (Volcanic). It overrates in epsilon+ anchorjack and may also overrate some high bpm speed(Finixe Zeta). There are more patterns it struggles with occasionally but these are the most consistent ones.</p></div>
|
||||
</div>
|
||||
|
||||
<div class="faq-item">
|
||||
<button class="faq-q">Does it calculate long notes? <span class="faq-arrow">▶</span></button>
|
||||
<div class="faq-a"><p>No. It used to in testing versions but all LNs are converted to rice for the calculations now.</p></div>
|
||||
</div>
|
||||
|
||||
<div class="faq-item">
|
||||
<button class="faq-q">Does it factor in OD into the calculation? <span class="faq-arrow">▶</span></button>
|
||||
<div class="faq-a"><p>No. It used to in testing versions but now all maps are calculated as if they are OD9.</p></div>
|
||||
</div>
|
||||
|
||||
<div class="faq-item">
|
||||
<button class="faq-q">Will you make an LN dan calculator? <span class="faq-arrow">▶</span></button>
|
||||
<div class="faq-a"><p>Maybe but not confirmed.</p></div>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<!-- Credits -->
|
||||
<section id="credits">
|
||||
<h2>Credits</h2>
|
||||
<div class="credit-row">
|
||||
<div class="credit-role">Developer</div>
|
||||
<div class="credit-info">
|
||||
<h3>TheBagelOfMan</h3>
|
||||
</div>
|
||||
</div>
|
||||
<div class="credit-row">
|
||||
<div class="credit-role"><a href="https://github.com/sunnyxxy/Star-Rating-Rebirth" target="_blank">Sunny Rework Algorithm Used</a></div>
|
||||
<div class="credit-info">
|
||||
<h3>[Crz]sunnyxxy, Natelytle, vernonlim, ChlorieHCl, Imperial Wolf</h3>
|
||||
</div>
|
||||
</div>
|
||||
<div class="credit-row">
|
||||
<div class="credit-role"><a href="https://github.com/etternagame/etterna" target="_blank">MSD algorithm</a></div>
|
||||
<div class="credit-info">
|
||||
<h3>MinaciousGrace</h3>
|
||||
</div>
|
||||
</div>
|
||||
<div class="credit-row">
|
||||
<div class="credit-role"><a href="https://github.com/tosuapp/tosu" target="_blank">tosu</a></div>
|
||||
<div class="credit-info">
|
||||
<h3>KotRik, Cherry</h3>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<!-- Footer -->
|
||||
<footer>
|
||||
<p>Daniel Daniel Daniel Daniel Daniel</p>
|
||||
<p id="footer-version">...</p>
|
||||
</footer>
|
||||
|
||||
</div>
|
||||
|
||||
<script>
    // FAQ accordion: at most one item open at a time; clicking an open
    // item's question closes it again.
    document.querySelectorAll('.faq-q').forEach(btn => {
        btn.addEventListener('click', () => {
            const item = btn.closest('.faq-item');
            const isOpen = item.classList.contains('open');
            document.querySelectorAll('.faq-item.open').forEach(el => el.classList.remove('open'));
            if (!isOpen) item.classList.add('open');
        });
    });

    // Pull headline accuracy stats out of the generated benchmark report.
    // NOTE(review): vals[3] is skipped here — presumably a non-stat column in
    // benchmark_report.html; confirm against that file's .stat-val order.
    fetch('benchmark_report.html')
        .then(r => r.text())
        .then(html => {
            const doc = new DOMParser().parseFromString(html, 'text/html');
            const vals = doc.querySelectorAll('.stat-val');
            if (vals.length >= 5) {
                const exact = vals[0].textContent.trim();
                const adjacent = vals[1].textContent.trim();
                const within = vals[2].textContent.trim();
                const maps = vals[4].textContent.trim();
                const exactN = parseFloat(exact);
                const adjacentN = parseFloat(adjacent);
                const withinN = parseFloat(within);
                // "adjacent" and "within" are displayed cumulatively.
                document.getElementById('stat-exact').textContent = exact;
                document.getElementById('stat-adjacent').textContent = (exactN + adjacentN).toFixed(1) + '%';
                document.getElementById('stat-within').textContent = (exactN + adjacentN + withinN).toFixed(1) + '%';
                document.getElementById('stat-maps').textContent = maps;
            }
        })
        .catch(() => {}); // stats are optional; keep placeholders on failure

    // Populate download button, version labels and changelog from the
    // GitHub releases API.
    fetch('https://api.github.com/repos/TheBagelOfMan/Daniel/releases')
        .then(r => r.json())
        .then(releases => {
            // Total download count across every asset of every release.
            let total = 0;
            releases.forEach(r => r.assets.forEach(a => total += a.download_count));
            document.getElementById('download-count').textContent = total.toLocaleString();

            const latest = releases[0];
            if (!latest) return;

            const version = latest.tag_name;
            // Prefer a direct .exe link; fall back to the releases page.
            const exeAsset = latest.assets.find(a => a.name.endsWith('.exe'));
            const downloadUrl = exeAsset
                ? exeAsset.browser_download_url
                : `https://github.com/TheBagelOfMan/Daniel/releases/latest`;
            const filename = exeAsset ? exeAsset.name : 'Daniel.exe';

            document.getElementById('dl-version').textContent = `${version} - Latest`;
            document.getElementById('dl-filename').textContent = filename;
            document.getElementById('dl-btn').href = downloadUrl;
            document.getElementById('dl-filename-step').textContent = filename;
            document.getElementById('footer-version').textContent = version;

            // Changelog: one entry per release; bullets built from the
            // markdown-ish release body, stripping list markers and **bold**.
            const changelogEl = document.getElementById('changelog-entries');
            if (releases.length === 0) {
                changelogEl.innerHTML = '<div style="color:var(--muted); font-size:13px;">No releases found.</div>';
            } else {
                changelogEl.innerHTML = releases.map((r, i) => {
                    const date = new Date(r.published_at).toLocaleDateString('en-GB', { year: 'numeric', month: 'long', day: 'numeric' });
                    const bodyHtml = r.body
                        ? r.body
                            .split('\n')
                            .filter(l => l.trim())
                            .map(l => {
                                const stripped = l.replace(/^[-*]\s*/, '').replace(/\*\*(.*?)\*\*/g, '$1').trim();
                                return `<li>${stripped}</li>`;
                            })
                            .join('')
                        : '<li>No release notes.</li>';
                    return `
                        <div class="changelog-entry">
                            <div>
                                <div class="cl-ver">${r.tag_name}${i === 0 ? ' <span class="tag new">latest</span>' : ''}</div>
                                <div class="cl-date">${date}</div>
                            </div>
                            <div class="cl-body"><ul>${bodyHtml}</ul></div>
                        </div>`;
                }).join('');
            }
        })
        .catch(() => {
            // Offline or rate-limited: degrade to the generic releases link.
            document.getElementById('download-count').textContent = '–';
            document.getElementById('dl-version').textContent = 'latest';
            document.getElementById('dl-btn').href = 'https://github.com/TheBagelOfMan/Daniel/releases/latest';
            document.getElementById('footer-version').textContent = '–';
        });
</script>
|
||||
<script data-goatcounter="https://thebagelofman.goatcounter.com/count" async src="//gc.zgo.at/count.js"></script>
|
||||
</body>
|
||||
</html>
|
||||
506
src/algorithm.py
Normal file
506
src/algorithm.py
Normal file
@ -0,0 +1,506 @@
|
||||
import math
|
||||
from collections import defaultdict
|
||||
|
||||
import numpy as np
|
||||
import osu_file_parser as osu_parser
|
||||
|
||||
# --- Constants ---

# Gap (ms) with no nearby note beyond which the graph treats time as a break.
BREAK_ZERO_THRESHOLD_MS = 400
# Uniform resampling step (ms) used when smoothing the display graph.
GRAPH_RESAMPLE_INTERVAL_MS = 100
# Gaussian smoothing width (ms) for the display graph.
SMOOTH_SIGMA_MS = 800
|
||||
|
||||
|
||||
# --- Helper Functions ---
|
||||
|
||||
def gaussian_filter1d(data, sigma, mode="constant", cval=0.0):
    """Smooth 1-D *data* with a truncated Gaussian kernel.

    The kernel is cut off at a radius of roughly 4*sigma and normalised to
    sum to 1; *mode*/*cval* are forwarded to np.pad so the output keeps the
    input's length.
    """
    radius = int(4 * sigma + 0.5)
    offsets = np.arange(-radius, radius + 1)
    weights = np.exp(-0.5 * (offsets / sigma) ** 2)
    weights = weights / weights.sum()

    pad_kwargs = {"constant_values": cval} if mode == "constant" else {}
    padded = np.pad(data, radius, mode=mode, **pad_kwargs)
    return np.convolve(padded, weights, mode="valid")
|
||||
|
||||
|
||||
def cumulative_sum(x, f):
    """Exact cumulative integral of piecewise-constant *f* on sorted corners *x*.

    f[i] is taken to hold on the interval [x[i], x[i+1]); the returned array
    gives the integral from x[0] up to each corner (0 at the first corner).
    """
    segment_areas = f[:-1] * np.diff(x)
    totals = np.zeros(len(x))
    totals[1:] = np.cumsum(segment_areas)
    return totals
|
||||
|
||||
|
||||
def smooth_on_corners(x, f, window, scale=1.0, mode="sum"):
    """Vectorised sliding-window integral of piecewise-constant f.

    Args:
        x: sorted corner positions (ms).
        f: piecewise-constant values; f[i] holds on [x[i], x[i+1]).
        window: half-width of the window centred on each corner.
        scale: multiplier applied to the integral when mode == "sum".
        mode: "sum" returns scale * integral over the window;
              "avg" returns the window's mean value instead.
    """
    x = np.asarray(x, dtype=float)
    f = np.asarray(f, dtype=float)
    # Antiderivative of f evaluated at every corner.
    F = cumulative_sum(x, f)

    # Window endpoints, clamped to the covered range.
    a = np.clip(x - window, x[0], x[-1])
    b = np.clip(x + window, x[0], x[-1])

    def _query_vec(q_arr):
        # Evaluate the antiderivative at arbitrary points: F at the corner
        # below, plus the linear remainder on that segment.
        idx = np.searchsorted(x, q_arr) - 1
        idx = np.clip(idx, 0, len(x) - 2)
        return F[idx] + f[idx] * (q_arr - x[idx])

    # Integral of f over [a, b] for every corner at once.
    val = _query_vec(b) - _query_vec(a)

    if mode == "avg":
        span = b - a
        # Guard against zero-width windows (degenerate corner sets).
        return np.where(span > 0, val / span, 0.0)
    return scale * val
|
||||
|
||||
|
||||
def interp_values(new_x, old_x, old_vals):
    """Linearly interpolate the curve (old_x, old_vals) at positions new_x."""
    resampled = np.interp(new_x, old_x, old_vals)
    return resampled
|
||||
|
||||
|
||||
def step_interp(new_x, old_x, old_vals):
    """Zero-order-hold (step) interpolation: value of the segment containing new_x."""
    bucket = np.searchsorted(old_x, new_x, side="right") - 1
    bucket = np.clip(bucket, 0, len(old_vals) - 1)
    return old_vals[bucket]
|
||||
|
||||
|
||||
def rescale_high(sr):
    """Compress star ratings above 9: the excess over 9 is divided by 1.2."""
    if sr > 9:
        return 9 + (sr - 9) / 1.2
    return sr
|
||||
|
||||
|
||||
# --- Preprocessing ---
|
||||
|
||||
def preprocess_file(file_path, mod):
    """Parse a .osu file and derive the inputs for the difficulty calculation.

    Args:
        file_path: path to the beatmap file.
        mod: rate mod, one of "NM", "DT", "HT".

    Returns:
        (x, K, T, note_seq, note_seq_by_column) where x is the hit-window
        leniency term, K the key count, T the chart length in ms (last hit
        time + 1), note_seq a time-sorted list of (column, hit_time) tuples,
        and note_seq_by_column the same notes grouped per column.
    """
    p_obj = osu_parser.parser(file_path)
    p_obj.process()
    p = p_obj.get_parsed_data()
    # NOTE(review): p appears indexed as p[0]=key count, p[1]=columns,
    # p[2]=hit times, p[5]=OD — confirm against osu_file_parser.

    note_seq = []
    for i in range(len(p[1])):
        k = p[1][i]
        h = p[2][i]
        # Rate mods rescale hit times (DT plays 1.5x faster, HT 0.75x).
        if mod == "DT":
            h = int(math.floor(h * 2 / 3))
        elif mod == "HT":
            h = int(math.floor(h * 4 / 3))
        note_seq.append((k, h))

    # Leniency term derived from OD (p[5]), compressed above a knee point.
    x = 0.3 * ((64.5 - math.ceil(p[5] * 3)) / 500) ** 0.5
    x = min(x, 0.6 * (x - 0.09) + 0.09)
    note_seq.sort(key=lambda tup: (tup[1], tup[0]))

    # Group notes per column, keeping each column's notes time-ordered.
    note_dict = defaultdict(list)
    for tup in note_seq:
        note_dict[tup[0]].append(tup)
    note_seq_by_column = sorted(note_dict.values(), key=lambda lst: lst[0][0])

    K = p[0]
    T = max(n[1] for n in note_seq) + 1

    return x, K, T, note_seq, note_seq_by_column
|
||||
|
||||
|
||||
# --- Corner Computation ---
|
||||
|
||||
def get_corners(T, note_seq):
    """Build the time-corner grids the piecewise curves are evaluated on.

    Returns (all_corners, base_corners, A_corners) as sorted float arrays,
    each clipped to [0, T].
    """
    base = {0, T}
    for _, hit in note_seq:
        base.update((hit, hit + 501, hit - 499, hit + 1))
    base_sorted = sorted(t for t in base if 0 <= t <= T)

    wide = {0, T}
    for _, hit in note_seq:
        wide.update((hit, hit + 1000, hit - 1000))
    wide_sorted = sorted(t for t in wide if 0 <= t <= T)

    merged = sorted(set(base_sorted) | set(wide_sorted))
    return (
        np.array(merged, dtype=float),
        np.array(base_sorted, dtype=float),
        np.array(wide_sorted, dtype=float),
    )
|
||||
|
||||
|
||||
# --- Key Usage ---
|
||||
|
||||
def get_key_usage(K, T, note_seq, base_corners):
    """Per-column boolean mask: True on corners within ±150 ms of a note."""
    usage = {col: np.zeros(len(base_corners), dtype=bool) for col in range(K)}
    for col, hit in note_seq:
        window_start = max(hit - 150, 0)
        window_end = min(hit + 150, T - 1)
        lo = np.searchsorted(base_corners, window_start, side="left")
        hi = np.searchsorted(base_corners, window_end, side="left")
        usage[col][lo:hi] = True
    return usage
|
||||
|
||||
|
||||
def get_key_usage_400(K, T, note_seq, base_corners):
    """Per-column weighted note density over a ±400 ms window.

    Each note contributes a parabolic bump: 3.75 at its own corner,
    decaying to 0 at ±400 ms.
    """
    key_usage_400 = {k: np.zeros(len(base_corners), dtype=float) for k in range(K)}
    for k, h in note_seq:
        start = max(h, 0)
        li = np.searchsorted(base_corners, start - 400, side="left")
        ri = np.searchsorted(base_corners, start + 400, side="left")
        mid = np.searchsorted(base_corners, start, side="left")

        # Full weight exactly at the note's corner...
        key_usage_400[k][mid] += 3.75
        # ...and the quadratic falloff on corners to either side of it.
        for idx_range in [np.arange(li, mid), np.arange(mid + 1, ri)]:
            key_usage_400[k][idx_range] += 3.75 - 3.75 / 400 ** 2 * (base_corners[idx_range] - start) ** 2
    return key_usage_400
|
||||
|
||||
|
||||
# --- Difficulty Components ---
|
||||
|
||||
def compute_anchor(K, key_usage_400, base_corners):
    """Anchor multiplier rewarding balanced load across the busiest columns.

    At each corner, columns are ranked by their ±400 ms usage; adjacent
    ranks whose counts are close (ratio near 0.5 scores highest) raise the
    anchor value.
    """
    # (corners, K) usage matrix, sorted descending per corner.
    counts = np.stack([key_usage_400[k] for k in range(K)], axis=1)
    counts = np.sort(counts, axis=1)[:, ::-1]

    nonzero_mask = counts > 0
    n_nz = nonzero_mask.sum(axis=1)

    # Adjacent-rank pairs; after the descending sort, c0 >= c1.
    c0 = counts[:, :-1]
    c1 = counts[:, 1:]
    safe_c0 = np.where(c0 > 0, c0, 1.0)  # avoid division by zero
    ratio = np.where(c0 > 0, c1 / safe_c0, 0.0)
    # Parabola peaking at ratio == 0.5, zero at ratio 0 or 1.
    weight = 1 - 4 * (0.5 - ratio) ** 2

    # Only pairs where both ranks are in use contribute.
    pair_valid = nonzero_mask[:, :-1] & nonzero_mask[:, 1:]
    walk = np.sum(np.where(pair_valid, c0 * weight, 0.0), axis=1)
    max_walk = np.sum(np.where(pair_valid, c0, 0.0), axis=1)

    # Normalised score; needs at least two active columns to be nonzero.
    raw_anchor = np.where(n_nz > 1, walk / np.maximum(max_walk, 1e-9), 0.0)
    return 1 + np.minimum(raw_anchor - 0.18, 5 * (raw_anchor - 0.22) ** 3)
|
||||
|
||||
|
||||
def compute_Jbar(K, T, x, note_seq_by_column, base_corners):
    """Same-column (jack) pressure per corner.

    Returns (delta_ks, Jbar): the per-column note-gap arrays (seconds) and
    the aggregated jack-pressure curve sampled on base_corners.
    """
    def jack_nerfer(delta):
        # Softens pressure for gaps near 80 ms (delta is in seconds).
        return 1 - 7e-5 * (0.15 + np.abs(delta - 0.08)) ** (-4)

    J_ks = {k: np.zeros(len(base_corners)) for k in range(K)}
    # Gap between consecutive same-column notes; 1e9 means "no jack here".
    delta_ks = {k: np.full(len(base_corners), 1e9) for k in range(K)}

    for k in range(K):
        notes = note_seq_by_column[k]
        if len(notes) < 2:
            continue
        starts = np.array([n[1] for n in notes[:-1]], dtype=float)
        ends = np.array([n[1] for n in notes[1:]], dtype=float)
        deltas = 0.001 * (ends - starts)  # ms -> s
        vals = deltas ** -1 * (deltas + 0.11 * x ** 0.25) ** -1 * jack_nerfer(deltas)

        # Paint each pair's value across the corners its gap spans.
        for start, end, delta, val in zip(starts, ends, deltas, vals):
            li = np.searchsorted(base_corners, start, side="left")
            ri = np.searchsorted(base_corners, end, side="left")
            if ri > li:
                J_ks[k][li:ri] = val
                delta_ks[k][li:ri] = delta

    # 500 ms sliding-window integral of each column's pressure.
    Jbar_ks = {
        k: smooth_on_corners(base_corners, J_ks[k], window=500, scale=0.001, mode="sum")
        for k in range(K)
    }

    # Aggregate columns with a p=5 power mean weighted by 1/delta, so the
    # fastest jacks dominate the combined curve.
    Jbar_stack = np.stack([Jbar_ks[k] for k in range(K)], axis=0)
    delta_stack = np.stack([delta_ks[k] for k in range(K)], axis=0)
    weights = 1.0 / delta_stack
    num = np.sum(np.maximum(Jbar_stack, 0) ** 5 * weights, axis=0)
    den = np.sum(weights, axis=0)
    Jbar = (num / np.maximum(den, 1e-9)) ** 0.2

    return delta_ks, Jbar
|
||||
|
||||
|
||||
def compute_Xbar(K, T, x, note_seq_by_column, active_columns, base_corners):
    """Cross-column pressure: strain from alternating between adjacent columns.

    Column *pairs* are indexed k = 0..K (with virtual edge pairs at 0 and K);
    each pair's contribution is weighted by cross_matrix[K][k].
    """
    # Per-keycount pair weights; row index is the key count K.
    cross_matrix = [
        [-1],
        [0.075, 0.075],
        [0.125, 0.05, 0.125],
        [0.125, 0.125, 0.125, 0.125],
        [0.175, 0.25, 0.05, 0.25, 0.175],
        [0.175, 0.25, 0.175, 0.175, 0.25, 0.175],
        [0.225, 0.35, 0.25, 0.05, 0.25, 0.35, 0.225],
        [0.225, 0.35, 0.25, 0.225, 0.225, 0.25, 0.35, 0.225],
        [0.275, 0.45, 0.35, 0.25, 0.05, 0.25, 0.35, 0.45, 0.275],
        [0.275, 0.45, 0.35, 0.25, 0.275, 0.275, 0.25, 0.35, 0.45, 0.275],
        [0.325, 0.55, 0.45, 0.35, 0.25, 0.05, 0.25, 0.35, 0.45, 0.55, 0.325],
    ]
    cross_coeff = cross_matrix[K]
    X_ks = {k: np.zeros(len(base_corners)) for k in range(K + 1)}
    fast_cross = {k: np.zeros(len(base_corners)) for k in range(K + 1)}

    for k in range(K + 1):
        # Merge the two columns of the pair; the edge pairs use one column.
        if k == 0:
            notes_in_pair = note_seq_by_column[0]
        elif k == K:
            notes_in_pair = note_seq_by_column[K - 1]
        else:
            notes_in_pair = sorted(
                note_seq_by_column[k - 1] + note_seq_by_column[k], key=lambda t: t[1]
            )

        for i in range(1, len(notes_in_pair)):
            start = notes_in_pair[i - 1][1]
            end = notes_in_pair[i][1]
            li = np.searchsorted(base_corners, start, side="left")
            ri = np.searchsorted(base_corners, end, side="left")
            if ri <= li:
                continue

            delta = 0.001 * (notes_in_pair[i][1] - notes_in_pair[i - 1][1])
            val = 0.16 * max(x, delta) ** -2

            # Discount pairs where one side is idle at both ends of the gap.
            left_inactive = (k - 1) not in active_columns[li] and (k - 1) not in active_columns[ri]
            right_inactive = k not in active_columns[li] and k not in active_columns[ri]
            if left_inactive or right_inactive:
                val *= 1 - cross_coeff[k]

            X_ks[k][li:ri] = val
            # Extra bonus term for very fast cross-column alternation.
            fast_cross[k][li:ri] = max(0, 0.4 * max(delta, 0.06, 0.75 * x) ** -2 - 80)

    # Weighted sum of pair pressures plus a geometric-mean coupling of the
    # fast-cross bonus between neighbouring pairs.
    X_base = np.array([
        sum(X_ks[k][i] * cross_coeff[k] for k in range(K + 1)) +
        sum(
            np.sqrt(fast_cross[k][i] * cross_coeff[k] * fast_cross[k + 1][i] * cross_coeff[k + 1])
            for k in range(K)
        )
        for i in range(len(base_corners))
    ])

    # 500 ms sliding-window integral, matching the other components.
    return smooth_on_corners(base_corners, X_base, window=500, scale=0.001, mode="sum")
|
||||
|
||||
|
||||
def compute_Pbar(K, T, x, note_seq, anchor, base_corners):
    """Pressing intensity: strain from overall note-to-note speed.

    Each consecutive note pair contributes an increment over the corners it
    spans, boosted for stream-like BPMs and scaled by the anchor curve.
    """
    def stream_booster(delta):
        # delta (s) -> effective stream BPM.
        bpm = np.clip(7.5 / delta, 0, 420)
        # Smooth boost ramping in around 175 BPM...
        primary = 0.10 / (1 + np.exp(-0.06 * (bpm - 175)))
        # ...plus an extra boost inside the 200–350 BPM band.
        secondary = np.where(
            (bpm >= 200) & (bpm <= 350),
            0.30 * (1 - np.exp(-0.02 * (bpm - 200))),
            0.0,
        )
        return 1 + primary + secondary

    P_step = np.zeros(len(base_corners))

    for i in range(len(note_seq) - 1):
        h_l = note_seq[i][1]
        h_r = note_seq[i + 1][1]
        delta_time = h_r - h_l

        # Simultaneous notes (chord): add a flat spike at that instant.
        if delta_time < 1e-9:
            spike = 1000 * (0.02 * (4 / x - 24)) ** 0.25
            li = np.searchsorted(base_corners, h_l, side="left")
            ri = np.searchsorted(base_corners, h_l, side="right")
            if ri > li:
                P_step[li:ri] += spike
            continue

        li = np.searchsorted(base_corners, h_l, side="left")
        ri = np.searchsorted(base_corners, h_r, side="left")
        if ri <= li:
            continue

        delta = 0.001 * delta_time  # ms -> s
        b_val = stream_booster(delta)
        base_inc = (0.08 * x ** -1 * (1 - 24 * x ** -1 * (x / 6) ** 2)) ** 0.25

        # Gaps shorter than 2x/3 use a hit-window-dependent curve instead
        # of the flat base increment.
        if delta < 2 * x / 3:
            inc = delta ** -1 * (0.08 * x ** -1 * (1 - 24 * x ** -1 * (delta - x / 2) ** 2)) ** 0.25 * max(b_val, 1)
        else:
            inc = delta ** -1 * base_inc * max(b_val, 1)

        # Apply the anchor multiplier, capped so it never exceeds
        # max(inc, 2*inc - 10).
        seg_anchor = anchor[li:ri]
        P_step[li:ri] += np.minimum(inc * seg_anchor, np.maximum(inc, inc * 2 - 10))

    # 500 ms sliding-window integral, matching the other components.
    return smooth_on_corners(base_corners, P_step, window=500, scale=0.001, mode="sum")
|
||||
|
||||
|
||||
def compute_Abar(K, T, x, note_seq_by_column, active_columns, delta_ks, A_corners, base_corners):
    """Unevenness: penalty multiplier (<= 1) for mismatched gaps between
    adjacent active columns, smoothed onto the A_corners grid.
    """
    # dks[k0][i]: gap mismatch between active column k0 and the next active
    # column, plus a surcharge when either gap exceeds 110 ms.
    dks = {k: np.zeros(len(base_corners)) for k in range(K - 1)}
    for i in range(len(base_corners)):
        cols = active_columns[i]
        for j in range(len(cols) - 1):
            k0, k1 = cols[j], cols[j + 1]
            dks[k0][i] = abs(delta_ks[k0][i] - delta_ks[k1][i]) + 0.4 * max(
                0, max(delta_ks[k0][i], delta_ks[k1][i]) - 0.11
            )

    A_step = np.ones(len(A_corners))
    # Map each A-corner onto its position in the base-corner grid.
    bc_idx = np.clip(np.searchsorted(base_corners, A_corners), 0, len(base_corners) - 1)

    for i in range(len(A_corners)):
        idx = bc_idx[i]
        cols = active_columns[idx]
        for j in range(len(cols) - 1):
            k0, k1 = cols[j], cols[j + 1]
            d_val = dks[k0][idx]
            dk0, dk1 = delta_ks[k0][idx], delta_ks[k1][idx]
            # Nearly identical gaps get penalised hardest; the penalty fades
            # back to 1.0 as the mismatch approaches 70 ms.
            if d_val < 0.02:
                A_step[i] *= min(0.75 + 0.5 * max(dk0, dk1), 1)
            elif d_val < 0.07:
                A_step[i] *= min(0.65 + 5 * d_val + 0.5 * max(dk0, dk1), 1)

    # 250 ms sliding average for a smooth multiplier curve.
    return smooth_on_corners(A_corners, A_step, window=250, mode="avg")
|
||||
|
||||
|
||||
def compute_C_and_Ks(K, T, note_seq, key_usage, base_corners):
    """Note density (C) and active-key count (Ks) sampled on base_corners.

    C counts notes inside a ±500 ms window centred on each corner; Ks is
    the number of columns active at each corner, floored at 1.
    """
    hit_times = np.array(sorted(h for _, h in note_seq), dtype=float)

    window_lo = np.searchsorted(hit_times, base_corners - 500, side="left")
    window_hi = np.searchsorted(hit_times, base_corners + 500, side="left")
    density = (window_hi - window_lo).astype(float)

    usage_matrix = np.stack([key_usage[col] for col in range(K)], axis=0)
    active_keys = np.maximum(usage_matrix.sum(axis=0), 1).astype(float)

    return density, active_keys
|
||||
|
||||
|
||||
# --- Graph Post-Processing ---
|
||||
|
||||
def _apply_proximity_envelope(all_corners, D_all, note_seq):
|
||||
if not note_seq:
|
||||
return D_all.copy()
|
||||
|
||||
note_times = np.sort(np.array([float(h) for _, h in note_seq]))
|
||||
PROXIMITY_FADE_MS = 500.0
|
||||
|
||||
idx = np.searchsorted(note_times, all_corners)
|
||||
d_after = np.abs(note_times[np.clip(idx, 0, len(note_times) - 1)] - all_corners)
|
||||
d_before = np.abs(note_times[np.clip(idx - 1, 0, len(note_times) - 1)] - all_corners)
|
||||
d = np.minimum(d_after, d_before)
|
||||
|
||||
envelope = 0.5 * (1.0 + np.cos(np.pi * np.clip(d / PROXIMITY_FADE_MS, 0.0, 1.0)))
|
||||
return D_all * envelope
|
||||
|
||||
|
||||
def smooth_D_for_graph(all_corners, D_all, note_seq):
    """Resample and Gaussian-smooth the difficulty curve for display.

    The curve is interpolated onto a uniform grid, zeroed during breaks
    (no note within BREAK_ZERO_THRESHOLD_MS), smoothed with
    gaussian_filter1d, and mapped back onto all_corners.
    """
    note_times = np.array(sorted(float(h) for _, h in note_seq), dtype=float)

    t_start = float(all_corners[0])
    t_end = float(all_corners[-1])
    uniform_t = np.arange(t_start, t_end + GRAPH_RESAMPLE_INTERVAL_MS, GRAPH_RESAMPLE_INTERVAL_MS, dtype=float)

    # Mark samples that fall inside a break (far from every note).
    if len(note_times) > 0:
        idx = np.searchsorted(note_times, uniform_t)
        idx_after = np.clip(idx, 0, len(note_times) - 1)
        idx_before = np.clip(idx - 1, 0, len(note_times) - 1)
        dist = np.minimum(np.abs(uniform_t - note_times[idx_before]), np.abs(uniform_t - note_times[idx_after]))
        break_mask = dist > BREAK_ZERO_THRESHOLD_MS
    else:
        break_mask = np.zeros(len(uniform_t), dtype=bool)

    uniform_D = np.interp(uniform_t, all_corners, D_all)
    uniform_D[break_mask] = 0.0

    # Smooth, then re-zero breaks so the blur doesn't bleed into them.
    sigma_samples = SMOOTH_SIGMA_MS / GRAPH_RESAMPLE_INTERVAL_MS
    uniform_result = gaussian_filter1d(uniform_D, sigma=sigma_samples, mode="constant", cval=0.0)
    uniform_result[break_mask] = 0.0

    return np.interp(all_corners, uniform_t, uniform_result)
|
||||
|
||||
|
||||
# --- Main Entry Points ---
|
||||
|
||||
def calculate(file_path, mod):
    """Compute the star rating and difficulty curve for a .osu file.

    Args:
        file_path: path to the beatmap file.
        mod: rate mod, one of "NM", "DT", "HT".

    Returns:
        (SR, all_corners, D_graph, factors) where factors maps component
        names to their per-corner curves on all_corners.
    """
    x, K, T, note_seq, note_seq_by_column = preprocess_file(file_path, mod)
    all_corners, base_corners, A_corners = get_corners(T, note_seq)

    key_usage = get_key_usage(K, T, note_seq, base_corners)
    # Columns considered active at each base corner.
    active_columns = [[k for k in range(K) if key_usage[k][i]] for i in range(len(base_corners))]
    key_usage_400 = get_key_usage_400(K, T, note_seq, base_corners)
    anchor = compute_anchor(K, key_usage_400, base_corners)

    # Component curves, each resampled onto the common all_corners grid.
    delta_ks, Jbar = compute_Jbar(K, T, x, note_seq_by_column, base_corners)
    Jbar = interp_values(all_corners, base_corners, Jbar)

    Xbar = compute_Xbar(K, T, x, note_seq_by_column, active_columns, base_corners)
    Xbar = interp_values(all_corners, base_corners, Xbar)

    Pbar = compute_Pbar(K, T, x, note_seq, anchor, base_corners)
    Pbar = interp_values(all_corners, base_corners, Pbar)

    Abar = compute_Abar(K, T, x, note_seq_by_column, active_columns, delta_ks, A_corners, base_corners)
    Abar = interp_values(all_corners, A_corners, Abar)

    C_step, Ks_step = compute_C_and_Ks(K, T, note_seq, key_usage, base_corners)
    C_arr = step_interp(all_corners, base_corners, C_step)
    Ks_arr = step_interp(all_corners, base_corners, Ks_step)

    # Combine components into the per-corner difficulty curve D_all.
    S_all = (
        (0.4 * (Abar ** (3 / Ks_arr) * np.minimum(Jbar, 8 + 0.85 * Jbar)) ** 1.5) +
        (0.6 * (Abar ** (2 / 3) * (0.8 * Pbar)) ** 1.5)
    ) ** (2 / 3)
    T_all = (Abar ** (3 / Ks_arr) * Xbar) / (Xbar + S_all + 1)
    D_all = 2.7 * (S_all ** 0.5) * (T_all ** 1.5) + S_all * 0.27

    # Per-corner time weight: half the gap to each neighbouring corner.
    gaps = np.empty_like(all_corners, dtype=float)
    gaps[0] = (all_corners[1] - all_corners[0]) / 2.0
    gaps[-1] = (all_corners[-1] - all_corners[-2]) / 2.0
    gaps[1:-1] = (all_corners[2:] - all_corners[:-2]) / 2.0

    # Weight corners by local note density times time covered, then sort
    # by difficulty for weighted-percentile extraction.
    effective_weights = C_arr * gaps
    sorted_indices = np.argsort(D_all)
    D_sorted = D_all[sorted_indices]
    w_sorted = effective_weights[sorted_indices]

    cum_weights = np.cumsum(w_sorted)
    norm_cum_weights = cum_weights / cum_weights[-1]

    # Weighted percentiles clustered around the 93rd and 83rd.
    target_percentiles = np.array([0.945, 0.935, 0.925, 0.915, 0.845, 0.835, 0.825, 0.815])
    indices = np.searchsorted(norm_cum_weights, target_percentiles, side="left")

    percentile_93 = np.mean(D_sorted[indices[:4]])
    percentile_83 = np.mean(D_sorted[indices[4:8]])
    # p=5 weighted power mean of the whole curve.
    weighted_mean = (np.sum(D_sorted ** 5 * w_sorted) / np.sum(w_sorted)) ** 0.2

    SR = 0.88 * percentile_93 * 0.25 + 0.94 * percentile_83 * 0.2 + weighted_mean * 0.55
    # Short maps are rated down; very high ratings are compressed.
    total_notes = len(note_seq)
    SR *= total_notes / (total_notes + 60)
    SR = rescale_high(SR) * 0.975

    # Display curve: fade around breaks, then smooth for the graph.
    D_pre = _apply_proximity_envelope(all_corners, D_all, note_seq)
    D_graph = smooth_D_for_graph(all_corners, D_pre, note_seq)

    return (
        SR,
        all_corners,
        D_graph,
        {
            "Pressing Intensity": Pbar,
            "Unevenness": Abar,
            "Same-Column Pressure": Jbar,
            "Cross-Column Pressure": Xbar,
        },
    )
|
||||
|
||||
|
||||
def factor_averages(times, factors):
|
||||
times = np.asarray(times, dtype=float)
|
||||
names = list(factors.keys())
|
||||
matrix = np.stack([factors[n] for n in names], axis=0)
|
||||
integrals = np.trapezoid(matrix, times, axis=1)
|
||||
duration = times[-1] - times[0]
|
||||
return {n: float(integrals[i] / duration) for i, n in enumerate(names)}
|
||||
|
||||
|
||||
def parse_hitobjects(file_path, mod="NM"):
    """Parse a .osu file into a list of {"x": column, "time": ms} dicts.

    Rate mods rescale hit times the same way as preprocess_file, except
    without flooring to integers.
    """
    p_obj = osu_parser.parser(file_path)
    p_obj.process()
    p = p_obj.get_parsed_data()
    # NOTE(review): p[1]/p[2] appear to be columns/hit times — confirm
    # against osu_file_parser.

    hitobjects = []
    for i in range(len(p[1])):
        x = p[1][i]
        time = p[2][i]
        if mod == "DT":
            time *= 2 / 3
        elif mod == "HT":
            time *= 4 / 3
        hitobjects.append({"x": x, "time": time})

    return hitobjects
|
||||
841
src/daniel.py
Normal file
841
src/daniel.py
Normal file
@ -0,0 +1,841 @@
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import time
|
||||
import threading
|
||||
import ctypes
|
||||
import tkinter as tk
|
||||
|
||||
import numpy as np
|
||||
import websocket
|
||||
|
||||
import algorithm
|
||||
import msd_converter
|
||||
from graph_fast import FastGraph
|
||||
|
||||
|
||||
def resource_path(relative_path):
    """Resolve a bundled resource path for both dev runs and PyInstaller builds.

    PyInstaller extracts bundled files to sys._MEIPASS; otherwise resources
    live next to this source file.
    """
    if hasattr(sys, "_MEIPASS"):
        base_dir = sys._MEIPASS
    else:
        base_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(base_dir, relative_path)
|
||||
|
||||
|
||||
# --- Constants ---

# tosu websocket endpoint (default port).
TOSU_WS = "ws://localhost:24050/ws"

# Gap (ms) with no notes treated as a break for display purposes.
BREAK_ZERO_THRESHOLD_MS = 400
# Song-time jump (ms) treated as a seek rather than normal playback.
TIME_JUMP_THRESHOLD_MS = 2000
_OSU_TIMEOUT = 1.0

# Display modes cycled by the UI.
MODE_COMPACT = 0
MODE_STATISTICS = 1
MODE_FULL = 2
MODE_NAMES = ["compact", "statistics", "full"]

# Layout dimensions (px).
GRAPH_HEIGHT = 250
BAR_HEIGHT = 120
WINDOW_WIDTH = 650

COMPACT_HEIGHT = 65
STATISTICS_HEIGHT = 120
FULL_HEIGHT = GRAPH_HEIGHT + BAR_HEIGHT

COMPACT_WIDTH = 550
STATISTICS_WIDTH = 650
FULL_WIDTH = 650

MODE_HEIGHTS = {
    MODE_COMPACT: COMPACT_HEIGHT,
    MODE_STATISTICS: STATISTICS_HEIGHT,
    MODE_FULL: FULL_HEIGHT,
}
MODE_WIDTHS = {
    MODE_COMPACT: COMPACT_WIDTH,
    MODE_STATISTICS: STATISTICS_WIDTH,
    MODE_FULL: FULL_WIDTH,
}

# Colour palette.
BG_COLOR = "#000000"
PREFIX_FILL = "#FFFFFF"
DOT_RED = "#FF3B3B"
DOT_GREEN = "#00E676"

# UI font scale, overridable via env var; non-Windows defaults smaller.
FONT_SCALE = float(os.environ.get("DANIEL_FONT_SCALE", "1.0" if os.name == "nt" else "0.67"))
|
||||
|
||||
|
||||
def _font_size(size):
    """Scale a base font size by FONT_SCALE, flooring at 8 px."""
    return max(8, int(round(size * FONT_SCALE)))
|
||||
|
||||
|
||||
# Fonts (family, scaled point size).
FONT_PREFIX = ("Segoe UI Semibold", _font_size(30))
FONT_DAN = ("Segoe UI Bold", _font_size(45))
FONT_MSD_SKILL = ("Segoe UI Semibold", _font_size(29))
FONT_CONNECTION = ("Segoe UI Semibold", _font_size(18))

PREFIX_Y_OFFSET = 4.1
# Skillsets within this fraction of the overall MSD count as "relevant".
MSD_RELEVANCE_FRACTION = 0.15
# jackspeed/overall ratio at or above which a chart is flagged as vibro.
VIBRO_JACKSPEED_THRESHOLD = 0.90

# Accent colour per dan tier.
DAN_COLORS = {
    "Alpha": "#ff5a5a",
    "Beta": "#ffd84d",
    "Gamma": "#00ffd5",
    "Delta": "#ff7b00",
    "Epsilon": "#ff7a9e",
    "Zeta": "#D7F7FF",
    "Eta": "#ff2b2b",
    "Theta": "#CC00FF",
}

# Reference star rating per dan tier — presumably used to map a computed
# SR onto a dan label; confirm against the classification code.
DAN_MEANS = {
    "Alpha": 6.562,
    "Beta": 6.957,
    "Gamma": 7.459,
    "Delta": 7.939,
    "Epsilon": 9.095,
    "Zeta": 9.473,
    "Eta": 10.162,
    "Theta": 10.782,
}
ORDER = list(DAN_MEANS.keys())
DAN_ORDER_START = 11
|
||||
|
||||
|
||||
# --- State ---

lock = threading.Lock()  # guards the shared state below across threads

current_map = None
current_mod = "NM"
last_state = None
current_song_time_ms = 0

# Websocket timing bookkeeping (interpolating song time between messages).
_ws_receive_time = 0.0
_ws_song_time_ms = 0
_prev_song_time_ms = 0
_prev_receive_time = 0.0
_last_message_time = 0.0

# Pause detection / frozen-interpolation state.
_paused = False
_pause_time_ms = 0
_frozen_interp_ms = 0.0

# Loading-animation state.
loading = False
loading_step = 0
_last_loading_dot = 0.0

current_strain_data = None
current_msd_data = None
# "connecting" -> connected (any other value) -> "ready"; see the
# connection-screen helpers.
connection_phase = "connecting"

_last_dan_label = "."
_last_dan_numeric = ""
current_mode = MODE_FULL
|
||||
|
||||
|
||||
# --- Window setup ---
|
||||
|
||||
# Opt out of DPI virtualisation on Windows so pixel sizes are exact.
if os.name == "nt" and hasattr(ctypes, "windll"):
    try:
        ctypes.windll.user32.SetProcessDPIAware()
    except Exception:
        pass

root = tk.Tk()
root.tk.call("tk", "scaling", 1.0)  # disable Tk's own scaling
root.title("Daniel by TheBagelOfMan")
root.geometry(f"{WINDOW_WIDTH}x{FULL_HEIGHT}")
root.resizable(False, False)
root.configure(bg=BG_COLOR)
root.attributes("-topmost", True)  # keep the overlay always on top
|
||||
|
||||
|
||||
def _set_dark_title_bar(window):
    """Enable a dark title bar on Windows via DWM; silently no-ops elsewhere.

    Tries DWMWA_USE_IMMERSIVE_DARK_MODE as attribute 20 (Windows 10 20H1+)
    and then 19 (older builds); both attempts swallow failures.
    """
    try:
        hwnd = ctypes.windll.user32.GetParent(window.winfo_id())
        value = ctypes.c_int(1)
        ctypes.windll.dwmapi.DwmSetWindowAttribute(hwnd, 20, ctypes.byref(value), ctypes.sizeof(value))
    except Exception:
        pass
    try:
        hwnd = ctypes.windll.user32.GetParent(window.winfo_id())
        value = ctypes.c_int(1)
        ctypes.windll.dwmapi.DwmSetWindowAttribute(hwnd, 19, ctypes.byref(value), ctypes.sizeof(value))
    except Exception:
        pass
|
||||
|
||||
|
||||
root.update_idletasks()
_set_dark_title_bar(root)

# Window icon: .ico where supported, .png fallback; both best-effort.
_icon_path = resource_path("icon.ico")
if os.path.exists(_icon_path):
    try:
        root.iconbitmap(_icon_path)
    except Exception:
        pass

_icon_png_path = resource_path("icon.png")
if os.path.exists(_icon_png_path):
    try:
        _icon_img = tk.PhotoImage(file=_icon_png_path)
        root.iconphoto(True, _icon_img)
    except Exception:
        pass

canvas = tk.Canvas(root, width=WINDOW_WIDTH, height=FULL_HEIGHT, bg=BG_COLOR, highlightthickness=0)
canvas.pack(expand=True, fill="both")

graph = FastGraph(canvas, GRAPH_HEIGHT, WINDOW_WIDTH)

# Canvas item handles, created/cleared by the UI helper functions.
text_items = []
msd_items = []
accent_bar = None
current_bar_color = "#333333"
_connection_items = []
_pulse_job = None
|
||||
|
||||
|
||||
# --- Drawing helpers ---
|
||||
|
||||
def rgb(hex_color):
    """Resolve a Tk colour name/hex string to an 8-bit (r, g, b) tuple."""
    r, g, b = root.winfo_rgb(hex_color)  # winfo_rgb returns 16-bit channels
    return r // 256, g // 256, b // 256
|
||||
|
||||
|
||||
def lerp_color(c1, c2, t):
    """Linearly interpolate two colours; t=0 gives c1, t=1 gives c2."""
    r1, g1, b1 = rgb(c1)
    r2, g2, b2 = rgb(c2)
    r = int(r1 + (r2 - r1) * t)
    g = int(g1 + (g2 - g1) * t)
    b = int(b1 + (b2 - b1) * t)
    return f"#{r:02x}{g:02x}{b:02x}"
|
||||
|
||||
|
||||
def draw_text(x, y, text, fill, font, anchor="w"):
    """Create one canvas text item; returned as a one-item list of item IDs."""
    return [canvas.create_text(x, y, text=text, fill=fill, font=font, anchor=anchor)]
|
||||
|
||||
|
||||
def draw_outline_text(x, y, text, fill, outline, font):
    """Draw outlined text by layering 8 offset copies beneath the main text.

    Returns all created canvas item IDs (outline copies first, fill last).
    """
    items = []
    for ox, oy in [(-1, 0), (1, 0), (0, -1), (0, 1), (-1, -1), (-1, 1), (1, -1), (1, 1)]:
        items.append(canvas.create_text(x + ox, y + oy, text=text, fill=outline, font=font, anchor="w"))
    items.append(canvas.create_text(x, y, text=text, fill=fill, font=font, anchor="w"))
    return items
|
||||
|
||||
|
||||
def is_loading_text(text):
    """True when *text* is one of the animated loading-dot placeholder frames."""
    loading_frames = {".", "..", "..."}
    return text in loading_frames
|
||||
|
||||
|
||||
def _get_text_y_offset():
    """Vertical offset for the text area: below the graph in full mode, else 0."""
    return GRAPH_HEIGHT if current_mode == MODE_FULL else 0
|
||||
|
||||
|
||||
# --- Connection screen ---
|
||||
|
||||
def _draw_connection_screen():
    """Render the connection-status overlay (pulsing dot plus label).

    Clears any previous overlay and its animation first; draws nothing once
    connection_phase is "ready".
    """
    global _connection_items, _pulse_job

    # Stop a running pulse animation before redrawing.
    if _pulse_job is not None:
        root.after_cancel(_pulse_job)
        _pulse_job = None

    for item in _connection_items:
        canvas.delete(item)
    _connection_items.clear()

    if connection_phase == "ready":
        return

    cy = MODE_HEIGHTS[current_mode] // 2  # vertically centred for the mode

    if connection_phase == "connecting":
        label = "Waiting for tosu connection..."
        dot_color = DOT_RED
    else:
        label = "tosu connected - waiting for map data"
        dot_color = DOT_GREEN

    dot_r = 6
    dot_cx = 20
    inner = canvas.create_oval(
        dot_cx - dot_r, cy - dot_r,
        dot_cx + dot_r, cy + dot_r,
        fill=dot_color, outline="",
    )
    title = canvas.create_text(
        dot_cx + dot_r + 10, cy,
        text=label, fill="#AAAAAA",
        font=FONT_CONNECTION, anchor="w",
    )
    _connection_items += [inner, title]
    # Kick off the pulsing animation on the status dot.
    _pulse_connection(inner, dot_color, 0)
|
||||
|
||||
|
||||
def _pulse_connection(inner, dot_color, step):
    """Animate the status dot's brightness; reschedules itself every 50 ms.

    Stops (and clears the pending job handle) once connection_phase becomes
    "ready".
    """
    global _pulse_job

    if connection_phase == "ready":
        _pulse_job = None
        return

    # Triangle wave over a 40-step (2 s) cycle -> brightness 0.4..1.0.
    phase = (step % 40) / 40
    alpha = 0.4 + 0.6 * abs(1 - 2 * phase)
    pulsed = lerp_color("#000000", dot_color, alpha)
    canvas.itemconfig(inner, fill=pulsed)
    _pulse_job = root.after(50, lambda: _pulse_connection(inner, dot_color, step + 1))
|
||||
|
||||
|
||||
def _clear_connection_screen():
    """Remove the connection overlay and restore the graph when appropriate."""
    global _connection_items, _pulse_job

    # Cancel the pulse animation if it is still scheduled.
    if _pulse_job is not None:
        root.after_cancel(_pulse_job)
        _pulse_job = None

    for item in _connection_items:
        canvas.delete(item)
    _connection_items.clear()

    # Re-show the graph only in full mode with a valid, loaded map.
    if current_mode == MODE_FULL and _last_dan_label not in ("Invalid Beatmap", ".", "..", "..."):
        graph.show()
|
||||
|
||||
|
||||
def _clear_normal_ui():
    """Delete all normal-mode canvas items (texts, MSD line, accent bar, graph)."""
    global text_items, msd_items, accent_bar

    for item in text_items:
        canvas.delete(item)
    text_items.clear()

    for item in msd_items:
        canvas.delete(item)
    msd_items.clear()

    if accent_bar:
        canvas.delete(accent_bar)
        accent_bar = None

    graph.hide()
|
||||
|
||||
|
||||
def _clear_invalid_ui():
    """Clear the MSD line and hide the graph (invalid-beatmap display)."""
    global msd_items
    for item in msd_items:
        canvas.delete(item)
    msd_items.clear()
    graph.hide()
|
||||
|
||||
|
||||
# --- UI components ---
|
||||
|
||||
def get_relevant_skillsets(msd_result):
    """Pick the dominant skillsets from an MSD result dict.

    Returns (overall, top3, is_vibro): top3 is up to three (name, value)
    pairs whose value is within MSD_RELEVANCE_FRACTION of overall, highest
    first; is_vibro flags charts whose jackspeed dominates the overall.
    """
    overall = msd_result.get("overall", 0)
    threshold = overall * MSD_RELEVANCE_FRACTION
    relevant = {k: v for k, v in msd_result.items() if k != "overall" and (overall - v) <= threshold}
    top3 = sorted(relevant.items(), key=lambda x: x[1], reverse=True)[:3]
    jackspeed = msd_result.get("jackspeed", 0)
    is_vibro = (overall > 0) and (jackspeed / overall >= VIBRO_JACKSPEED_THRESHOLD)
    return overall, top3, is_vibro
|
||||
|
||||
|
||||
def draw_msd(msd_result, color):
    """Render the "top skillsets + overall MSD" line under the dan text.

    Any previously drawn MSD items are deleted first. Nothing is drawn in
    compact mode; a red "MSD Error" is shown when msd_result is None.

    NOTE(review): the `color` parameter is never used — the text is always
    white (or red on error). Confirm whether colored MSD text was intended.
    """
    global msd_items
    for item in msd_items:
        canvas.delete(item)
    msd_items.clear()

    if current_mode == MODE_COMPACT:
        return

    if msd_result is None:
        y = _get_text_y_offset() + 80
        msd_items += draw_text(14, y, "MSD Error", "#FF4444", FONT_MSD_SKILL)
        return

    overall, top3, _ = get_relevant_skillsets(msd_result)
    if not top3:
        return

    # e.g. "Stream, Jumpstream 24.31MSD"
    skillset_str = ", ".join(key.capitalize() for key, _ in top3)
    y = _get_text_y_offset() + 80
    msd_items += draw_text(14, y, f"{skillset_str} {overall:.2f}MSD", "#FFFFFF", FONT_MSD_SKILL)
|
||||
|
||||
|
||||
def draw_accent_bar():
    """(Re)create the left-edge accent bar and return its canvas item id."""
    global accent_bar

    if accent_bar:
        canvas.delete(accent_bar)

    bar_height = MODE_HEIGHTS[current_mode]
    accent_bar = canvas.create_rectangle(
        0, 0, 6, bar_height, fill=current_bar_color, outline=""
    )
    return accent_bar
|
||||
|
||||
|
||||
def fade_items(text_item, bar_item, start_color, end_color, steps=14):
    """Animate the dan text and accent bar from start_color to end_color.

    One interpolation step runs every 15 ms via root.after. current_bar_color
    is kept in sync each step so other redraws pick up the in-flight color.
    On the final step the exact end_color is applied and, in full mode, the
    graph is recolored to match.
    """
    global current_bar_color

    def _step(i):
        global current_bar_color
        if i < steps:
            color = lerp_color(start_color, end_color, i / steps)
            canvas.itemconfig(text_item, fill=color)
            canvas.itemconfig(bar_item, fill=color)
            current_bar_color = color
            root.after(15, lambda: _step(i + 1))
        else:
            # Snap precisely to the target color on the last frame.
            canvas.itemconfig(text_item, fill=end_color)
            canvas.itemconfig(bar_item, fill=end_color)
            current_bar_color = end_color
            if current_mode == MODE_FULL:
                graph.set_color(end_color)

    _step(0)
|
||||
|
||||
|
||||
def update_dan_text(dan_label, dan_numeric):
    """Redraw the main "Est. Dan: <label> (<numeric>)" readout.

    Must run on the Tk main thread (use set_dan_text from workers). Picks
    the text/bar color from the label, redraws the accent bar, prefix, dan
    label, numeric value and MSD line, then either fades to the new color
    or applies it immediately for loading/placeholder labels.
    """
    global text_items, current_bar_color

    # Nothing to draw while the connection screen is up.
    if connection_phase != "ready":
        return

    for item in text_items:
        canvas.delete(item)
    text_items.clear()

    # Choose target colors: grey while loading, cyan for "below lowest dan",
    # otherwise the configured color for the dan's base name.
    if is_loading_text(dan_label):
        fill = "#888888"
        new_bar_color = "#333333"
    else:
        if dan_label.startswith("<"):
            fill = "#7DF0FF"
            new_bar_color = fill
        else:
            base = dan_label.split()[0]
            fill = DAN_COLORS.get(base, "#FFFFFF")
            new_bar_color = fill

    bar = draw_accent_bar()
    y_off = _get_text_y_offset()
    y = y_off + 28
    prefix_y = y + PREFIX_Y_OFFSET

    prefix = draw_text(14, prefix_y, "Est. Dan:", PREFIX_FILL, FONT_PREFIX)
    text_items.extend(prefix)

    # Position the dan label just right of the prefix's measured width.
    bbox = canvas.bbox(prefix[-1])
    pw = bbox[2] - bbox[0] if bbox else 0
    xpos = 14 + pw + 8

    # Vibro detection only applies to real, fully calculated labels.
    is_vibro = False
    if (
        not is_loading_text(dan_label)
        and dan_label not in ("Invalid Beatmap", "? ? ? ? ?")
        and current_msd_data is not None
    ):
        _, _, is_vibro = get_relevant_skillsets(current_msd_data)

    if dan_label == "? ? ? ? ?":
        # Above-the-table sentinel: outlined text, white bar.
        dan_items = draw_outline_text(xpos, y, dan_label, fill="#000000", outline="#FFFFFF", font=FONT_DAN)
        new_bar_color = "#FFFFFF"
    elif is_vibro:
        dan_items = draw_text(xpos, y, "VIBRO", "#FFFFFF", FONT_DAN)
        new_bar_color = "#FFFFFF"
    else:
        # Drawn in the *current* color; fade_items animates to new_bar_color.
        dan_items = draw_text(xpos, y, dan_label, current_bar_color, FONT_DAN)

    text_items.extend(dan_items)

    # Numeric dan value, right of the label ("N/A" for vibro maps).
    if not is_loading_text(dan_label) and dan_numeric:
        dan_bbox = canvas.bbox(dan_items[-1])
        numeric_x = (dan_bbox[2] if dan_bbox else xpos) + 10
        display_numeric = "N/A" if is_vibro else f"({dan_numeric})"
        text_items.extend(draw_text(numeric_x, prefix_y, display_numeric, "#FFFFFF", FONT_PREFIX))

    if current_mode != MODE_COMPACT:
        draw_msd(current_msd_data, new_bar_color if not is_loading_text(dan_label) else "#333333")
    else:
        # Compact mode has no MSD line; drop any leftovers.
        for item in msd_items:
            canvas.delete(item)
        msd_items.clear()

    if current_mode == MODE_FULL:
        graph.set_color(
            new_bar_color if (is_loading_text(dan_label) or dan_label == "? ? ? ? ?") else current_bar_color
        )

    # Real labels fade smoothly; loading/placeholder labels switch instantly.
    if not is_loading_text(dan_label) and dan_label != "? ? ? ? ?":
        fade_items(dan_items[-1], bar, current_bar_color, new_bar_color)
    else:
        canvas.itemconfig(bar, fill=new_bar_color)
        current_bar_color = new_bar_color
        if dan_label != "? ? ? ? ?":
            canvas.itemconfig(dan_items[-1], fill=fill)
|
||||
|
||||
|
||||
def set_dan_text(label, numeric):
    """Thread-safe wrapper: schedule update_dan_text on the Tk main loop."""
    def _apply():
        update_dan_text(label, numeric)

    root.after(0, _apply)
|
||||
|
||||
|
||||
# --- Mode switching ---
|
||||
|
||||
def _apply_mode():
    """Resize the window/canvas for the current mode and redraw its contents."""
    h = MODE_HEIGHTS[current_mode]
    w = MODE_WIDTHS[current_mode]
    root.geometry(f"{w}x{h}")
    canvas.configure(width=w, height=h)

    # The strain graph only exists in full mode.
    if current_mode == MODE_FULL:
        graph.show()
    else:
        graph.hide()

    # Redraw whichever screen is active at the new dimensions.
    if connection_phase != "ready":
        _draw_connection_screen()
    else:
        update_dan_text(_last_dan_label, _last_dan_numeric)
|
||||
|
||||
|
||||
def cycle_mode(event=None):
    """Advance to the next of the three display modes (bound to <Tab>)."""
    global current_mode
    next_mode = (current_mode + 1) % 3
    current_mode = next_mode
    _apply_mode()
    print(f"[Mode] Switched to {MODE_NAMES[next_mode]}")
|
||||
|
||||
|
||||
root.bind("<Tab>", cycle_mode)
|
||||
|
||||
|
||||
# --- Tick loop ---
|
||||
|
||||
def _tick():
    """~60 fps UI tick: animate loading dots and, in full mode, advance the
    graph playhead by extrapolating between WebSocket time updates."""
    global loading_step, _last_loading_dot

    # Nothing to animate here while on the connection screen.
    if connection_phase != "ready":
        root.after(16, _tick)
        return

    now = time.monotonic()

    # Cycle ". / .. / ..." every 0.4 s while a calculation is running.
    if loading and now - _last_loading_dot >= 0.4:
        dots = [".", "..", "..."]
        update_dan_text(dots[loading_step % 3], "")
        loading_step += 1
        _last_loading_dot = now

    if current_mode == MODE_FULL:
        # Snapshot the shared WebSocket timing state under the lock.
        with lock:
            ws_time = _ws_song_time_ms
            ws_recv = _ws_receive_time
            prev_time = _prev_song_time_ms
            prev_recv = _prev_receive_time
            md = current_mod
            paused = _paused
            frozen_ms = _frozen_interp_ms

        if paused:
            # Hold the playhead where it was when the pause was detected.
            graph.update_position(frozen_ms, md)
        else:
            # Playback rate in song-ms per wall-clock-second estimated from
            # the last two updates; 1000.0 is the 1x-realtime fallback.
            real_dt = ws_recv - prev_recv
            rate = (ws_time - prev_time) / real_dt if real_dt > 0.01 and ws_time > prev_time else 1000.0
            rate = max(0.0, min(rate, 5000.0))
            # Extrapolate forward from the last received song time.
            interpolated_ms = ws_time + rate * (now - ws_recv)
            graph.update_position(interpolated_ms, md)

    root.after(16, _tick)
|
||||
|
||||
|
||||
# --- WebSocket callbacks ---
|
||||
|
||||
def on_open(ws_app):
    """tosu WebSocket connected: reset cached state and wait for map data."""
    global connection_phase, last_state
    last_state = None
    connection_phase = "waiting_map"
    print("[WS] Connected to tosu.")
    root.after(0, _draw_connection_screen)
|
||||
|
||||
|
||||
def on_message(ws_app, msg):
    """Handle one tosu state payload: update the tracked map/mod/time, detect
    pauses, resumes and seeks, and flip the connection phase to "ready".

    Runs on the WebSocket thread; all UI work is dispatched via root.after.
    """
    global current_map, current_mod, current_song_time_ms
    global _ws_receive_time, _ws_song_time_ms, _prev_song_time_ms, _prev_receive_time
    global connection_phase, last_state, _last_message_time
    global _paused, _pause_time_ms, _frozen_interp_ms

    # Feed the silence watchdog (_message_timeout_watcher).
    _last_message_time = time.monotonic()

    try:
        d = json.loads(msg)
        bm = d.get("menu", {}).get("bm")
        if not bm:
            return

        folder = bm["path"]["folder"]
        file = bm["path"]["file"]

        # Empty path means osu! is no longer providing a map — fall back to
        # the waiting screen if we were in normal operation.
        if not folder or not file:
            if connection_phase == "ready":
                print("[WS] osu closed — no map data.")
                last_state = None
                connection_phase = "waiting_map"
                root.after(0, _clear_normal_ui)
                root.after(0, _draw_connection_screen)
            return

        songs = d["settings"]["folders"]["songs"]
        new_map = os.path.join(songs, folder, file)
        new_mod = get_rate_mod(read_mods(d))
        new_time = bm.get("time", {}).get("current", 0)
        now = time.monotonic()

        with lock:
            prev_ws_time = _ws_song_time_ms
            current_map = new_map
            current_mod = new_mod
            current_song_time_ms = new_time
            # Shift the previous sample before storing the new one so _tick
            # can estimate the playback rate from consecutive updates.
            _prev_song_time_ms = _ws_song_time_ms
            _prev_receive_time = _ws_receive_time
            _ws_song_time_ms = new_time
            _ws_receive_time = now

            # Ignore pause/jump detection near the end of the strain data
            # (song finishing looks like a stall otherwise).
            sd = current_strain_data
            t_max_ms = float(sd[0][-1]) if (sd is not None and len(sd[0]) > 0) else None
            at_end = (t_max_ms is not None) and (new_time >= t_max_ms - 500)

            time_delta = new_time - prev_ws_time
            jumped = abs(time_delta) > TIME_JUMP_THRESHOLD_MS and not (0 < time_delta < TIME_JUMP_THRESHOLD_MS)

            if jumped and not at_end:
                # A seek/retry: pause markers no longer apply.
                if _paused:
                    _paused = False
                    _pause_time_ms = 0
                print(f"[Jump] Time jumped {time_delta:+.0f} ms — clearing pause markers")
                root.after(0, graph.clear_all_pause_markers)

            elif new_time == prev_ws_time and not at_end:
                # Time stalled: treat as a pause (marker added once).
                if not _paused:
                    _paused = True
                    _pause_time_ms = new_time
                    _frozen_interp_ms = float(new_time)
                    print(f"[Pause] Detected at {new_time} ms")
                    root.after(0, lambda t=new_time, m=new_mod: graph.add_pause_marker(t, m))

            else:
                if _paused:
                    _paused = False
                    _pause_time_ms = 0
                    print(f"[Pause] Resumed at {new_time} ms")

        # First full payload with a map: leave the connection screen.
        if connection_phase != "ready":
            connection_phase = "ready"
            print("[WS] Map data received. Entering normal operation.")
            root.after(0, _clear_connection_screen)

    except Exception:
        # Deliberate best-effort: malformed/partial payloads are dropped.
        # NOTE(review): this also hides real bugs in this handler — consider
        # logging the exception at debug level.
        pass
|
||||
|
||||
|
||||
def on_close(ws_app, close_status_code, close_msg):
    """WebSocket closed: drop cached state and return to the connecting screen."""
    global connection_phase, last_state, _paused
    last_state = None
    _paused = False
    connection_phase = "connecting"
    print(f"[WS] Disconnected from tosu (code={close_status_code}).")
    root.after(0, _clear_normal_ui)
    root.after(0, _draw_connection_screen)
|
||||
|
||||
|
||||
def on_error(ws_app, error):
    """WebSocket error callback: log only; reconnection is handled by _ws_loop."""
    print(f"[WS] Error: {error}")
|
||||
|
||||
|
||||
def read_mods(d):
    """Extract the active mod string from a tosu payload.

    Prefers the gameplay mods (set while playing) and falls back to the
    menu mods; returns "" when neither yields a value.

    Fix: tolerate explicit JSON nulls (e.g. "mods": null). The original
    chained .get(..., {}) calls returned the stored None (the default only
    applies to *missing* keys) and then raised AttributeError on None.get.
    """
    gameplay_mods = (d.get("gameplay") or {}).get("mods") or {}
    menu_mods = (d.get("menu") or {}).get("mods") or {}
    return gameplay_mods.get("str") or menu_mods.get("str") or ""
|
||||
|
||||
|
||||
def get_rate_mod(m):
    """Map a mod string to its rate category: "DT", "HT", or "NM".

    DT and NC both speed the song up; anything without a rate mod is "NM".
    """
    for token, category in (("DT", "DT"), ("NC", "DT"), ("HT", "HT")):
        if token in m:
            return category
    return "NM"
|
||||
|
||||
|
||||
# --- Calculation loop ---
|
||||
|
||||
def calculation_loop():
    """Worker thread: recalculate difficulty whenever the (map, mod) pair changes.

    Polls the shared state every 0.1 s. On a new state it parses the map,
    verifies it is 4k, runs the strain algorithm and the external MSD tool,
    derives the dan label, and hands the results to the UI via root.after /
    set_dan_text. Any failure marks the map invalid.
    """
    global last_state, loading, loading_step
    global current_strain_data, current_msd_data
    global _last_dan_label, _last_dan_numeric

    while True:
        if connection_phase != "ready":
            time.sleep(0.1)
            continue

        with lock:
            state = (current_map, current_mod)

        mp, mod = state

        if not mp or not os.path.exists(mp):
            time.sleep(0.1)
            continue

        # Skip recomputation when nothing changed since the last run.
        if state == last_state:
            time.sleep(0.1)
            continue

        try:
            loading = True
            loading_step = 0

            # Reject non-4k maps before running the expensive algorithm.
            import osu_file_parser as osu_parser
            _p = osu_parser.parser(mp)
            _p.process()
            if _p.get_parsed_data()[0] != 4:
                raise ValueError(f"Not a 4k map (keycount={_p.get_parsed_data()[0]})")

            SR, times, strain, factors = algorithm.calculate(mp, mod)

            t_arr = np.asarray(times, dtype=float)
            d_arr = np.asarray(strain, dtype=float)
            current_strain_data = (t_arr, d_arr)

            # MSD is best-effort: a failure here degrades gracefully to None.
            try:
                hitobjects = msd_converter.parse_hitobjects(mp, mod)
                etterna_rows = msd_converter.osu_to_etterna_rows(hitobjects)
                msd_result = msd_converter.calculate_msd(etterna_rows)
                print("\n[MSD Skillsets]")
                for k, v in msd_result.items():
                    print(f"{k:<10}: {v:.2f}")
            except Exception as msd_e:
                print(f"[MSD] Error calculating MSD, skipping: {msd_e}")
                msd_result = None

            with lock:
                current_msd_data = msd_result

            averages = algorithm.factor_averages(times, factors)
            dan_label, dan_numeric = get_dan_from_diff(SR)

            # Remembered so mode switches / reconnects can redraw the result.
            _last_dan_label = dan_label
            _last_dan_numeric = dan_numeric

            print(f"\n[Map Factors] {os.path.basename(mp)} [{mod}]")
            for k, v in averages.items():
                print(f"{k:<6}: {v:.4f}")
            print(f"SR : {SR:.4f}★")
            print(f"Dan : {dan_label} ({dan_numeric})\n")

            loading = False
            last_state = state

            if current_mode == MODE_FULL:
                root.after(0, lambda _t=t_arr, _d=d_arr: graph.set_data(_t, _d))
                root.after(0, lambda: graph.set_color(current_bar_color))
                root.after(0, graph.show)
            set_dan_text(dan_label, dan_numeric)

        except Exception as e:
            # Mark this state handled so the error is not retried every poll.
            loading = False
            last_state = state
            print("Calculation error:", e)
            _last_dan_label = "Invalid Beatmap"
            _last_dan_numeric = ""
            with lock:
                current_msd_data = None
                current_strain_data = None
            root.after(0, _clear_invalid_ui)
            set_dan_text("Invalid Beatmap", "")

        time.sleep(0.1)
|
||||
|
||||
|
||||
# --- Dan boundary tables ---
|
||||
|
||||
def _precompute_dan_boundaries():
    """Build (lower, upper) SR boundaries for each dan.

    Interior boundaries are midpoints between adjacent dan means; the first
    lower bound and last upper bound extend symmetrically past the end means.
    """
    means = [DAN_MEANS[d] for d in ORDER]
    last = len(means) - 1
    bounds = []
    for i, mean in enumerate(means):
        if i > 0:
            lower = (means[i - 1] + mean) / 2
        else:
            lower = mean - ((means[1] + mean) / 2 - mean)
        if i < last:
            upper = (mean + means[i + 1]) / 2
        else:
            upper = mean + (mean - means[i - 1]) / 2
        bounds.append((lower, upper))
    return bounds
|
||||
|
||||
|
||||
_DAN_BOUNDARIES = _precompute_dan_boundaries()
|
||||
|
||||
|
||||
def get_dan_from_diff(diff):
    """Map a star rating to a (dan label, numeric dan) pair.

    Below the lowest boundary returns "<first-dan Low"; at or above the
    highest returns the "? ? ? ? ?" sentinel. Within a dan, the fractional
    position t in [0, 1) selects the Low/Mid/High suffix and the decimal
    part of the numeric dan value.
    """
    if diff < _DAN_BOUNDARIES[0][0]:
        return f"<{ORDER[0]} Low", "N/A"
    if diff >= _DAN_BOUNDARIES[-1][1]:
        return "? ? ? ? ?", "N/A"

    for i, (dan, (lower, upper)) in enumerate(zip(ORDER, _DAN_BOUNDARIES)):
        if not (lower <= diff < upper):
            continue
        t = max(0.0, min((diff - lower) / (upper - lower), 1.0))
        numeric = round(DAN_ORDER_START + i + t, 2)
        if t < 1 / 3:
            suffix = "Low"
        elif t < 2 / 3:
            suffix = "Mid"
        else:
            suffix = "High"
        return f"{dan} {suffix}", numeric

    # Unreachable with contiguous boundaries; kept as a safe fallback.
    return "? ? ? ? ?", "N/A"
|
||||
|
||||
|
||||
# --- Boot ---
|
||||
|
||||
root.after(100, _draw_connection_screen)
|
||||
|
||||
|
||||
def _ws_loop():
    """Background thread: maintain the tosu WebSocket connection forever.

    run_forever() blocks until the socket drops; afterwards the connection
    screen is restored (if we were in normal operation) and a reconnect is
    attempted every 3 seconds.
    """
    global connection_phase
    while True:
        print("[WS] Attempting to connect to tosu...")
        app = websocket.WebSocketApp(
            TOSU_WS,
            on_open=on_open,
            on_message=on_message,
            on_close=on_close,
            on_error=on_error,
        )
        app.run_forever()

        if connection_phase == "ready":
            connection_phase = "connecting"
            root.after(0, _draw_connection_screen)

        print("[WS] Retrying in 3 seconds...")
        time.sleep(3)
|
||||
|
||||
|
||||
def _message_timeout_watcher():
    """Background thread: detect osu! closing via silence on the WebSocket.

    When no message has arrived for more than _OSU_TIMEOUT seconds while in
    normal operation, fall back to the waiting-for-map screen.
    """
    global connection_phase, last_state
    while True:
        time.sleep(1)
        if connection_phase != "ready":
            continue
        quiet_for = time.monotonic() - _last_message_time
        if quiet_for <= _OSU_TIMEOUT:
            continue
        print(f"[Watcher] No message for {quiet_for:.1f}s — osu likely closed.")
        last_state = None
        connection_phase = "waiting_map"
        root.after(0, _clear_normal_ui)
        root.after(0, _draw_connection_screen)
|
||||
|
||||
|
||||
# Worker threads: map calculation, WebSocket client, and silence watchdog.
# All daemonized so the process exits with the Tk main loop.
threading.Thread(target=calculation_loop, daemon=True).start()
threading.Thread(target=_ws_loop, daemon=True).start()
threading.Thread(target=_message_timeout_watcher, daemon=True).start()

# Start the ~60 fps UI tick, then hand control to Tk.
root.after(16, _tick)
root.mainloop()
|
||||
250
src/graph_fast.py
Normal file
250
src/graph_fast.py
Normal file
@ -0,0 +1,250 @@
|
||||
import tkinter as tk
|
||||
|
||||
import numpy as np
|
||||
from PIL import Image, ImageDraw
|
||||
|
||||
# --- Layout / rendering constants (pixels unless noted) ---
PAD_X = 6
PAD_Y_TOP = 8
PAD_Y_BOT = 6
BG_COLOR_RGB = (0, 0, 0)
UNPLAYED_FILL_RGB = (17, 17, 17)
UNPLAYED_STROKE_RGB = (42, 42, 42)
LINE_WIDTH = 5  # Width at 2x render resolution
LINE_BASELINE_INSET = 4
MAX_GRAPH_POINTS = 300  # Lower cap for smoother curves
SUPERSAMPLE = 2  # Render at 2x then downscale for anti-aliased lines
MIN_BREAK_MS = 2000  # Breaks shorter than this get interpolated through

# Pause-marker overlay (drawn as canvas rectangles, not in the image).
PAUSE_LINE_COLOR = "#FF3B3B"
PAUSE_LINE_WIDTH = 2
|
||||
|
||||
|
||||
def _hex_to_rgb(h):
|
||||
h = h.lstrip("#")
|
||||
return (int(h[0:2], 16), int(h[2:4], 16), int(h[4:6], 16))
|
||||
|
||||
|
||||
def _lerp_rgb(c1, c2, t):
|
||||
return tuple(int(c1[i] + (c2[i] - c1[i]) * t) for i in range(3))
|
||||
|
||||
|
||||
class FastGraph:
    """Strain graph rendered as a single Tk PhotoImage for cheap updates.

    The full graph is pre-rendered twice (played colors / unplayed greys) as
    numpy RGB buffers; advancing the playhead is then just a column-wise
    splice of the two buffers pushed to the PhotoImage as PPM data, instead
    of redrawing canvas primitives every frame.
    """

    def __init__(self, canvas, graph_height, window_width):
        self.canvas = canvas
        self.graph_height = graph_height
        self.window_width = window_width

        # Cached geometry: plot area inside the padding.
        self._w = window_width
        self._h = graph_height
        self._plot_w = self._w - PAD_X
        self._plot_h = self._h - PAD_Y_TOP - PAD_Y_BOT
        self._bottom_y = self._h - PAD_Y_BOT
        self._poly_bottom_y = self._bottom_y + 1

        # Source data and its time extent (ms).
        self._times = None
        self._strain = None
        self._t_min = 0.0
        self._t_max = 1.0
        self._poly_data = None

        # Pre-rendered RGB buffers (h, w, 3) and the splice scratch buffer.
        self._played_rgb = None
        self._unplayed_rgb = None
        self._composite_rgb = None

        # Default accent color until set_color is called.
        self._color_rgb = (255, 90, 90)
        self._played_fill_rgb = _lerp_rgb(BG_COLOR_RGB, (255, 90, 90), 0.45)
        self._played_stroke_rgb = _lerp_rgb(BG_COLOR_RGB, (255, 90, 90), 0.85)

        # Binary PPM (P6) header reused for every PhotoImage update.
        self._ppm_header = b"P6\n%d %d\n255\n" % (self._w, self._h)
        self._tk_photo = tk.PhotoImage(width=self._w, height=self._h)
        self._tk_photo.put("#000000", to=(0, 0, self._w, self._h))

        # The image sits below every other canvas item.
        self._canvas_item = self.canvas.create_image(0, 0, image=self._tk_photo, anchor="nw")
        self.canvas.tag_lower(self._canvas_item)

        self._last_split_px = -1  # -1 forces the next update_position to redraw
        self._visible = True
        self._pause_line_items = []

    # --- Public API ---

    def set_data(self, times, strain):
        """Load a new strain series, crop leading/trailing silence, and
        pre-render the played/unplayed images."""
        self._times = np.asarray(times, dtype=float)
        self._strain = np.asarray(strain, dtype=float)

        # Crop to the active region (keeping one sample of margin each side).
        nonzero = np.where(self._strain > 0)[0]
        if len(nonzero) == 0:
            nonzero = np.arange(len(self._times))
        crop_start = max(nonzero[0] - 1, 0)
        crop_end = min(nonzero[-1] + 2, len(self._times))
        self._times = self._times[crop_start:crop_end]
        self._strain = self._strain[crop_start:crop_end]

        if len(self._times) < 2:
            self._poly_data = None
            return

        self._t_min = float(self._times[0])
        self._t_max = float(self._times[-1])
        self._poly_data = self._build_polygon()
        self._rebuild_images()
        self._last_split_px = -1
        self.clear_all_pause_markers()

    def set_color(self, hex_color):
        """Change the accent color and re-render the played image variants."""
        self._color_rgb = _hex_to_rgb(hex_color)
        self._played_fill_rgb = _lerp_rgb(BG_COLOR_RGB, self._color_rgb, 0.45)
        self._played_stroke_rgb = _lerp_rgb(BG_COLOR_RGB, self._color_rgb, 0.85)

        if self._poly_data is not None:
            self._rebuild_images()
            self._last_split_px = -1

    def hide(self):
        """Hide the graph image and drop any pause markers."""
        if self._visible and self._canvas_item is not None:
            self.canvas.itemconfigure(self._canvas_item, state="hidden")
            self._visible = False
            self.clear_all_pause_markers()

    def show(self):
        """Show the graph image and force a redraw on the next position update."""
        if not self._visible and self._canvas_item is not None:
            self.canvas.itemconfigure(self._canvas_item, state="normal")
            self._visible = True
            self._last_split_px = -1

    def update_position(self, song_time_ms, mod="NM"):
        """Move the playhead: splice played pixels left of the current song
        position with unplayed pixels to the right and push to the canvas.

        No-ops while hidden, before images are built, or when the playhead
        column has not moved since the last call.
        """
        if not self._visible or self._played_rgb is None or self._unplayed_rgb is None:
            return

        # DT/HT rates: map real time onto the NM-equivalent timeline the
        # strain data was rendered with.
        scale = {"DT": 2 / 3, "HT": 4 / 3}.get(mod, 1.0)
        adj_time = song_time_ms * scale
        duration = self._t_max - self._t_min
        frac = max(0.0, min((adj_time - self._t_min) / duration, 1.0)) if duration > 0 else 0.0
        split_px = max(0, min(round(PAD_X + frac * self._plot_w), self._w))

        if split_px == self._last_split_px:
            return
        self._last_split_px = split_px

        buf = self._composite_rgb
        if split_px > 0:
            buf[:, :split_px, :] = self._played_rgb[:, :split_px, :]
        if split_px < self._w:
            buf[:, split_px:, :] = self._unplayed_rgb[:, split_px:, :]

        # PhotoImage accepts raw PPM bytes; this replaces the whole image.
        self._tk_photo.configure(data=self._ppm_header + buf.tobytes())

    def add_pause_marker(self, song_time_ms, mod="NM"):
        """Add a red vertical line at the given song time. Call from main thread only."""
        if not self._visible or self._t_max <= self._t_min:
            return

        scale = {"DT": 2 / 3, "HT": 4 / 3}.get(mod, 1.0)
        adj_time = song_time_ms * scale
        duration = self._t_max - self._t_min
        frac = max(0.0, min((adj_time - self._t_min) / duration, 1.0))
        x = max(PAD_X, min(round(PAD_X + frac * self._plot_w), self._w - 1))

        # Skip markers pinned to the very start (e.g. pause before playing).
        if x <= PAD_X:
            return

        hw = max(1, PAUSE_LINE_WIDTH // 2)
        item = self.canvas.create_rectangle(
            x - hw, PAD_Y_TOP, x + hw, self._bottom_y,
            fill=PAUSE_LINE_COLOR, outline="", tags="pause_marker",
        )
        # Keep markers above the graph image.
        self.canvas.tag_raise(item, self._canvas_item)
        self._pause_line_items.append(item)

    def clear_all_pause_markers(self):
        """Remove every pause marker line. Call from main thread only."""
        for item in self._pause_line_items:
            self.canvas.delete(item)
        self._pause_line_items.clear()

    def destroy(self):
        """Release all canvas items and image buffers."""
        self.clear_all_pause_markers()
        if self._canvas_item is not None:
            self.canvas.delete(self._canvas_item)
            self._canvas_item = None
        self._tk_photo = None
        self._played_rgb = None
        self._unplayed_rgb = None
        self._composite_rgb = None
        self._poly_data = None
        self._last_split_px = -1

    # --- Internal ---

    def _build_polygon(self):
        """Turn the strain series into fill-polygon and stroke-line point
        lists (in final-image pixel coordinates, before supersampling).

        Short breaks (< MIN_BREAK_MS) are interpolated through so the curve
        does not drop to zero; long series are decimated to MAX_GRAPH_POINTS
        while keeping the zero/non-zero transition points intact.
        """
        t = self._times.copy()
        d = self._strain.copy()

        # Find runs of zero strain (gaps) via sign transitions.
        is_zero = (d == 0).astype(np.int8)
        transitions = np.diff(is_zero, prepend=0, append=0)
        gap_starts = np.where(transitions == 1)[0]
        gap_ends = np.where(transitions == -1)[0]

        # Bridge short gaps with a linear ramp between the bordering values.
        for gs, ge in zip(gap_starts, gap_ends):
            gap_duration = t[min(ge, len(t) - 1)] - t[max(gs - 1, 0)]
            if gap_duration < MIN_BREAK_MS and gs > 0 and ge < len(d):
                val_before = d[gs - 1]
                val_after = d[ge] if ge < len(d) else 0
                n_gap = ge - gs
                if n_gap > 0:
                    d[gs:ge] = np.linspace(val_before, val_after, n_gap + 2)[1:-1]

        # Scale into the plot rectangle (y grows downward).
        d_max = max(d.max(), 1.0)
        px_x = PAD_X + (t - self._t_min) / (self._t_max - self._t_min) * self._plot_w
        px_y = self._h - PAD_Y_BOT - d / d_max * self._plot_h

        n = len(d)
        if n > MAX_GRAPH_POINTS:
            # Keep an even spread of points plus both sides of every
            # zero/non-zero edge so real breaks stay sharp after decimation.
            is_zero = (d == 0).astype(np.int8)
            transitions = np.abs(np.diff(is_zero))
            critical = set(np.where(transitions == 1)[0].tolist())
            critical |= set((np.where(transitions == 1)[0] + 1).tolist())
            critical = {i for i in critical if 0 <= i < n}
            base = set(np.round(np.linspace(0, n - 1, MAX_GRAPH_POINTS)).astype(int).tolist())
            keep = sorted(base | critical)
            idx = np.array(keep)
            px_x = px_x[idx]
            px_y = px_y[idx]

        x0 = float(px_x[0])
        x1 = float(px_x[-1])
        poly_bottom = float(self._poly_bottom_y)
        line_bottom = float(self._bottom_y - LINE_BASELINE_INSET)

        # Close the fill polygon along the bottom; the stroke line gets its
        # own slightly inset baseline.
        pts = [(float(x), float(y)) for x, y in zip(px_x, px_y)]
        poly = [(x0, poly_bottom)] + pts + [(x1, poly_bottom)]
        line = [(x0, line_bottom)] + pts + [(x1, line_bottom)]

        return [poly], [line]

    def _rebuild_images(self):
        """Re-render both color variants and allocate the splice buffer."""
        if self._poly_data is None:
            return
        self._unplayed_rgb = self._render_to_numpy(UNPLAYED_FILL_RGB, UNPLAYED_STROKE_RGB)
        self._played_rgb = self._render_to_numpy(self._played_fill_rgb, self._played_stroke_rgb)
        self._composite_rgb = np.empty_like(self._unplayed_rgb)

    def _render_to_numpy(self, fill_rgb, stroke_rgb):
        """Rasterize the polygon at SUPERSAMPLE resolution with PIL, downscale
        with Lanczos for anti-aliasing, and return an (h, w, 3) uint8 array."""
        ss = SUPERSAMPLE
        sw, sh = self._w * ss, self._h * ss

        img = Image.new("RGB", (sw, sh), BG_COLOR_RGB)
        draw = ImageDraw.Draw(img)

        polys, lines = self._poly_data

        for seg_poly in polys:
            if len(seg_poly) >= 3:
                draw.polygon([(x * ss, y * ss) for x, y in seg_poly], fill=fill_rgb)

        for seg_line in lines:
            if len(seg_line) >= 2:
                draw.line([(x * ss, y * ss) for x, y in seg_line], fill=stroke_rgb, width=LINE_WIDTH)

        img = img.resize((self._w, self._h), Image.LANCZOS)
        # .copy() detaches from the frombuffer view so the array is writable.
        return np.frombuffer(img.tobytes(), dtype=np.uint8).reshape((self._h, self._w, 3)).copy()
|
||||
BIN
src/msd.exe
Normal file
BIN
src/msd.exe
Normal file
Binary file not shown.
108
src/msd_converter.py
Normal file
108
src/msd_converter.py
Normal file
@ -0,0 +1,108 @@
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
BASE_DIR = os.path.dirname(__file__)
|
||||
|
||||
|
||||
def _resolve_msd_command():
    """Locate the MSD calculator binary.

    Returns (argv list to launch it, path to the binary). Resolution order:
    the MSD_BIN_PATH environment variable, then msd.exe on Windows, then a
    native ./msd on POSIX, then msd.exe via Wine. Existence of the returned
    path is checked by the caller (calculate_msd), not here.
    """
    env_path = os.environ.get("MSD_BIN_PATH")
    if env_path:
        # A .exe override on a POSIX host is launched through Wine when available.
        if os.name != "nt" and env_path.lower().endswith(".exe"):
            wine = shutil.which("wine64") or shutil.which("wine")
            if wine:
                return [wine, env_path], env_path
        return [env_path], env_path

    windows_msd = os.path.join(BASE_DIR, "msd.exe")
    native_msd = os.path.join(BASE_DIR, "msd")

    if os.name == "nt":
        return [windows_msd], windows_msd

    if os.path.exists(native_msd):
        return [native_msd], native_msd

    if os.path.exists(windows_msd):
        wine = shutil.which("wine64") or shutil.which("wine")
        if wine:
            return [wine, windows_msd], windows_msd

    # Nothing usable found; return the native path so the caller's
    # existence check produces a helpful error message.
    return [native_msd], native_msd
|
||||
|
||||
|
||||
def parse_hitobjects(osu_file, mod="NM"):
    """Parse the [HitObjects] section of a .osu beatmap file.

    Args:
        osu_file: Path to the .osu file.
        mod: Rate mod — "DT" compresses times by 2/3, "HT" stretches them
            by 4/3; anything else leaves them unchanged.

    Returns:
        A list of {"x", "time", "type"} dicts in file order, with times
        already rate-adjusted (milliseconds).

    Fix: numeric fields are parsed via int(float(...)) so decimal
    coordinates/times, which some editors emit (e.g. "64.0"), no longer
    raise ValueError. The rate scale is also hoisted out of the loop.
    """
    # DT/HT time scale resolved once, not per line.
    scale = {"DT": 2 / 3, "HT": 4 / 3}.get(mod, 1.0)

    hitobjects = []
    in_section = False

    with open(osu_file, "r", encoding="utf8") as f:
        for line in f:
            line = line.strip()

            if line == "[HitObjects]":
                in_section = True
                continue

            if not in_section or not line:
                continue

            parts = line.split(",")
            # int(float(...)) tolerates decimal values where plain int(...)
            # would raise; rounding matches osu!'s integer pixel columns.
            x = int(float(parts[0]))
            time = int(int(float(parts[2])) * scale)
            obj_type = int(float(parts[3]))

            hitobjects.append({"x": x, "time": time, "type": obj_type})

    return hitobjects
|
||||
|
||||
|
||||
def osu_to_etterna_rows(hitobjects, keycount=4):
    """Convert parsed osu!mania hit objects to Etterna-style note rows.

    Objects are grouped by time (seconds, rounded to 4 decimals); each row
    carries a bitmask with bit c set for a note in column c.

    Fix: the column index is clamped into [0, keycount - 1]. An x of
    exactly 512 previously mapped to column `keycount`, setting a bit
    outside the keycount-wide mask, and a negative x would raise on the
    negative shift.

    Returns rows sorted by time: [{"notes": mask, "time": seconds}, ...].
    """
    rows = {}
    column_width = 512 / keycount

    for obj in hitobjects:
        time = round(obj["time"] / 1000.0, 4)
        column = int(obj["x"] // column_width)
        column = max(0, min(column, keycount - 1))  # clamp boundary coordinates
        rows[time] = rows.get(time, 0) | (1 << column)
        # LN releases are intentionally ignored (obj_type & 128)

    return [{"notes": rows[t], "time": t} for t in sorted(rows)]
|
||||
|
||||
|
||||
def calculate_msd(notes):
    """Run the external MSD calculator on Etterna-style note rows.

    The rows are sent as JSON on stdin; the binary prints a JSON skillset
    dict on stdout, which is returned parsed.

    Raises:
        FileNotFoundError: when no MSD binary can be located.
        RuntimeError: when msd.exe exists on POSIX but Wine is missing, or
            when the binary produces no output (previously this surfaced as
            a cryptic json.JSONDecodeError on the empty string).
    """
    cmd, msd_path = _resolve_msd_command()

    if not os.path.exists(msd_path):
        raise FileNotFoundError(
            f"MSD binary not found at '{msd_path}'. Set MSD_BIN_PATH or add a compatible executable to src/."
        )

    if os.name != "nt" and msd_path.lower().endswith(".exe") and len(cmd) == 1:
        raise RuntimeError(
            "Found msd.exe on Linux/macOS, but Wine is not installed. Install Wine or provide a native msd via MSD_BIN_PATH."
        )

    popen_kwargs = {
        "stdin": subprocess.PIPE,
        "stdout": subprocess.PIPE,
        "stderr": subprocess.PIPE,
        "text": True,
    }

    if os.name == "nt":
        # Keep the helper's console window from flashing up.
        popen_kwargs["creationflags"] = subprocess.CREATE_NO_WINDOW

    p = subprocess.Popen(cmd, **popen_kwargs)
    output, err = p.communicate(json.dumps(notes))

    # Stderr may carry warnings even on success; surface it either way.
    if err:
        print("MSD ERROR:", err, file=sys.stderr)

    # Fix: a failed run would otherwise crash in json.loads("") with an
    # unhelpful JSONDecodeError; report the real cause instead.
    if not output.strip():
        raise RuntimeError(
            f"MSD binary produced no output (exit code {p.returncode}). Stderr: {err.strip() or 'none'}"
        )

    return json.loads(output)
|
||||
66
src/osu_file_parser.py
Normal file
66
src/osu_file_parser.py
Normal file
@ -0,0 +1,66 @@
|
||||
class parser:
    """Minimal .osu beatmap parser for osu!mania note data.

    Streams through the file once, picking up the key count (CircleSize)
    and raw note columns/timestamps from [HitObjects]. Results are
    retrieved with get_parsed_data().
    """

    def __init__(self, file_path):
        self.file_path = file_path
        # -1 sentinels mean "not parsed yet / not found".
        self.od = -1
        self.column_count = -1
        self.columns = []      # column index per note
        self.note_starts = []  # start time (ms) per note
        self.note_ends = []    # hold end time per note; 0 for taps (see _parse_hit_object)
        self.note_types = []   # raw osu! type bitfield per note

    def process(self):
        """Parse the whole file.

        The helpers advance the shared file iterator themselves, so the
        for-loop and the helpers consume lines cooperatively; EOF inside a
        helper raises StopIteration from next(f), which ends parsing here.
        """
        with open(self.file_path, "r", encoding="utf-8") as f:
            try:
                for line in f:
                    self.read_metadata(f, line)

                    cc = self._read_column_count(line)
                    if cc != -1:
                        self.column_count = cc

                    # OD is hard-coded rather than read from the file.
                    self.od = 9

                    if self.column_count != -1:
                        self._read_notes(f, line, self.column_count)

            except StopIteration:
                pass

    def read_metadata(self, f, line):
        """Skip the [Metadata] section by consuming lines up to "Source:".

        NOTE(review): if a map has no "Source:" line this consumes the rest
        of the file (StopIteration then aborts parsing in process) — confirm
        all target maps include a Source: entry.
        """
        if "[Metadata]" in line:
            while "Source:" not in line:
                line = next(f)

    def _read_column_count(self, line):
        """Return the key count from a "CircleSize:" line, or -1 otherwise.

        Only the line's final character is read, with "0" treated as 10 keys
        — so 2-digit key counts other than 10 would be misread.
        """
        if "CircleSize:" not in line:
            return -1
        val = line.strip()[-1]
        if val == "0":
            val = "10"
        return int(float(val))

    def _read_notes(self, f, line, column_count):
        """On the [HitObjects] header, consume every remaining line as a hit
        object. Terminates via StopIteration from next(f) at EOF, which is
        caught in process (next never returns None, so the while condition
        is effectively "loop until EOF")."""
        if "[HitObjects]" not in line:
            return
        line = next(f)
        while line is not None:
            self._parse_hit_object(line, column_count)
            line = next(f)

    def _parse_hit_object(self, line, column_count):
        """Append one hit object's column, start time, type, and end field.

        For non-hold notes params[5] is the hitSample string whose first
        ':'-separated field is "0", so note_ends holds 0 for taps and the
        actual end time for holds.
        """
        params = line.split(",")
        column_width = 512 // column_count
        self.columns.append(int(float(params[0])) // column_width)
        self.note_starts.append(int(params[2]))
        self.note_types.append(int(params[3]))
        self.note_ends.append(int(params[5].split(":")[0]))

    def get_parsed_data(self):
        """Return [column_count, columns, note_starts, note_ends, note_types, od]."""
        return [
            self.column_count,
            self.columns,
            self.note_starts,
            self.note_ends,
            self.note_types,
            self.od,
        ]
|
||||
Loading…
x
Reference in New Issue
Block a user