Compare commits


2 Commits

Author  SHA1        Message                                                                        Date

Swifty  88ebef601a  add progress bar completion                                                    2026-02-05 11:44:58 +01:00

Swifty  d919bd5f54  feat(frontend): Add progress indicator during agent generation [SECRT-1883]   2026-02-05 09:12:39 +01:00

        Add an asymptotic progress bar that appears after 10 seconds of waiting,
        showing visual feedback for long-running tasks. The progress bar uses a
        half-life formula where it reaches ~50% at 30s, ~75% at 60s, ~87.5% at 90s,
        and so on - creating the classic "game loading bar" effect that slows down
        but never reaches 100%.
12 changed files with 223 additions and 277 deletions
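
As a quick check on the half-life numbers quoted in the commit message above, here is a minimal standalone TypeScript sketch (illustration only, not part of the diff) of the formula progress = maxProgress * (1 - 0.5^(elapsed / halfLife)) with the default 30-second half-life:

// Asymptotic progress as described in the commit message:
// progress = maxProgress * (1 - 0.5^(elapsedSeconds / halfLifeSeconds))
function asymptoticProgress(
  elapsedSeconds: number,
  halfLifeSeconds = 30,
  maxProgress = 100,
): number {
  return maxProgress * (1 - Math.pow(0.5, elapsedSeconds / halfLifeSeconds));
}

console.log(asymptoticProgress(30)); // 50   (one half-life)
console.log(asymptoticProgress(60)); // 75   (two half-lives)
console.log(asymptoticProgress(90)); // 87.5 (three half-lives)
// The value keeps rising but never reaches 100, matching the "game loading bar" effect.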

View File

@@ -1385,23 +1385,8 @@ class ExecutionProcessor:
f"[View User Details]({base_url}/admin/spending?search={user_email})"
)
# Send both Discord and AllQuiet alerts
correlation_id = f"insufficient_funds_{user_id}"
get_notification_manager_client().system_alert(
content=alert_message,
channel=DiscordChannel.PRODUCT,
correlation_id=correlation_id,
severity="critical",
status="open",
extra_attributes={
"user_id": user_id,
"user_email": user_email or "unknown",
"balance": f"${e.balance / 100:.2f}",
"attempted_cost": f"${abs(e.amount) / 100:.2f}",
"shortfall": f"${abs(shortfall) / 100:.2f}",
"agent_name": metadata.name if metadata else "Unknown",
},
get_notification_manager_client().discord_system_alert(
alert_message, DiscordChannel.PRODUCT
)
except Exception as alert_error:
logger.error(
@@ -1448,22 +1433,8 @@ class ExecutionProcessor:
f"Transaction cost: ${transaction_cost / 100:.2f}\n"
f"[View User Details]({base_url}/admin/spending?search={user_email})"
)
# Send both Discord and AllQuiet alerts
correlation_id = f"low_balance_{user_id}"
get_notification_manager_client().system_alert(
content=alert_message,
channel=DiscordChannel.PRODUCT,
correlation_id=correlation_id,
severity="warning",
status="open",
extra_attributes={
"user_id": user_id,
"user_email": user_email or "unknown",
"current_balance": f"${current_balance / 100:.2f}",
"transaction_cost": f"${transaction_cost / 100:.2f}",
"threshold": f"${LOW_BALANCE_THRESHOLD / 100:.2f}",
},
get_notification_manager_client().discord_system_alert(
alert_message, DiscordChannel.PRODUCT
)
except Exception as e:
logger.error(f"Failed to send low balance Discord alert: {e}")

View File

@@ -54,16 +54,13 @@ async def test_handle_low_balance_threshold_crossing(server: SpinTestServer):
assert isinstance(notification_call.data, LowBalanceData)
assert notification_call.data.current_balance == current_balance
# Verify system alert was sent (includes Discord and AllQuiet)
mock_client.system_alert.assert_called_once()
alert_call = mock_client.system_alert.call_args
discord_message = alert_call[1]["content"]
# Verify Discord alert was sent
mock_client.discord_system_alert.assert_called_once()
discord_message = mock_client.discord_system_alert.call_args[0][0]
assert "Low Balance Alert" in discord_message
assert "test@example.com" in discord_message
assert "$4.00" in discord_message
assert "$6.00" in discord_message
# Verify AllQuiet correlation ID is set
assert alert_call[1]["correlation_id"] == f"low_balance_{user_id}"
@pytest.mark.asyncio(loop_scope="session")
@@ -106,7 +103,7 @@ async def test_handle_low_balance_no_notification_when_not_crossing(
# Verify no notification was sent
mock_queue_notif.assert_not_called()
mock_client.system_alert.assert_not_called()
mock_client.discord_system_alert.assert_not_called()
@pytest.mark.asyncio(loop_scope="session")
@@ -149,4 +146,4 @@ async def test_handle_low_balance_no_duplicate_when_already_below(
# Verify no notification was sent (user was already below threshold)
mock_queue_notif.assert_not_called()
mock_client.system_alert.assert_not_called()
mock_client.discord_system_alert.assert_not_called()

View File

@@ -12,7 +12,7 @@ from backend.util.clients import (
get_database_manager_client,
get_notification_manager_client,
)
from backend.util.metrics import DiscordChannel, sentry_capture_error
from backend.util.metrics import sentry_capture_error
from backend.util.settings import Config
logger = logging.getLogger(__name__)
@@ -75,32 +75,7 @@ class BlockErrorMonitor:
if critical_alerts:
msg = "Block Error Rate Alert:\n\n" + "\n\n".join(critical_alerts)
# Send alert with correlation ID for block errors
# We'll create a simple hash of the block IDs that have errors
blocks_with_errors = [
stats.block_id
for name, stats in block_stats.items()
if stats.total_executions >= 10
and stats.error_rate >= threshold * 100
]
correlation_id = (
f"block_errors_{len(blocks_with_errors)}_blocks_{end_time.date()}"
)
self.notification_client.system_alert(
content=msg,
channel=DiscordChannel.PLATFORM,
correlation_id=correlation_id,
severity="warning",
status="open",
extra_attributes={
"blocks_affected": str(len(critical_alerts)),
"date": end_time.date().isoformat(),
"threshold": f"{threshold * 100}%",
},
)
self.notification_client.discord_system_alert(msg)
logger.info(
f"Sent block error rate alert for {len(critical_alerts)} blocks"
)
@@ -112,22 +87,7 @@ class BlockErrorMonitor:
block_stats, start_time, end_time
)
if top_blocks_msg:
# Daily summary gets a date-based correlation ID
correlation_id = f"block_error_daily_summary_{end_time.date()}"
self.notification_client.system_alert(
content=top_blocks_msg,
channel=DiscordChannel.PLATFORM,
correlation_id=correlation_id,
severity="minor",
status="open",
extra_attributes={
"type": "daily_summary",
"date": end_time.date().isoformat(),
"top_blocks_count": str(self.include_top_blocks),
},
)
self.notification_client.discord_system_alert(top_blocks_msg)
logger.info("Sent top blocks summary")
return "Sent top blocks summary"
@@ -140,22 +100,7 @@ class BlockErrorMonitor:
error = Exception(f"Error checking block error rates: {e}")
msg = str(error)
sentry_capture_error(error)
# Send error alert with generic correlation ID
correlation_id = "block_error_monitoring_failure"
self.notification_client.system_alert(
content=msg,
channel=DiscordChannel.PLATFORM,
correlation_id=correlation_id,
severity="critical",
status="open",
extra_attributes={
"error_type": type(e).__name__,
"error_message": str(e)[:200],
},
)
self.notification_client.discord_system_alert(msg)
return msg
def _get_block_stats_from_db(

View File

@@ -8,7 +8,7 @@ from backend.util.clients import (
get_database_manager_client,
get_notification_manager_client,
)
from backend.util.metrics import DiscordChannel, sentry_capture_error
from backend.util.metrics import sentry_capture_error
from backend.util.settings import Config
logger = logging.getLogger(__name__)
@@ -102,28 +102,7 @@ class LateExecutionMonitor:
msg = str(error)
sentry_capture_error(error)
# Generate correlation ID based on the threshold and number of late executions
correlation_id = f"late_execution_{self.config.execution_late_notification_threshold_secs}s_{num_total_late}_execs_{num_users}_users"
# Send both Discord and AllQuiet alerts
self.notification_client.system_alert(
content=msg,
channel=DiscordChannel.PLATFORM,
correlation_id=correlation_id,
severity="critical",
status="open",
extra_attributes={
"total_late_executions": str(num_total_late),
"queued_executions": str(num_queued),
"running_executions": str(num_running),
"affected_users": str(num_users),
"threshold_seconds": str(
self.config.execution_late_notification_threshold_secs
),
},
)
self.notification_client.discord_system_alert(msg)
return msg

View File

@@ -1,7 +1,7 @@
import asyncio
import logging
from datetime import datetime, timedelta, timezone
from typing import Awaitable, Callable, Literal
from typing import Awaitable, Callable
import aio_pika
from prisma.enums import NotificationType
@@ -33,12 +33,7 @@ from backend.data.user import (
from backend.notifications.email import EmailSender
from backend.util.clients import get_database_manager_async_client
from backend.util.logging import TruncatedLogger
from backend.util.metrics import (
AllQuietAlert,
DiscordChannel,
discord_send_alert,
send_allquiet_alert,
)
from backend.util.metrics import DiscordChannel, discord_send_alert
from backend.util.retry import continuous_retry
from backend.util.service import (
AppService,
@@ -422,54 +417,6 @@ class NotificationManager(AppService):
):
await discord_send_alert(content, channel)
@expose
async def allquiet_system_alert(self, alert: AllQuietAlert):
await send_allquiet_alert(alert)
@expose
async def system_alert(
self,
content: str,
channel: DiscordChannel = DiscordChannel.PLATFORM,
correlation_id: str | None = None,
severity: Literal["warning", "critical", "minor"] = "warning",
status: Literal["resolved", "open"] = "open",
extra_attributes: dict[str, str] | None = None,
):
"""Send both Discord and AllQuiet alerts for system events."""
# Send Discord alert
await discord_send_alert(content, channel)
# Send AllQuiet alert if correlation_id is provided
if correlation_id:
# Extract title from content (first line or first sentence)
lines = content.split("\n")
title = lines[0] if lines else content[:100]
# Remove Discord formatting from title
title = (
title.replace("**", "")
.replace("🚨", "")
.replace("⚠️", "")
.replace("", "")
.replace("", "")
.replace("📊", "")
.strip()
)
alert = AllQuietAlert(
severity=severity,
status=status,
title=title[:100], # Limit title length
description=content,
correlation_id=correlation_id,
channel=channel.value, # Pass channel as first-class field
extra_attributes=extra_attributes or {},
)
try:
await send_allquiet_alert(alert)
except Exception as e:
logger.error(f"Failed to send AllQuiet alert: {e}")
async def _queue_scheduled_notification(self, event: SummaryParamsEventModel):
"""Queue a scheduled notification - exposed method for other services to call"""
try:
@@ -1159,5 +1106,3 @@ class NotificationManagerClient(AppServiceClient):
)
queue_weekly_summary = endpoint_to_sync(NotificationManager.queue_weekly_summary)
discord_system_alert = endpoint_to_sync(NotificationManager.discord_system_alert)
allquiet_system_alert = endpoint_to_sync(NotificationManager.allquiet_system_alert)
system_alert = endpoint_to_sync(NotificationManager.system_alert)

View File

@@ -1,9 +1,8 @@
import logging
from enum import Enum
from typing import Literal
import sentry_sdk
from pydantic import BaseModel, Field, SecretStr
from pydantic import SecretStr
from sentry_sdk.integrations import DidNotEnable
from sentry_sdk.integrations.anthropic import AnthropicIntegration
from sentry_sdk.integrations.asyncio import AsyncioIntegration
@@ -52,31 +51,6 @@ def sentry_capture_error(error: BaseException):
sentry_sdk.flush()
class AllQuietAlert(BaseModel):
severity: Literal["warning"] | Literal["critical"] | Literal["minor"]
status: Literal["resolved"] | Literal["open"]
title: str | None = None
description: str | None = None
correlation_id: str | None = None
channel: str | None = None # Discord channel (platform or product)
extra_attributes: dict[str, str] = Field(default_factory=dict)
environment: str = (
f"app:{settings.config.app_env.value}-behave:{settings.config.behave_as.value}"
)
async def send_allquiet_alert(alert: AllQuietAlert):
hook_url = settings.secrets.allquiet_webhook_url
if not hook_url:
logging.warning("AllQuiet webhook URL not configured")
return
from backend.util.request import Requests
await Requests().post(hook_url, json=alert.model_dump())
async def discord_send_alert(
content: str, channel: DiscordChannel = DiscordChannel.PLATFORM
):

View File

@@ -4,7 +4,6 @@ import os
import threading
import time
from functools import wraps
from typing import Literal
from uuid import uuid4
from tenacity import (
@@ -44,14 +43,7 @@ def should_send_alert(func_name: str, exception: Exception, context: str = "") -
def send_rate_limited_discord_alert(
func_name: str,
exception: Exception,
context: str,
alert_msg: str,
channel=None,
correlation_id: str | None = None,
severity: Literal["warning", "critical", "minor"] = "critical",
extra_attributes: dict | None = None,
func_name: str, exception: Exception, context: str, alert_msg: str, channel=None
) -> bool:
"""
Send a Discord alert with rate limiting.
@@ -66,13 +58,8 @@ def send_rate_limited_discord_alert(
from backend.util.metrics import DiscordChannel
notification_client = get_notification_manager_client()
notification_client.system_alert(
content=alert_msg,
channel=channel or DiscordChannel.PLATFORM,
correlation_id=correlation_id,
severity=severity,
status="open",
extra_attributes=extra_attributes or {},
notification_client.discord_system_alert(
alert_msg, channel or DiscordChannel.PLATFORM
)
return True
@@ -87,30 +74,14 @@ def _send_critical_retry_alert(
"""Send alert when a function is approaching the retry failure threshold."""
prefix = f"{context}: " if context else ""
error_type = type(exception).__name__
# Create correlation ID based on context, function name, and error type
correlation_id = f"retry_failure_{context}_{func_name}_{error_type}".replace(
" ", "_"
).replace(":", "")
if send_rate_limited_discord_alert(
func_name,
exception,
context,
f"🚨 CRITICAL: Operation Approaching Failure Threshold: {prefix}'{func_name}'\n\n"
f"Current attempt: {attempt_number}/{EXCESSIVE_RETRY_THRESHOLD}\n"
f"Error: {error_type}: {exception}\n\n"
f"Error: {type(exception).__name__}: {exception}\n\n"
f"This operation is about to fail permanently. Investigate immediately.",
correlation_id=correlation_id,
severity="critical",
extra_attributes={
"function_name": func_name,
"attempt_number": str(attempt_number),
"max_attempts": str(EXCESSIVE_RETRY_THRESHOLD),
"error_type": error_type,
"context": context or "none",
},
):
logger.critical(
f"CRITICAL ALERT SENT: Operation {func_name} at attempt {attempt_number}"
@@ -214,14 +185,6 @@ def conn_retry(
logger.error(f"{prefix} {action_name} failed after retries: {exception}")
else:
if attempt_number == EXCESSIVE_RETRY_THRESHOLD:
error_type = type(exception).__name__
# Create correlation ID for infrastructure issues
correlation_id = (
f"infrastructure_{resource_name}_{action_name}_{func_name}".replace(
" ", "_"
)
)
if send_rate_limited_discord_alert(
func_name,
exception,
@@ -231,18 +194,8 @@ def conn_retry(
f"Action: {action_name}\n"
f"Function: {func_name}\n"
f"Current attempt: {attempt_number}/{max_retry + 1}\n"
f"Error: {error_type}: {str(exception)[:200]}{'...' if len(str(exception)) > 200 else ''}\n\n"
f"Error: {type(exception).__name__}: {str(exception)[:200]}{'...' if len(str(exception)) > 200 else ''}\n\n"
f"Infrastructure component is approaching failure threshold. Investigate immediately.",
correlation_id=correlation_id,
severity="critical",
extra_attributes={
"resource_name": resource_name,
"action_name": action_name,
"function_name": func_name,
"attempt_number": str(attempt_number),
"max_attempts": str(max_retry + 1),
"error_type": error_type,
},
):
logger.critical(
f"INFRASTRUCTURE ALERT SENT: {resource_name} at {attempt_number} attempts"

View File

@@ -166,16 +166,16 @@ class TestRetryRateLimiting:
# First alert should be sent
_send_critical_retry_alert("spend_credits", 50, exc, "Service communication")
assert mock_client.system_alert.call_count == 1
assert mock_client.discord_system_alert.call_count == 1
# Second identical alert should be rate limited (not sent)
_send_critical_retry_alert("spend_credits", 50, exc, "Service communication")
assert mock_client.system_alert.call_count == 1 # Still 1, not 2
assert mock_client.discord_system_alert.call_count == 1 # Still 1, not 2
# Different error should be allowed
exc2 = ValueError("different API error")
_send_critical_retry_alert("spend_credits", 50, exc2, "Service communication")
assert mock_client.system_alert.call_count == 2
assert mock_client.discord_system_alert.call_count == 2
@patch("backend.util.clients.get_notification_manager_client")
def test_send_critical_retry_alert_handles_notification_failure(
@@ -183,7 +183,7 @@ class TestRetryRateLimiting:
):
"""Test that notification failures don't break the rate limiter."""
mock_client = Mock()
mock_client.system_alert.side_effect = Exception("Notification failed")
mock_client.discord_system_alert.side_effect = Exception("Notification failed")
mock_get_client.return_value = mock_client
exc = ValueError("test error")
@@ -221,7 +221,7 @@ class TestRetryRateLimiting:
_send_critical_retry_alert(
"_call_method_sync", 50, exc, "Service communication"
)
assert mock_client.system_alert.call_count == 1
assert mock_client.discord_system_alert.call_count == 1
# Next 950 failures should not send alerts (rate limited)
for _ in range(950):
@@ -230,7 +230,7 @@ class TestRetryRateLimiting:
)
# Still only 1 alert sent total
assert mock_client.system_alert.call_count == 1
assert mock_client.discord_system_alert.call_count == 1
@patch("backend.util.clients.get_notification_manager_client")
def test_retry_decorator_with_excessive_failures(self, mock_get_client):
@@ -248,4 +248,4 @@ class TestRetryRateLimiting:
always_failing_function()
# Should have sent exactly one alert at the threshold
assert mock_client.system_alert.call_count == 1
assert mock_client.discord_system_alert.call_count == 1

View File

@@ -682,8 +682,6 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
description="The LaunchDarkly SDK key for feature flag management",
)
allquiet_webhook_url: str = Field(default="", description="AllQuiet webhook URL")
ayrshare_api_key: str = Field(default="", description="Ayrshare API Key")
ayrshare_jwt_key: str = Field(default="", description="Ayrshare private Key")

View File

@@ -1,6 +1,7 @@
"use client";
import { cn } from "@/lib/utils";
import { useEffect, useState } from "react";
import type { ChatMessageData } from "../ChatMessage/useChatMessage";
import { StreamingMessage } from "../StreamingMessage/StreamingMessage";
import { ThinkingMessage } from "../ThinkingMessage/ThinkingMessage";
@@ -31,6 +32,29 @@ export function MessageList({
isStreaming,
});
const [showThinkingMessage, setShowThinkingMessage] = useState(false);
const [thinkingComplete, setThinkingComplete] = useState(false);
// Manage thinking message visibility and completion state
useEffect(() => {
if (isStreaming && streamingChunks.length === 0) {
// Start showing thinking message
setShowThinkingMessage(true);
setThinkingComplete(false);
} else if (streamingChunks.length > 0 && showThinkingMessage) {
// Chunks arrived - trigger completion animation
setThinkingComplete(true);
} else if (!isStreaming) {
// Streaming ended completely - reset state
setShowThinkingMessage(false);
setThinkingComplete(false);
}
}, [isStreaming, streamingChunks.length, showThinkingMessage]);
function handleThinkingAnimationComplete() {
setShowThinkingMessage(false);
}
return (
<div className="relative flex min-h-0 flex-1 flex-col">
{/* Top fade shadow */}
@@ -92,10 +116,15 @@ export function MessageList({
})()}
{/* Render thinking message when streaming but no chunks yet */}
{isStreaming && streamingChunks.length === 0 && <ThinkingMessage />}
{showThinkingMessage && (
<ThinkingMessage
isComplete={thinkingComplete}
onAnimationComplete={handleThinkingAnimationComplete}
/>
)}
{/* Render streaming message if active */}
{isStreaming && streamingChunks.length > 0 && (
{/* Render streaming message if active (wait for thinking animation to complete) */}
{isStreaming && streamingChunks.length > 0 && !showThinkingMessage && (
<StreamingMessage
chunks={streamingChunks}
onComplete={onStreamComplete}

View File

@@ -1,28 +1,41 @@
import { Progress } from "@/components/atoms/Progress/Progress";
import { cn } from "@/lib/utils";
import { useEffect, useRef, useState } from "react";
import { AIChatBubble } from "../AIChatBubble/AIChatBubble";
import { useAsymptoticProgress } from "../ToolCallMessage/useAsymptoticProgress";
export interface ThinkingMessageProps {
className?: string;
isComplete?: boolean;
onAnimationComplete?: () => void;
}
export function ThinkingMessage({ className }: ThinkingMessageProps) {
export function ThinkingMessage({
className,
isComplete = false,
onAnimationComplete,
}: ThinkingMessageProps) {
const [showSlowLoader, setShowSlowLoader] = useState(false);
const [showCoffeeMessage, setShowCoffeeMessage] = useState(false);
const timerRef = useRef<NodeJS.Timeout | null>(null);
const coffeeTimerRef = useRef<NodeJS.Timeout | null>(null);
const delayTimerRef = useRef<NodeJS.Timeout | null>(null);
const { progress, isAnimationDone } = useAsymptoticProgress(
showCoffeeMessage,
isComplete,
);
useEffect(() => {
if (timerRef.current === null) {
timerRef.current = setTimeout(() => {
setShowSlowLoader(true);
}, 8000);
}, 3000);
}
if (coffeeTimerRef.current === null) {
coffeeTimerRef.current = setTimeout(() => {
setShowCoffeeMessage(true);
}, 10000);
}, 8000);
}
return () => {
@@ -37,6 +50,22 @@ export function ThinkingMessage({ className }: ThinkingMessageProps) {
};
}, []);
// Handle completion animation delay before unmounting
useEffect(() => {
if (isAnimationDone && onAnimationComplete) {
delayTimerRef.current = setTimeout(() => {
onAnimationComplete();
}, 200); // 200ms delay after animation completes
}
return () => {
if (delayTimerRef.current) {
clearTimeout(delayTimerRef.current);
delayTimerRef.current = null;
}
};
}, [isAnimationDone, onAnimationComplete]);
return (
<div
className={cn(
@@ -49,9 +78,18 @@ export function ThinkingMessage({ className }: ThinkingMessageProps) {
<AIChatBubble>
<div className="transition-all duration-500 ease-in-out">
{showCoffeeMessage ? (
<span className="inline-block animate-shimmer bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent">
This could take a few minutes, grab a coffee
</span>
<div className="flex flex-col items-center gap-3">
<div className="flex w-full max-w-[280px] flex-col gap-1.5">
<div className="flex items-center justify-between text-xs text-neutral-500">
<span>Working on it...</span>
<span>{Math.round(progress)}%</span>
</div>
<Progress value={progress} className="h-2 w-full" />
</div>
<span className="inline-block animate-shimmer bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent">
This could take a few minutes, grab a coffee
</span>
</div>
) : showSlowLoader ? (
<span className="inline-block animate-shimmer bg-gradient-to-r from-neutral-400 via-neutral-600 to-neutral-400 bg-[length:200%_100%] bg-clip-text text-transparent">
Taking a bit more time...

View File

@@ -0,0 +1,117 @@
import { useEffect, useRef, useState } from "react";
/**
* Cubic Ease Out easing function: 1 - (1 - t)^3
* Starts fast and decelerates smoothly to a stop.
*/
function cubicEaseOut(t: number): number {
return 1 - Math.pow(1 - t, 3);
}
export interface AsymptoticProgressResult {
progress: number;
isAnimationDone: boolean;
}
/**
* Hook that returns a progress value that starts fast and slows down,
* asymptotically approaching but never reaching the max value.
*
* Uses a half-life formula: progress = max * (1 - 0.5^(time/halfLife))
* This creates the "game loading bar" effect where:
* - 50% is reached at halfLifeSeconds
* - 75% is reached at 2 * halfLifeSeconds
* - 87.5% is reached at 3 * halfLifeSeconds
* - and so on...
*
* When isComplete is set to true, animates from current progress to 100%
* using Cubic Ease Out over 300ms.
*
* @param isActive - Whether the progress should be animating
* @param isComplete - Whether to animate to 100% (completion animation)
* @param halfLifeSeconds - Time in seconds to reach 50% progress (default: 30)
* @param maxProgress - Maximum progress value to approach (default: 100)
* @param intervalMs - Update interval in milliseconds (default: 100)
* @returns Object with current progress value and whether completion animation is done
*/
export function useAsymptoticProgress(
isActive: boolean,
isComplete = false,
halfLifeSeconds = 30,
maxProgress = 100,
intervalMs = 100,
): AsymptoticProgressResult {
const [progress, setProgress] = useState(0);
const [isAnimationDone, setIsAnimationDone] = useState(false);
const elapsedTimeRef = useRef(0);
const completionStartProgressRef = useRef<number | null>(null);
const animationFrameRef = useRef<number | null>(null);
// Handle asymptotic progress when active but not complete
useEffect(() => {
if (!isActive || isComplete) {
if (!isComplete) {
setProgress(0);
elapsedTimeRef.current = 0;
setIsAnimationDone(false);
completionStartProgressRef.current = null;
}
return;
}
const interval = setInterval(() => {
elapsedTimeRef.current += intervalMs / 1000;
// Half-life approach: progress = max * (1 - 0.5^(time/halfLife))
// At t=halfLife: 50%, at t=2*halfLife: 75%, at t=3*halfLife: 87.5%, etc.
const newProgress =
maxProgress *
(1 - Math.pow(0.5, elapsedTimeRef.current / halfLifeSeconds));
setProgress(newProgress);
}, intervalMs);
return () => clearInterval(interval);
}, [isActive, isComplete, halfLifeSeconds, maxProgress, intervalMs]);
// Handle completion animation
useEffect(() => {
if (!isComplete) {
return;
}
// Capture the starting progress when completion begins
if (completionStartProgressRef.current === null) {
completionStartProgressRef.current = progress;
}
const startProgress = completionStartProgressRef.current;
const animationDuration = 300; // 300ms
const startTime = performance.now();
function animate(currentTime: number) {
const elapsed = currentTime - startTime;
const t = Math.min(elapsed / animationDuration, 1);
// Cubic Ease Out from current progress to maxProgress
const easedProgress =
startProgress + (maxProgress - startProgress) * cubicEaseOut(t);
setProgress(easedProgress);
if (t < 1) {
animationFrameRef.current = requestAnimationFrame(animate);
} else {
setProgress(maxProgress);
setIsAnimationDone(true);
}
}
animationFrameRef.current = requestAnimationFrame(animate);
return () => {
if (animationFrameRef.current !== null) {
cancelAnimationFrame(animationFrameRef.current);
}
};
}, [isComplete, maxProgress]);
return { progress, isAnimationDone };
}
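
For reference, a minimal usage sketch of the hook in a hypothetical consumer component (component name and import path are assumptions, not part of this change): one boolean drives the asymptotic ramp, a second triggers the ease-out to 100%, and isAnimationDone signals when to react to completion.

import { useEffect } from "react";
import { useAsymptoticProgress } from "./useAsymptoticProgress";

// Hypothetical consumer: ramps while `loading` is true, eases to 100%
// once `done` flips, then notifies the parent after the animation finishes.
export function LoadingBar({
  loading,
  done,
  onFinished,
}: {
  loading: boolean;
  done: boolean;
  onFinished?: () => void;
}) {
  const { progress, isAnimationDone } = useAsymptoticProgress(loading, done);

  useEffect(() => {
    if (isAnimationDone && onFinished) onFinished();
  }, [isAnimationDone, onFinished]);

  return <progress value={progress} max={100} />;
}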