Mirror of https://github.com/Significant-Gravitas/AutoGPT.git, synced 2026-02-11 15:25:16 -05:00

Compare commits: 5 commits, feat/dummy...fix/copilo
| Author | SHA1 | Date |
|---|---|---|
| | 62c9e840b8 | |
| | 495d01b09b | |
| | b334f1a843 | |
| | 5fd1482944 | |
| | efd1e96235 | |
@@ -1090,6 +1090,7 @@ async def _stream_chat_chunks(
                 return
         except Exception as e:
             last_error = e
+
             if _is_retryable_error(e) and retry_count < MAX_RETRIES:
                 retry_count += 1
                 # Calculate delay with exponential backoff
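The `# Calculate delay with exponential backoff` comment above refers to retry pacing whose actual formula sits outside this hunk (only `delay = min(` is visible in a later hunk). A minimal sketch of capped exponential backoff with full jitter, assuming hypothetical `BASE_DELAY_SECONDS` and `MAX_DELAY_SECONDS` constants rather than the repo's real values:

```python
import random

# Hypothetical constants; the real ones are defined outside these hunks.
BASE_DELAY_SECONDS = 1.0
MAX_DELAY_SECONDS = 30.0

def _backoff_delay(retry_count: int) -> float:
    """Capped exponential backoff with full jitter (sketch, not the repo's formula)."""
    # Double the base delay on each retry, but never exceed the cap.
    capped = min(BASE_DELAY_SECONDS * 2 ** (retry_count - 1), MAX_DELAY_SECONDS)
    # Full jitter spreads concurrent retries so they don't stampede the API.
    return random.uniform(0, capped)
```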
@@ -1105,12 +1106,26 @@ async def _stream_chat_chunks(
                     continue # Retry the stream
                 else:
                     # Non-retryable error or max retries exceeded
-                    logger.error(
-                        f"Error in stream (not retrying): {e!s}",
-                        exc_info=True,
+                    _log_api_error(
+                        error=e,
+                        session_id=session.session_id if session else None,
+                        message_count=len(messages) if messages else None,
+                        model=model,
+                        retry_count=retry_count,
                     )
                     error_code = None
                     error_text = str(e)
+
+                    error_details = _extract_api_error_details(e)
+                    if error_details.get("response_body"):
+                        body = error_details["response_body"]
+                        if isinstance(body, dict):
+                            err = body.get("error")
+                            if isinstance(err, dict) and err.get("message"):
+                                error_text = err["message"]
+                            elif body.get("message"):
+                                error_text = body["message"]
+
                     if _is_region_blocked_error(e):
                         error_code = "MODEL_NOT_AVAILABLE_REGION"
                         error_text = (
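The extraction block added above prefers a nested `error.message` over a top-level `message` when rewriting `error_text`. A self-contained sketch of that precedence, using hypothetical provider bodies (real response shapes vary between OpenAI-style and other providers):

```python
def _best_message(body: dict) -> str | None:
    """Mirrors the precedence in the hunk above: a nested error.message wins,
    then a flat top-level message, else None."""
    err = body.get("error")
    if isinstance(err, dict) and err.get("message"):
        return err["message"]
    if body.get("message"):
        return body["message"]
    return None

# Hypothetical example bodies:
assert _best_message({"error": {"message": "Model overloaded"}}) == "Model overloaded"
assert _best_message({"message": "Bad request"}) == "Bad request"
assert _best_message({"detail": "unrelated"}) is None
```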
@@ -1127,9 +1142,12 @@ async def _stream_chat_chunks(
 
     # If we exit the retry loop without returning, it means we exhausted retries
     if last_error:
-        logger.error(
-            f"Max retries ({MAX_RETRIES}) exceeded. Last error: {last_error!s}",
-            exc_info=True,
+        _log_api_error(
+            error=last_error,
+            session_id=session.session_id if session else None,
+            message_count=len(messages) if messages else None,
+            model=model,
+            retry_count=MAX_RETRIES,
         )
         yield StreamError(errorText=f"Max retries exceeded: {last_error!s}")
         yield StreamFinish()
@@ -1701,6 +1719,7 @@ async def _generate_llm_continuation(
             break # Success, exit retry loop
         except Exception as e:
             last_error = e
+
             if _is_retryable_error(e) and retry_count < MAX_RETRIES:
                 retry_count += 1
                 delay = min(
@@ -1714,17 +1733,23 @@ async def _generate_llm_continuation(
                 await asyncio.sleep(delay)
                 continue
             else:
-                # Non-retryable error - log and exit gracefully
-                logger.error(
-                    f"Non-retryable error in LLM continuation: {e!s}",
-                    exc_info=True,
+                # Non-retryable error - log details and exit gracefully
+                _log_api_error(
+                    error=e,
+                    session_id=session_id,
+                    message_count=len(messages) if messages else None,
+                    model=config.model,
+                    retry_count=retry_count,
                 )
                 return
 
     if last_error:
-        logger.error(
-            f"Max retries ({MAX_RETRIES}) exceeded for LLM continuation. "
-            f"Last error: {last_error!s}"
+        _log_api_error(
+            error=last_error,
+            session_id=session_id,
+            message_count=len(messages) if messages else None,
+            model=config.model,
+            retry_count=MAX_RETRIES,
         )
         return
 
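Both `_generate_llm_continuation` call sites now pass the same structured context (`session_id`, `message_count`, `model`, `retry_count`) to `_log_api_error` instead of formatting an f-string for `logger.error`; the helper itself is defined in the hunk below.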
@@ -1764,6 +1789,89 @@ async def _generate_llm_continuation(
         logger.error(f"Failed to generate LLM continuation: {e}", exc_info=True)
 
 
+def _log_api_error(
+    error: Exception,
+    session_id: str | None = None,
+    message_count: int | None = None,
+    model: str | None = None,
+    retry_count: int = 0,
+) -> None:
+    """Log detailed API error information for debugging."""
+    details = _extract_api_error_details(error)
+    details["session_id"] = session_id
+    details["message_count"] = message_count
+    details["model"] = model
+    details["retry_count"] = retry_count
+
+    if isinstance(error, RateLimitError):
+        logger.warning(f"Rate limit error: {details}")
+    elif isinstance(error, APIConnectionError):
+        logger.warning(f"API connection error: {details}")
+    elif isinstance(error, APIStatusError) and error.status_code >= 500:
+        logger.error(f"API server error (5xx): {details}")
+    else:
+        logger.error(f"API error: {details}")
+
+
+def _extract_api_error_details(error: Exception) -> dict[str, Any]:
+    """Extract detailed information from OpenAI/OpenRouter API errors."""
+    error_msg = str(error)
+    details: dict[str, Any] = {
+        "error_type": type(error).__name__,
+        "error_message": error_msg[:500] + "..." if len(error_msg) > 500 else error_msg,
+    }
+
+    if hasattr(error, "code"):
+        details["code"] = getattr(error, "code", None)
+    if hasattr(error, "param"):
+        details["param"] = getattr(error, "param", None)
+
+    if isinstance(error, APIStatusError):
+        details["status_code"] = error.status_code
+        details["request_id"] = getattr(error, "request_id", None)
+
+    if hasattr(error, "body") and error.body:
+        details["response_body"] = _sanitize_error_body(error.body)
+
+    if hasattr(error, "response") and error.response:
+        headers = error.response.headers
+        details["openrouter_provider"] = headers.get("x-openrouter-provider")
+        details["openrouter_model"] = headers.get("x-openrouter-model")
+        details["retry_after"] = headers.get("retry-after")
+        details["rate_limit_remaining"] = headers.get("x-ratelimit-remaining")
+
+    return details
+
+
+def _sanitize_error_body(
+    body: Any, max_length: int = 2000
+) -> dict[str, Any] | str | None:
+    """Extract only safe fields from error response body to avoid logging sensitive data."""
+    if not isinstance(body, dict):
+        # Non-dict bodies (e.g., HTML error pages) - return truncated string
+        if body is not None:
+            body_str = str(body)
+            if len(body_str) > max_length:
+                return body_str[:max_length] + "...[truncated]"
+            return body_str
+        return None
+
+    safe_fields = ("message", "type", "code", "param", "error")
+    sanitized: dict[str, Any] = {}
+
+    for field in safe_fields:
+        if field in body:
+            value = body[field]
+            if field == "error" and isinstance(value, dict):
+                sanitized[field] = _sanitize_error_body(value, max_length)
+            elif isinstance(value, str) and len(value) > max_length:
+                sanitized[field] = value[:max_length] + "...[truncated]"
+            else:
+                sanitized[field] = value
+
+    return sanitized if sanitized else None
+
+
 async def _generate_llm_continuation_with_streaming(
     session_id: str,
     user_id: str | None,
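Note that `_sanitize_error_body` allow-lists fields rather than redacting known-sensitive ones, so anything outside `("message", "type", "code", "param", "error")` never reaches the logs. A quick sketch of the expected behavior, where `request_headers` is a hypothetical stand-in for a field that would be dropped:

```python
# Assumes _sanitize_error_body from the diff above is importable.
body = {
    "error": {"message": "Rate limit exceeded", "code": "rate_limited"},
    "request_headers": {"authorization": "Bearer sk-..."},  # dropped: not allow-listed
}
print(_sanitize_error_body(body))
# -> {'error': {'message': 'Rate limit exceeded', 'code': 'rate_limited'}}

# Non-dict bodies (e.g., an HTML error page) come back as a truncated string:
print(_sanitize_error_body("<html>502 Bad Gateway</html>" * 200, max_length=40))
# -> first 40 characters plus "...[truncated]"
```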
@@ -3,8 +3,6 @@
 import queue
 import threading
 
-import pytest
-
 from backend.data.execution import ExecutionQueue
 
 