Mirror of https://github.com/Significant-Gravitas/AutoGPT.git, synced 2026-02-03 11:24:57 -05:00
Compare commits (1 commit): feat/text- ... fix/copilo

| Author | SHA1 | Date |
|---|---|---|
| | efd1e96235 | |
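This commit replaces the ad-hoc `logger.error(...)` calls in the chat streaming and LLM continuation retry loops with a structured `_log_api_error` helper, and adds two supporting utilities: `_extract_api_error_details`, which pulls status codes, request IDs, and OpenRouter headers off OpenAI-style exceptions, and `_sanitize_error_body`, which whitelists safe fields from error response bodies before they are logged. It also surfaces provider error messages to users by extracting `error.message` from the response body.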
```diff
@@ -1419,6 +1419,7 @@ async def _stream_chat_chunks(
                 return
         except Exception as e:
             last_error = e
+
             if _is_retryable_error(e) and retry_count < MAX_RETRIES:
                 retry_count += 1
                 # Calculate delay with exponential backoff
```
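Context for the hunks below: both `_stream_chat_chunks` and `_generate_llm_continuation` share one retry pattern, tracking `last_error` and `retry_count` and sleeping with capped exponential backoff between attempts. Here is a minimal standalone sketch of that pattern; the actual delay formula sits outside these hunks, so `BASE_DELAY`, `MAX_DELAY`, the jitter, and the stand-in `_is_retryable_error` predicate are assumptions. The real code also records the failure in `last_error` and reports it after the loop instead of re-raising.

```python
import asyncio
import random

MAX_RETRIES = 3   # assumed value; the real constant lives elsewhere in the module
BASE_DELAY = 1.0  # assumed base delay in seconds
MAX_DELAY = 30.0  # assumed cap, matching the `delay = min(...)` shape in the diff


def _is_retryable_error(e: Exception) -> bool:
    # Stand-in predicate; the module's real check inspects API error types.
    return isinstance(e, (TimeoutError, ConnectionError))


async def call_with_backoff(operation):
    """Retry `operation` with capped exponential backoff; re-raise when exhausted."""
    for retry_count in range(MAX_RETRIES + 1):
        try:
            return await operation()
        except Exception as e:
            if not (_is_retryable_error(e) and retry_count < MAX_RETRIES):
                raise  # non-retryable, or out of attempts
            # Exponential backoff: 1s, 2s, 4s, ... capped at MAX_DELAY, plus jitter.
            delay = min(BASE_DELAY * (2**retry_count), MAX_DELAY)
            await asyncio.sleep(delay + random.uniform(0, 0.1))
```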
```diff
@@ -1434,12 +1435,24 @@ async def _stream_chat_chunks(
                 continue  # Retry the stream
             else:
                 # Non-retryable error or max retries exceeded
-                logger.error(
-                    f"Error in stream (not retrying): {e!s}",
-                    exc_info=True,
+                _log_api_error(
+                    error=e,
+                    session_id=session.session_id if session else None,
+                    message_count=len(messages) if messages else None,
+                    model=model,
+                    retry_count=retry_count,
                 )
                 error_code = None
                 error_text = str(e)
+
+                error_details = _extract_api_error_details(e)
+                if error_details.get("response_body"):
+                    body = error_details["response_body"]
+                    if isinstance(body, dict) and body.get("error", {}).get(
+                        "message"
+                    ):
+                        error_text = body["error"]["message"]
+
                 if _is_region_blocked_error(e):
                     error_code = "MODEL_NOT_AVAILABLE_REGION"
                     error_text = (
```
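The new `error_text` extraction above assumes the conventional OpenAI-style error envelope, `{"error": {"message": ...}}`. A standalone illustration of the same logic with a stubbed `error_details` dict (the message text is invented):

```python
# Stubbed `error_details`, mimicking what _extract_api_error_details returns.
error_details = {
    "response_body": {
        "error": {
            "message": "Provider returned 429: quota exceeded",
            "type": "rate_limit_error",
        }
    }
}

error_text = "str(e) fallback"
if error_details.get("response_body"):
    body = error_details["response_body"]
    if isinstance(body, dict) and body.get("error", {}).get("message"):
        error_text = body["error"]["message"]

print(error_text)  # -> Provider returned 429: quota exceeded
```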
```diff
@@ -1456,9 +1469,12 @@ async def _stream_chat_chunks(
 
     # If we exit the retry loop without returning, it means we exhausted retries
     if last_error:
-        logger.error(
-            f"Max retries ({MAX_RETRIES}) exceeded. Last error: {last_error!s}",
-            exc_info=True,
+        _log_api_error(
+            error=last_error,
+            session_id=session.session_id if session else None,
+            message_count=len(messages) if messages else None,
+            model=model,
+            retry_count=MAX_RETRIES,
         )
         yield StreamError(errorText=f"Max retries exceeded: {last_error!s}")
     yield StreamFinish()
```
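Once retries are exhausted, the stream logs through the shared helper and then emits a `StreamError` followed by `StreamFinish`, so consumers always receive a terminal event. A sketch of that tail with the event types stubbed as dataclasses; the real definitions live elsewhere in this codebase:

```python
from dataclasses import dataclass


@dataclass
class StreamError:  # stub; the field name matches the diff
    errorText: str


@dataclass
class StreamFinish:  # stub
    pass


async def stream_tail(last_error: Exception | None):
    """Yield the terminal events exactly as the hunk above does."""
    if last_error:
        yield StreamError(errorText=f"Max retries exceeded: {last_error!s}")
    yield StreamFinish()

# Consuming it: `async for event in stream_tail(RuntimeError("boom")): ...`
```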
```diff
@@ -1879,6 +1895,7 @@ async def _generate_llm_continuation(
             break  # Success, exit retry loop
         except Exception as e:
             last_error = e
+
             if _is_retryable_error(e) and retry_count < MAX_RETRIES:
                 retry_count += 1
                 delay = min(
```
```diff
@@ -1892,17 +1909,23 @@ async def _generate_llm_continuation(
                 await asyncio.sleep(delay)
                 continue
             else:
-                # Non-retryable error - log and exit gracefully
-                logger.error(
-                    f"Non-retryable error in LLM continuation: {e!s}",
-                    exc_info=True,
+                # Non-retryable error - log details and exit gracefully
+                _log_api_error(
+                    error=e,
+                    session_id=session_id,
+                    message_count=len(messages) if messages else None,
+                    model=config.model,
+                    retry_count=retry_count,
                 )
                 return
 
     if last_error:
-        logger.error(
-            f"Max retries ({MAX_RETRIES}) exceeded for LLM continuation. "
-            f"Last error: {last_error!s}"
+        _log_api_error(
+            error=last_error,
+            session_id=session_id,
+            message_count=len(messages) if messages else None,
+            model=config.model,
+            retry_count=MAX_RETRIES,
         )
         return
 
```
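Both call sites in this hunk pass the same keyword set and differ only in `retry_count` (the current attempt count versus `MAX_RETRIES`). A hypothetical call, assuming `_log_api_error` from the final hunk is in scope; the session, count, and model values are invented:

```python
try:
    raise TimeoutError("upstream read timed out")  # stand-in failure
except Exception as e:
    _log_api_error(
        error=e,
        session_id="sess_demo",      # invented
        message_count=8,             # invented
        model="openai/gpt-4o-mini",  # invented
        retry_count=3,
    )
```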
```diff
@@ -1940,3 +1963,78 @@ async def _generate_llm_continuation(
 
     except Exception as e:
         logger.error(f"Failed to generate LLM continuation: {e}", exc_info=True)
+
+
+def _log_api_error(
+    error: Exception,
+    session_id: str | None = None,
+    message_count: int | None = None,
+    model: str | None = None,
+    retry_count: int = 0,
+) -> None:
+    """Log detailed API error information for debugging."""
+    details = _extract_api_error_details(error)
+    details["session_id"] = session_id
+    details["message_count"] = message_count
+    details["model"] = model
+    details["retry_count"] = retry_count
+
+    if isinstance(error, RateLimitError):
+        logger.warning(f"Rate limit error: {details}")
+    elif isinstance(error, APIConnectionError):
+        logger.warning(f"API connection error: {details}")
+    elif isinstance(error, APIStatusError) and error.status_code >= 500:
+        logger.error(f"API server error (5xx): {details}")
+    else:
+        logger.error(f"API error: {details}")
+
+
+def _extract_api_error_details(error: Exception) -> dict[str, Any]:
+    """Extract detailed information from OpenAI/OpenRouter API errors."""
+    error_msg = str(error)
+    details: dict[str, Any] = {
+        "error_type": type(error).__name__,
+        "error_message": error_msg[:500] + "..." if len(error_msg) > 500 else error_msg,
+    }
+
+    if hasattr(error, "code"):
+        details["code"] = error.code
+    if hasattr(error, "param"):
+        details["param"] = error.param
+
+    if isinstance(error, APIStatusError):
+        details["status_code"] = error.status_code
+        details["request_id"] = getattr(error, "request_id", None)
+
+    if hasattr(error, "body") and error.body:
+        details["response_body"] = _sanitize_error_body(error.body)
+
+    if hasattr(error, "response") and error.response:
+        headers = error.response.headers
+        details["openrouter_provider"] = headers.get("x-openrouter-provider")
+        details["openrouter_model"] = headers.get("x-openrouter-model")
+        details["retry_after"] = headers.get("retry-after")
+        details["rate_limit_remaining"] = headers.get("x-ratelimit-remaining")
+
+    return details
+
+
+def _sanitize_error_body(body: Any, max_length: int = 2000) -> dict[str, Any] | None:
+    """Extract only safe fields from error response body to avoid logging sensitive data."""
+    if not isinstance(body, dict):
+        return None
+
+    safe_fields = ("message", "type", "code", "param", "error")
+    sanitized: dict[str, Any] = {}
+
+    for field in safe_fields:
+        if field in body:
+            value = body[field]
+            if field == "error" and isinstance(value, dict):
+                sanitized[field] = _sanitize_error_body(value, max_length)
+            elif isinstance(value, str) and len(value) > max_length:
+                sanitized[field] = value[:max_length] + "...[truncated]"
+            else:
+                sanitized[field] = value
+
+    return sanitized if sanitized else None
```
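The branching in `_log_api_error` downgrades transient failures (rate limits, connection drops) to warnings and reserves `logger.error` for 5xx responses and unexpected errors. Because `_extract_api_error_details` probes with `hasattr`/`getattr`, it also degrades gracefully on plain exceptions. A quick check, assuming the helper above is importable (module path hypothetical):

```python
# from backend.api.chat import _extract_api_error_details  # path assumed
details = _extract_api_error_details(ValueError("bad model name"))
print(details)
# {'error_type': 'ValueError', 'error_message': 'bad model name'}
```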
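`_sanitize_error_body` whitelists known-safe fields rather than redacting, and recurses into nested `error` envelopes; anything else, such as headers or request payloads echoed back by a provider, is dropped. A usage check under the same assumption that the helper is importable (the body contents are invented):

```python
raw = {
    "error": {
        "message": "Invalid request",
        "code": "invalid_request_error",
        "internal_trace": "0xdeadbeef",  # not whitelisted -> dropped
    },
    "request_headers": {"authorization": "Bearer sk-..."},  # dropped entirely
}
print(_sanitize_error_body(raw))
# {'error': {'message': 'Invalid request', 'code': 'invalid_request_error'}}
```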