mirror of https://github.com/Pythagora-io/gpt-pilot.git (synced 2026-01-09 13:17:55 -05:00)
add telemetry for project import; small telemetry fixes
@@ -6,6 +6,7 @@ from core.agents.response import AgentResponse, ResponseType
 from core.db.models import Complexity
 from core.llm.parser import JSONParser
 from core.log import get_logger
+from core.telemetry import telemetry
 from core.templates.example_project import EXAMPLE_PROJECT_DESCRIPTION
 
 log = get_logger(__name__)
@@ -84,3 +85,13 @@ class Importer(BaseAgent):
                 "complexity": Complexity.HARD if len(self.current_state.files) > 5 else Complexity.SIMPLE,
             }
         ]
+
+        n_lines = sum(len(f.content.content.splitlines()) for f in self.current_state.files)
+        await telemetry.trace_code_event(
+            "existing-project",
+            {
+                "num_files": len(self.current_state.files),
+                "num_lines": n_lines,
+                "description": llm_response,
+            },
+        )
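
The new Importer code measures the imported project and reports it with a single trace_code_event call. Below is a minimal, dependency-free sketch of the same counting logic; File and Content are hypothetical stand-ins for gpt-pilot's DB models (files reference a content row, hence f.content.content):

    from dataclasses import dataclass

    @dataclass
    class Content:
        content: str  # stand-in for the DB row that stores file text

    @dataclass
    class File:
        content: Content

    def project_stats(files: list[File]) -> dict:
        # The same metrics the diff collects: file count and total line count.
        return {
            "num_files": len(files),
            "num_lines": sum(len(f.content.content.splitlines()) for f in files),
        }

    print(project_stats([File(Content("a = 1\nb = 2\n")), File(Content("print('hi')\n"))]))
    # -> {'num_files': 2, 'num_lines': 3}
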
@@ -28,7 +28,6 @@ async def run_project(sm: StateManager, ui: UIBase) -> bool:
    :return: True if the orchestrator exited successfully, False otherwise.
    """

    telemetry.start()
    telemetry.set("app_id", str(sm.project.id))
    telemetry.set("initial_prompt", sm.current_state.specification.description)
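
run_project initializes per-run telemetry: it starts the clock and records the app id and the initial prompt as key-value fields. A minimal sketch of that start/set interface, assuming a simple in-memory store (the real Telemetry class keeps more state and persists it):

    import time
    from typing import Any

    class Telemetry:
        def __init__(self) -> None:
            self.data: dict[str, Any] = {}
            self.start_time = None

        def start(self) -> None:
            # Remember when the run began so elapsed time can be reported later.
            self.start_time = time.time()

        def set(self, key: str, value: Any) -> None:
            self.data[key] = value

    telemetry = Telemetry()
    telemetry.start()
    telemetry.set("app_id", "1234")  # hypothetical id
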
@@ -58,7 +57,6 @@ async def run_project(sm: StateManager, ui: UIBase) -> bool:
        source=pythagora_source,
    )

    await telemetry.send()
    return success
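
At the end of the run, everything collected so far is flushed with one awaited send() call. A dependency-free sketch of what an async send might look like (the real client POSTs the payload to a configured endpoint and then clears the store):

    import asyncio
    import json

    async def send(data: dict) -> None:
        # Serialize the collected fields; a real implementation would POST
        # this payload (e.g. with an HTTP client) instead of printing it.
        payload = json.dumps(data)
        print(f"would send {len(payload)} bytes: {payload}")

    asyncio.run(send({"app_id": "1234", "num_files": 2}))
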
@@ -147,7 +145,6 @@ async def run_pythagora_session(sm: StateManager, ui: UIBase, args: Namespace):

    if args.project or args.branch or args.step:
        telemetry.set("is_continuation", True)
        # FIXME: we should send the project stage and other runtime info to the UI
        success = await load_project(sm, args.project, args.branch, args.step)
        if not success:
            return False
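
Resumed sessions (any of project, branch or step given on the command line) are flagged as continuations before the saved state is loaded. A runnable sketch of that gating logic; load_project here is a hypothetical stub for the real state-restoring helper:

    import asyncio
    from argparse import Namespace

    telemetry_data = {}

    async def load_project(project, branch, step) -> bool:
        # Hypothetical stub; the real helper restores saved DB state.
        return project is not None

    async def session(args: Namespace) -> bool:
        if args.project or args.branch or args.step:
            telemetry_data["is_continuation"] = True
            if not await load_project(args.project, args.branch, args.step):
                return False
        return True

    print(asyncio.run(session(Namespace(project="demo", branch=None, step=None))))  # True
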
@@ -181,6 +181,7 @@ class BaseLLMClient:
            remaining_retries -= 1
            request_log.messages = convo.messages[:]
            request_log.response = None
            request_log.status = LLMRequestStatus.SUCCESS
            request_log.error = None
            response = None
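
At the top of each retry iteration the per-request log is reset, so a parse error recorded on one attempt can't leak into the record of a later attempt that succeeds. A simplified sketch; Status and RequestLog stand in for gpt-pilot's LLMRequestStatus and request log model:

    from dataclasses import dataclass, field
    from enum import Enum
    from typing import Optional

    class Status(Enum):
        SUCCESS = "success"
        ERROR = "error"

    @dataclass
    class RequestLog:
        messages: list = field(default_factory=list)
        response: Optional[str] = None
        status: Status = Status.SUCCESS
        error: Optional[str] = None

    def reset_for_attempt(log: RequestLog, messages: list) -> None:
        # Clear per-attempt state before the next LLM call.
        log.messages = messages[:]
        log.response = None
        log.status = Status.SUCCESS
        log.error = None

    rl = RequestLog(status=Status.ERROR, error="boom")
    reset_for_attempt(rl, [{"role": "user", "content": "hi"}])
    print(rl.status, rl.error)  # Status.SUCCESS None
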
@@ -278,7 +279,9 @@ class BaseLLMClient:
                     response = parser(response)
                     break
                 except ValueError as err:
-                    log.debug(f"Error parsing GPT response: {err}, asking LLM to retry", exc_info=True)
+                    request_log.error = f"Error parsing response: {err}"
+                    request_log.status = LLMRequestStatus.ERROR
+                    log.debug(f"Error parsing LLM response: {err}, asking LLM to retry", exc_info=True)
                     convo.assistant(response)
                     convo.user(f"Error parsing response: {err}. Please output your response EXACTLY as requested.")
                     continue
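
This hunk replaces the old "GPT" debug message and, more importantly, now records the parse failure on the request log before nudging the model to retry: the failed output goes back into the conversation as an assistant message, followed by a corrective user message. A self-contained sketch of that retry-with-parser loop; ask(history) -> str is a hypothetical stand-in for the LLM call:

    import json

    def call_with_parser(ask, parser, max_retries=3):
        # parser raises ValueError when the output doesn't match the
        # requested format (json.loads fits: JSONDecodeError is a ValueError).
        history = []
        for _ in range(max_retries):
            response = ask(history)
            try:
                return parser(response)
            except ValueError as err:
                # Keep the bad output in the conversation and ask the model
                # to correct itself, then retry, as the diff does.
                history.append({"role": "assistant", "content": response})
                history.append({"role": "user", "content": f"Error parsing response: {err}. Please output your response EXACTLY as requested."})
        raise RuntimeError("Error parsing response: retries exhausted")

    responses = iter(["not json", '{"ok": true}'])
    print(call_with_parser(lambda history: next(responses), json.loads))  # {'ok': True}
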
@@ -321,6 +321,7 @@ class Telemetry:
        Note: this method clears all telemetry data after sending it.
        """
        if not self.enabled:
            log.debug("Telemetry.send(): telemetry is disabled, not sending data")
            return

        if self.endpoint is None:
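
Telemetry.send() now leaves a debug breadcrumb when it bails out because telemetry is disabled. A sketch of the guard structure, assuming the endpoint-is-None branch (truncated in the diff above) returns early in the same way:

    import logging

    log = logging.getLogger(__name__)

    class Telemetry:
        def __init__(self, enabled=False, endpoint=None):
            self.enabled = enabled
            self.endpoint = endpoint
            self.data = {}

        async def send(self):
            if not self.enabled:
                log.debug("Telemetry.send(): telemetry is disabled, not sending data")
                return
            if self.endpoint is None:
                # Assumed to mirror the disabled case; the real message is
                # not shown in the diff.
                log.debug("Telemetry.send(): no endpoint configured, not sending data")
                return
            # ... POST self.data to self.endpoint, then clear it
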
@@ -114,7 +114,7 @@ async def test_openai_parser_fails(mock_AsyncOpenAI):
 
     llm = OpenAIClient(cfg)
 
-    with pytest.raises(APIError, match="Error parsing LLM response"):
+    with pytest.raises(APIError, match="Error parsing response"):
         await llm(convo, parser=parser, max_retries=1)
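
The test's expected error message changes along with the message recorded on the request log. pytest's match= argument is re.search()ed against the string form of the raised exception, so the shorter pattern still matches any "Error parsing response: ..." message. A minimal illustration with a hypothetical APIError:

    import pytest

    class APIError(Exception):
        pass

    def test_match_uses_regex_search():
        with pytest.raises(APIError, match="Error parsing response"):
            raise APIError("Error parsing response: invalid JSON")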