mirror of https://github.com/Pythagora-io/gpt-pilot.git
synced 2026-01-09 13:17:55 -05:00

Merge pull request #1008 from Pythagora-io/more-telemetry

More telemetry
@@ -5,8 +5,11 @@ from pydantic import BaseModel, Field
 from core.agents.base import BaseAgent
 from core.agents.convo import AgentConvo
 from core.agents.response import AgentResponse
+from core.db.models import Specification
 from core.llm.parser import JSONParser
 from core.log import get_logger
 from core.telemetry import telemetry
+from core.templates.example_project import EXAMPLE_PROJECTS
 from core.templates.registry import PROJECT_TEMPLATES, ProjectTemplateEnum
 from core.ui.base import ProjectStage
@@ -15,6 +18,8 @@ WARN_SYSTEM_DEPS = ["docker", "kubernetes", "microservices"]
 WARN_FRAMEWORKS = ["next.js", "vue", "vue.js", "svelte", "angular"]
 WARN_FRAMEWORKS_URL = "https://github.com/Pythagora-io/gpt-pilot/wiki/Using-GPT-Pilot-with-frontend-frameworks"

 log = get_logger(__name__)


 # FIXME: all the response pydantic models should be strict (see config._StrictModel), also check if we
 # can disallow adding custom Python attributes to the model
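The FIXME above asks for strict response models. A minimal sketch of what such a model could look like in pydantic v2 (the exact options are an assumption; the project's real config._StrictModel may differ):

    from pydantic import BaseModel, ConfigDict

    class _StrictModel(BaseModel):
        # Reject fields the LLM invents instead of silently ignoring them,
        # and disallow type coercion (e.g. "1" -> 1)
        model_config = ConfigDict(extra="forbid", strict=True)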
@@ -74,34 +79,34 @@ class Architect(BaseAgent):
     async def run(self) -> AgentResponse:
         await self.ui.send_project_stage(ProjectStage.ARCHITECTURE)

+        spec = self.current_state.specification.clone()
+
+        if spec.example_project:
+            self.prepare_example_project(spec)
+        else:
+            await self.plan_architecture(spec)
+
+        await self.check_system_dependencies(spec)
+
+        self.next_state.specification = spec
+        telemetry.set("template", spec.template)
+        self.next_state.action = ARCHITECTURE_STEP_NAME
+        return AgentResponse.done(self)
+
+    async def plan_architecture(self, spec: Specification):
+        await self.send_message("Planning project architecture ...")

         llm = self.get_llm()
         convo = AgentConvo(self).template("technologies", templates=PROJECT_TEMPLATES).require_schema(Architecture)
-        await self.send_message("Planning project architecture ...")
         arch: Architecture = await llm(convo, parser=JSONParser(Architecture))

         await self.check_compatibility(arch)
-        await self.check_system_dependencies(arch.system_dependencies)

-        spec = self.current_state.specification.clone()
         spec.architecture = arch.architecture
         spec.system_dependencies = [d.model_dump() for d in arch.system_dependencies]
         spec.package_dependencies = [d.model_dump() for d in arch.package_dependencies]
         spec.template = arch.template.value if arch.template else None

-        self.next_state.specification = spec
-        telemetry.set(
-            "architecture",
-            {
-                "description": spec.architecture,
-                "system_dependencies": spec.system_dependencies,
-                "package_dependencies": spec.package_dependencies,
-            },
-        )
-        telemetry.set("template", spec.template)
-        self.next_state.action = ARCHITECTURE_STEP_NAME
-        return AgentResponse.done(self)

     async def check_compatibility(self, arch: Architecture) -> bool:
         warn_system_deps = [dep.name for dep in arch.system_dependencies if dep.name.lower() in WARN_SYSTEM_DEPS]
         warn_package_deps = [dep.name for dep in arch.package_dependencies if dep.name.lower() in WARN_FRAMEWORKS]
@@ -130,18 +135,50 @@ class Architect(BaseAgent):
         # that SpecWriter should catch and allow the user to reword the initial spec.
         return True

-    async def check_system_dependencies(self, deps: list[SystemDependency]):
+    def prepare_example_project(self, spec: Specification):
+        log.debug(f"Setting architecture for example project: {spec.example_project}")
+        arch = EXAMPLE_PROJECTS[spec.example_project]["architecture"]
+
+        spec.architecture = arch["architecture"]
+        spec.system_dependencies = arch["system_dependencies"]
+        spec.package_dependencies = arch["package_dependencies"]
+        spec.template = arch["template"]
+        telemetry.set("template", spec.template)
+
+    async def check_system_dependencies(self, spec: Specification):
         """
         Check whether the required system dependencies are installed.

+        This also stores the app architecture telemetry data, including the
+        information about whether each system dependency is installed.
+
+        :param spec: Project specification.
         """
+        deps = spec.system_dependencies
+
         for dep in deps:
-            status_code, _, _ = await self.process_manager.run_command(dep.test)
+            status_code, _, _ = await self.process_manager.run_command(dep["test"])
+            dep["installed"] = bool(status_code == 0)
             if status_code != 0:
-                if dep.required_locally:
+                if dep["required_locally"]:
                     remedy = "Please install it before proceeding with your app."
                 else:
                     remedy = "If you would like to use it locally, please install it before proceeding."
-                await self.send_message(f"❌ {dep.name} is not available. {remedy}")
+                await self.send_message(f"❌ {dep['name']} is not available. {remedy}")
+                await self.ask_question(
+                    f"Once you have installed {dep['name']}, please press Continue.",
+                    buttons={"continue": "Continue"},
+                    buttons_only=True,
+                    default="continue",
+                )
             else:
-                await self.send_message(f"✅ {dep.name} is available.")
+                await self.send_message(f"✅ {dep['name']} is available.")

+        telemetry.set(
+            "architecture",
+            {
+                "description": spec.architecture,
+                "system_dependencies": deps,
+                "package_dependencies": spec.package_dependencies,
+            },
+        )
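For reference, a plausible shape of one entry in spec.system_dependencies, inferred from the keys the loop above reads ("test", "required_locally", "name") and writes ("installed"); the concrete values are illustrative:

    dep = {
        "name": "node",                # shown to the user in the ✅/❌ messages
        "test": "node --version",      # command run via process_manager.run_command()
        "required_locally": True,      # selects which remedy message is shown
        "installed": True,             # set by check_system_dependencies()
    }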
@@ -10,6 +10,7 @@ from core.agents.response import AgentResponse, ResponseType
 from core.db.models.project_state import TaskStatus
 from core.llm.parser import JSONParser
 from core.log import get_logger
+from core.telemetry import telemetry

 log = get_logger(__name__)
@@ -195,6 +196,14 @@ class Developer(BaseAgent):
         self.next_state.modified_files = {}
         self.set_next_steps(response, source)
         self.next_state.action = f"Task #{current_task_index + 1} start"
+        await telemetry.trace_code_event(
+            "task-start",
+            {
+                "task_index": current_task_index + 1,
+                "num_tasks": len(self.current_state.tasks),
+                "num_epics": len(self.current_state.epics),
+            },
+        )
         return AgentResponse.done(self)

     async def get_relevant_files(
@@ -26,7 +26,7 @@ class SelectedDocsets(BaseModel):
 class ExternalDocumentation(BaseAgent):
     """Agent in charge of collecting and storing additional documentation.

-    Docs are per task and are stored in the `tasks` variable in the project state.
+    Docs are per task and are stored in the `docs` variable in the project state.
     This agent ensures documentation is collected only once per task.

     Agent does 2 LLM interactions:
@@ -44,7 +44,12 @@ class ExternalDocumentation(BaseAgent):
     display_name = "Documentation"

     async def run(self) -> AgentResponse:
-        available_docsets = await self._get_available_docsets()
+        if self.current_state.specification.example_project:
+            log.debug("Example project detected, no documentation selected.")
+            available_docsets = []
+        else:
+            available_docsets = await self._get_available_docsets()

         selected_docsets = await self._select_docsets(available_docsets)
+        await telemetry.trace_code_event("docsets_used", selected_docsets)
@@ -153,6 +158,8 @@ class ExternalDocumentation(BaseAgent):
         Documentation snippets are stored as a list of dictionaries:
         {"key": docset-key, "desc": documentation-description, "snippets": list-of-snippets}

+        :param snippets: List of tuples: (docset_key, snippets)
+        :param available_docsets: List of available docsets from the API.
         """
         docsets_dict = dict(available_docsets)
@@ -161,4 +168,3 @@ class ExternalDocumentation(BaseAgent):
             docs.append({"key": docset_key, "desc": docsets_dict[docset_key], "snippets": snip})

         self.next_state.docs = docs
-        self.next_state.flag_tasks_as_modified()
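Built up concretely, the stored structure looks like this (docset names are illustrative):

    docs = [
        {
            "key": "react~18",                  # docset key from the API
            "desc": "React 18 documentation",   # human-readable description
            "snippets": ["<snippet 1>", "<snippet 2>"],
        },
    ]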
@@ -187,7 +187,7 @@ class Orchestrator(BaseAgent):
             return Importer(self.state_manager, self.ui)
         else:
             # New project: ask the Spec Writer to refine and save the project specification
-            return SpecWriter(self.state_manager, self.ui)
+            return SpecWriter(self.state_manager, self.ui, process_manager=self.process_manager)
     elif not state.specification.architecture:
         # Ask the Architect to design the project architecture and determine dependencies
         return Architect(self.state_manager, self.ui, process_manager=self.process_manager)
@@ -3,11 +3,11 @@ from core.agents.convo import AgentConvo
 from core.agents.response import AgentResponse
 from core.db.models import Complexity
 from core.llm.parser import StringParser
+from core.log import get_logger
 from core.telemetry import telemetry
 from core.templates.example_project import (
-    EXAMPLE_PROJECT_ARCHITECTURE,
-    EXAMPLE_PROJECT_DESCRIPTION,
-    EXAMPLE_PROJECT_PLAN,
+    DEFAULT_EXAMPLE_PROJECT,
+    EXAMPLE_PROJECTS,
 )

 # If the project description is less than this, perform an analysis using LLM
@@ -18,6 +18,8 @@ INITIAL_PROJECT_HOWTO_URL = (
 )
 SPEC_STEP_NAME = "Create specification"

+log = get_logger(__name__)
+

 class SpecWriter(BaseAgent):
     agent_type = "spec-writer"
@@ -41,18 +43,25 @@ class SpecWriter(BaseAgent):
             return AgentResponse.import_project(self)

         if response.button == "example":
-            await self.send_message("Starting example project with description:")
-            await self.send_message(EXAMPLE_PROJECT_DESCRIPTION)
-            self.prepare_example_project()
+            await self.prepare_example_project(DEFAULT_EXAMPLE_PROJECT)
             return AgentResponse.done(self)

         elif response.button == "continue":
             # FIXME: Workaround for the fact that VSCode "continue" button does
             # nothing but repeat the question. We reproduce this bug for bug here.
             return AgentResponse.done(self)

-        spec = response.text
+        spec = response.text.strip()

         complexity = await self.check_prompt_complexity(spec)
+        await telemetry.trace_code_event(
+            "project-description",
+            {
+                "initial_prompt": spec,
+                "complexity": complexity,
+            },
+        )

         if len(spec) < ANALYZE_THRESHOLD and complexity != Complexity.SIMPLE:
             spec = await self.analyze_spec(spec)
             spec = await self.review_spec(spec)
@@ -73,36 +82,21 @@ class SpecWriter(BaseAgent):
         llm_response: str = await llm(convo, temperature=0, parser=StringParser())
         return llm_response.lower()

-    def prepare_example_project(self):
+    async def prepare_example_project(self, example_name: str):
+        example_description = EXAMPLE_PROJECTS[example_name]["description"].strip()
+
+        log.debug(f"Starting example project: {example_name}")
+        await self.send_message(f"Starting example project with description:\n\n{example_description}")
+
         spec = self.current_state.specification.clone()
-        spec.description = EXAMPLE_PROJECT_DESCRIPTION
-        spec.architecture = EXAMPLE_PROJECT_ARCHITECTURE["architecture"]
-        spec.system_dependencies = EXAMPLE_PROJECT_ARCHITECTURE["system_dependencies"]
-        spec.package_dependencies = EXAMPLE_PROJECT_ARCHITECTURE["package_dependencies"]
-        spec.template = EXAMPLE_PROJECT_ARCHITECTURE["template"]
-        spec.complexity = Complexity.SIMPLE
-        telemetry.set("initial_prompt", spec.description.strip())
-        telemetry.set("is_complex_app", False)
-        telemetry.set("template", spec.template)
-        telemetry.set(
-            "architecture",
-            {
-                "architecture": spec.architecture,
-                "system_dependencies": spec.system_dependencies,
-                "package_dependencies": spec.package_dependencies,
-            },
-        )
+        spec.example_project = example_name
+        spec.description = example_description
+        spec.complexity = EXAMPLE_PROJECTS[example_name]["complexity"]
         self.next_state.specification = spec

-        self.next_state.epics = [
-            {
-                "name": "Initial Project",
-                "description": EXAMPLE_PROJECT_DESCRIPTION,
-                "completed": False,
-                "complexity": Complexity.SIMPLE,
-            }
-        ]
-        self.next_state.tasks = EXAMPLE_PROJECT_PLAN
+        telemetry.set("initial_prompt", spec.description)
+        telemetry.set("example_project", example_name)
+        telemetry.set("is_complex_app", spec.complexity != Complexity.SIMPLE)

     async def analyze_spec(self, spec: str) -> str:
         msg = (
@@ -115,6 +109,8 @@ class SpecWriter(BaseAgent):
         llm = self.get_llm()
         convo = AgentConvo(self).template("ask_questions").user(spec)
+        n_questions = 0
+        n_answers = 0

         while True:
             response: str = await llm(convo)
@@ -129,12 +125,21 @@ class SpecWriter(BaseAgent):
                     buttons={"continue": "continue"},
                 )
                 if confirm.cancelled or confirm.button == "continue" or confirm.text == "":
+                    await telemetry.trace_code_event(
+                        "spec-writer-questions",
+                        {
+                            "num_questions": n_questions,
+                            "num_answers": n_answers,
+                            "new_spec": spec,
+                        },
+                    )
                     return spec
                 convo.user(confirm.text)

             else:
                 convo.assistant(response)

+                n_questions += 1
                 user_response = await self.ask_question(
                     response,
                     buttons={"skip": "Skip questions"},
@@ -147,6 +152,7 @@ class SpecWriter(BaseAgent):
                     response: str = await llm(convo)
                     return response

+                n_answers += 1
                 convo.user(user_response.text)

     async def review_spec(self, spec: str) -> str:
@@ -1,6 +1,7 @@
 from core.agents.base import BaseAgent
 from core.agents.response import AgentResponse
 from core.log import get_logger
+from core.telemetry import telemetry

 log = get_logger(__name__)
@@ -25,5 +26,14 @@ class TaskCompleter(BaseAgent):
             self.current_state.get_source_index(source),
             tasks,
         )
+        await telemetry.trace_code_event(
+            "task-end",
+            {
+                "task_index": current_task_index + 1,
+                "num_tasks": len(self.current_state.tasks),
+                "num_epics": len(self.current_state.epics),
+                "num_iterations": len(self.current_state.iterations),
+            },
+        )

         return AgentResponse.done(self)
@@ -10,6 +10,8 @@ from core.db.models import Complexity
 from core.db.models.project_state import TaskStatus
 from core.llm.parser import JSONParser
 from core.log import get_logger
+from core.telemetry import telemetry
+from core.templates.example_project import EXAMPLE_PROJECTS
 from core.templates.registry import apply_project_template, get_template_description, get_template_summary
 from core.ui.base import ProjectStage, success_source
@@ -41,8 +43,10 @@ class TechLead(BaseAgent):
             return await self.update_epic()

         if len(self.current_state.epics) == 0:
-            self.create_initial_project_epic()
             # Orchestrator will rerun us to break down the initial project epic
+            if self.current_state.specification.example_project:
+                self.plan_example_project()
+            else:
+                self.create_initial_project_epic()
             return AgentResponse.done(self)

         await self.ui.send_project_stage(ProjectStage.CODING)
@@ -151,6 +155,13 @@ class TechLead(BaseAgent):
             }
             for task in response.plan
         ]
+        await telemetry.trace_code_event(
+            "development-plan",
+            {
+                "num_tasks": len(self.current_state.tasks),
+                "num_epics": len(self.current_state.epics),
+            },
+        )
         return AgentResponse.done(self)

     async def update_epic(self) -> AgentResponse:
@@ -201,3 +212,18 @@ class TechLead(BaseAgent):
         ]
         log.debug(f"Updated development plan for {epic['name']}, {len(response.plan)} tasks remaining")
         return AgentResponse.done(self)
+
+    def plan_example_project(self):
+        example_name = self.current_state.specification.example_project
+        log.debug(f"Planning example project: {example_name}")
+
+        example = EXAMPLE_PROJECTS[example_name]
+        self.next_state.epics = [
+            {
+                "name": "Initial Project",
+                "description": example["description"],
+                "completed": False,
+                "complexity": example["complexity"],
+            }
+        ]
+        self.next_state.tasks = example["plan"]
@@ -182,6 +182,21 @@ class Troubleshooter(IterationPromptMixin, BaseAgent):
             return False, False, ""

         if user_response.button == "loop":
+            await telemetry.trace_code_event(
+                "stuck-in-loop",
+                {
+                    "clicked": True,
+                    "task_index": self.current_state.tasks.index(self.current_state.current_task) + 1,
+                    "num_tasks": len(self.current_state.tasks),
+                    "num_epics": len(self.current_state.epics),
+                    "num_iterations": len(self.current_state.iterations),
+                    "num_steps": len(self.current_state.steps),
+                    "architecture": {
+                        "system_dependencies": self.current_state.specification.system_dependencies,
+                        "app_dependencies": self.current_state.specification.package_dependencies,
+                    },
+                },
+            )
             return True, True, ""

         return True, False, user_response.text
@@ -1 +1,10 @@
-Generic single-database configuration.
+Pythagora uses Alembic for database migrations.
+
+After changing any of the database models, create a new migration:
+
+    alembic -c core/db/alembic.ini revision --autogenerate -m "description"
+
+Migrations are applied automatically when the application starts, but can also be
+run manually with:
+
+    alembic -c core/db/alembic.ini upgrade head
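For instance, the migration below was presumably produced this way: after adding the example_project column to the Specification model (see the models change later in this diff), a run of

    alembic -c core/db/alembic.ini revision --autogenerate -m "add example project to spec"

would generate a revision with the add_column/drop_column pair shown next.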
@@ -0,0 +1,34 @@
+"""add example project to spec
+
+Revision ID: ff891d366761
+Revises: b760f66138c0
+Create Date: 2024-06-13 09:38:33.329161
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = "ff891d366761"
+down_revision: Union[str, None] = "b760f66138c0"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table("specifications", schema=None) as batch_op:
+        batch_op.add_column(sa.Column("example_project", sa.String(), nullable=True))
+
+    # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table("specifications", schema=None) as batch_op:
+        batch_op.drop_column("example_project")
+
+    # ### end Alembic commands ###
@@ -31,6 +31,7 @@ class Specification(Base):
     package_dependencies: Mapped[list[dict]] = mapped_column(default=list)
     template: Mapped[Optional[str]] = mapped_column()
     complexity: Mapped[str] = mapped_column(server_default=Complexity.HARD)
+    example_project: Mapped[Optional[str]] = mapped_column()

     # Relationships
     project_states: Mapped[list["ProjectState"]] = relationship(back_populates="specification", lazy="raise")
@@ -46,6 +47,7 @@ class Specification(Base):
             package_dependencies=self.package_dependencies,
             template=self.template,
             complexity=self.complexity,
+            example_project=self.example_project,
         )
         return clone
@@ -77,6 +77,7 @@ class StateManager:
             f'with default branch "{branch.name}" (id={branch.id}) '
             f"and initial state id={state.id} (step_index={state.step_index})"
         )
+        await telemetry.trace_code_event("create-project", {"name": name})

         self.current_session = session
         self.current_state = state
@@ -2,6 +2,7 @@ import sys
 import time
 import traceback
 from copy import deepcopy
+from os import getenv
 from pathlib import Path
 from typing import Any
@@ -72,7 +73,7 @@ class Telemetry:
             "python_version": sys.version,
             # GPT Pilot version
             "pilot_version": get_version(),
-            # GPT Pilot Extension version
+            # Pythagora VSCode Extension version
             "extension_version": None,
             # Is extension used
             "is_extension": False,
@@ -85,6 +86,8 @@ class Telemetry:
             "is_complex_app": None,
             # Optional template used for the project
             "template": None,
+            # Optional, example project selected by the user
+            "example_project": None,
             # Optional user contact email
             "user_contact": None,
             # Unique project ID (app_id)
@@ -320,7 +323,7 @@ class Telemetry:
         Note: this method clears all telemetry data after sending it.
         """
-        if not self.enabled:
+        if not self.enabled or getenv("DISABLE_TELEMETRY"):
             log.debug("Telemetry.send(): telemetry is disabled, not sending data")
             return
@@ -362,22 +365,26 @@ class Telemetry:
         :param name: name of the event
         :param data: data to send with the event
         """
-        if not self.enabled:
+        if not self.enabled or getenv("DISABLE_TELEMETRY"):
             return

+        data = deepcopy(data)
+        for item in ["app_id", "user_contact", "platform", "pilot_version", "model"]:
+            data[item] = self.data[item]
+
         payload = {
             "pathId": self.telemetry_id,
             "event": f"trace-{name}",
             "data": data,
         }

-        log.debug(f"Sending trace event {name} to {self.endpoint}")
+        log.debug(f"Sending trace event {name} to {self.endpoint}: {repr(payload)}")

         try:
             async with httpx.AsyncClient() as client:
                 await client.post(self.endpoint, json=payload)
-        except httpx.RequestError:
-            pass
+        except httpx.RequestError as e:
+            log.error(f"Failed to send trace event {name}: {e}", exc_info=True)

     async def trace_loop(self, name: str, task_with_loop: dict):
         payload = deepcopy(self.data)
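Putting the pieces together, the JSON body POSTed for a trace event now looks roughly like this (field values are illustrative; the common fields are copied in from Telemetry.data by the loop above):

    {
        "pathId": "<telemetry id>",
        "event": "trace-task-start",
        "data": {
            "task_index": 1,
            "num_tasks": 5,
            "num_epics": 1,
            "app_id": "<project uuid>",
            "user_contact": null,
            "platform": "Linux-...",
            "pilot_version": "...",
            "model": "gpt-4o"
        }
    }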
@@ -1,3 +1,5 @@
+from core.db.models import Complexity
+
 EXAMPLE_PROJECT_DESCRIPTION = """
 The application is a simple ToDo app built using React. Its primary function is to allow users to manage a list of tasks (todos). Each task has a description and a state (open or completed, with the default state being open). The application is frontend-only, with no user sign-up or authentication process. The goal is to provide a straightforward and user-friendly interface for task management.
@@ -64,3 +66,15 @@ EXAMPLE_PROJECT_PLAN = [
         "status": "todo",
     }
 ]
+
+
+EXAMPLE_PROJECTS = {
+    "example-project": {
+        "description": EXAMPLE_PROJECT_DESCRIPTION,
+        "architecture": EXAMPLE_PROJECT_ARCHITECTURE,
+        "complexity": Complexity.SIMPLE,
+        "plan": EXAMPLE_PROJECT_PLAN,
+    }
+}
+
+DEFAULT_EXAMPLE_PROJECT = "example-project"
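The registry makes adding further canned projects a one-entry change. A hypothetical second entry (the name and values here are illustrative, not part of this commit):

    EXAMPLE_PROJECTS["notes-app"] = {
        "description": "A minimal note-taking app ...",
        "architecture": EXAMPLE_PROJECT_ARCHITECTURE,  # or a new dict of the same shape
        "complexity": Complexity.SIMPLE,
        "plan": EXAMPLE_PROJECT_PLAN,                  # or a new list of task dicts
    }

SpecWriter, Architect, TechLead and ExternalDocumentation all key off spec.example_project, so a new entry is picked up by each stage automatically.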
@@ -1,30 +1,34 @@
-## Telemetry in GPT Pilot
+## Telemetry in Pythagora

-At GPT Pilot, we are dedicated to improving your experience and the overall quality of our software. To achieve this, we gather anonymous telemetry data which helps us understand how the tool is being used and identify areas for improvement.
+At Pythagora, we are dedicated to improving your experience and the overall quality of our software. To achieve this, we gather anonymous telemetry data which helps us understand how the tool is being used and identify areas for improvement.

 ### What We Collect

 The telemetry data we collect includes:

-- **Total Runtime**: The total time GPT Pilot was active and running.
+- **Total Runtime**: The total time Pythagora was active and running.
 - **Command Runs**: How many commands were executed during a session.
 - **Development Steps**: The number of development steps that were performed.
 - **LLM Requests**: The number of LLM requests made.
 - **User Inputs**: The number of times you provide input to the tool.
 - **Operating System**: The operating system you are using (and Linux distro if applicable).
 - **Python Version**: The version of Python you are using.
-- **GPT Pilot Version**: The version of GPT Pilot you are using.
-- **LLM Model**: LLM model used for the session.
+- **GPT Pilot Version**: The version of Pythagora you are using.
+- **LLM Model**: LLM model(s) used for the session.
 - **Time**: How long it took to generate a project.
+- **Initial prompt**: App description used to create the app (after the Specification Writer agent).
+- **Architecture**: Architecture designed by Pythagora for the app.
+- **Documentation**: Pythagora documentation that was used while creating the app.
+- **User Email**: User email (if using the Pythagora VSCode Extension, or if explicitly provided when running Pythagora from the command line).
+- **Pythagora Tasks/Steps**: Information about the development tasks and steps Pythagora performs while coding the app.

-All the data points are listed in [pilot.utils.telemetry:Telemetry.clear_data()](../pilot/utils/telemetry.py).
+All the data points are listed in [core.telemetry:Telemetry.clear_data()](../core/telemetry/__init__.py).
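In code, these fields are recorded through the Telemetry helpers that appear throughout this diff (the counter field name below is an assumption; "template" and "task-start" are taken from this commit):

    telemetry.set("template", spec.template)              # one-off data point
    telemetry.inc("num_llm_requests")                     # counter-style data point
    await telemetry.trace_code_event("task-start", {...}) # standalone trace event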

 ### How We Use This Data

 We use this data to:

-- Monitor the performance and reliability of GPT Pilot.
+- Monitor the performance and reliability of Pythagora.
 - Understand usage patterns to guide our development and feature prioritization.
 - Identify common workflows and improve the user experience.
 - Ensure the scalability and efficiency of our language model interactions.
@@ -37,9 +41,9 @@ Your privacy is important to us. The data collected is purely for internal analy

 We believe in transparency and control. If you prefer not to send telemetry data, you can opt out at any time by setting `telemetry.enabled` to `false` in your `~/.gpt-pilot/config.json` configuration file.

-After you update this setting, GPT Pilot will no longer collect telemetry data from your machine.
+After you update this setting, Pythagora will no longer collect telemetry data from your machine.
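In `~/.gpt-pilot/config.json` (only the relevant key shown; a sketch of the config shape, which may contain other settings):

    {
        "telemetry": {
            "enabled": false
        }
    }

Or, per-invocation, via the environment variable this PR introduces (any non-empty value disables sending; the entry-point name here is illustrative):

    DISABLE_TELEMETRY=1 python main.py

DISABLE_TELEMETRY is checked in both Telemetry.send() and trace_code_event(), so it silences aggregate stats and trace events alike.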
### Questions and Feedback

If you have questions about our telemetry practices or would like to provide feedback, please open an issue in our repository, and we will be happy to engage with you.

-Thank you for supporting GPT Pilot and helping us make it better for everyone.
+Thank you for supporting Pythagora and helping us make it better for everyone.
@@ -8,9 +8,9 @@ from core.state.state_manager import StateManager
 @pytest.mark.asyncio
 async def test_offline_changes_check_restores_if_workspace_empty():
-    sm = Mock(spec=StateManager)
-    sm.workspace_is_empty.return_value = True
-    ui = Mock()
+    sm = AsyncMock(spec=StateManager)
+    sm.workspace_is_empty = Mock(return_value=True)
+    ui = AsyncMock()
     orca = Orchestrator(state_manager=sm, ui=ui)
     await orca.offline_changes_check()
     sm.restore_files.assert_called_once()
@@ -19,7 +19,8 @@ async def test_offline_changes_check_restores_if_workspace_empty():
 @pytest.mark.asyncio
 async def test_offline_changes_check_imports_changes_from_disk():
     sm = AsyncMock()
-    sm.workspace_is_empty.return_value = False
+    sm.workspace_is_empty = Mock(return_value=False)
+    sm.import_files = AsyncMock(return_value=([], []))
     ui = AsyncMock()
     ui.ask_question.return_value.button = "yes"
     orca = Orchestrator(state_manager=sm, ui=ui)
@@ -31,7 +32,7 @@ async def test_offline_changes_check_imports_changes_from_disk():
 @pytest.mark.asyncio
 async def test_offline_changes_check_restores_changes_from_db():
     sm = AsyncMock()
-    sm.workspace_is_empty.return_value = False
+    sm.workspace_is_empty = Mock(return_value=False)
     ui = AsyncMock()
     ui.ask_question.return_value.button = "no"
     orca = Orchestrator(state_manager=sm, ui=ui)
@@ -1,28 +1,30 @@
 from unittest.mock import AsyncMock

 import pytest

 from core.agents.response import ResponseType
 from core.agents.spec_writer import SpecWriter
 from core.db.models import Complexity
+from core.telemetry import telemetry
 from core.ui.base import UserInput


 @pytest.mark.asyncio
 async def test_start_example_project(agentcontext):
-    sm, _, ui, _ = agentcontext
+    sm, pm, ui, _ = agentcontext

     ui.ask_question.return_value = UserInput(button="example")
+    pm.run_command = AsyncMock(return_value=(0, "", ""))

-    sw = SpecWriter(sm, ui)
+    telemetry.start()
+
+    sw = SpecWriter(sm, ui, process_manager=pm)
     response = await sw.run()
     assert response.type == ResponseType.DONE

     assert sm.current_state.specification.description != ""
     assert sm.current_state.specification.architecture != ""
     assert sm.current_state.specification.system_dependencies != []
     assert sm.current_state.specification.package_dependencies != []
     assert sm.current_state.specification.complexity == Complexity.SIMPLE
     assert sm.current_state.epics != []
     assert sm.current_state.tasks != []
+    assert telemetry.data["initial_prompt"] == sm.current_state.specification.description


 @pytest.mark.asyncio
@@ -1,6 +1,8 @@
+import os
 from typing import Callable
 from unittest.mock import AsyncMock, MagicMock, patch

 import pytest
 import pytest_asyncio

 from core.config import DBConfig
@@ -9,6 +11,11 @@ from core.db.session import SessionManager
 from core.state.state_manager import StateManager


+@pytest.fixture(autouse=True)
+def disable_test_telemetry(monkeypatch):
+    os.environ["DISABLE_TELEMETRY"] = "1"
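The fixture accepts monkeypatch but sets the variable through os.environ directly, so the value persists for the whole test run. A variant that also restores the environment after each test (a sketch of the more usual pytest idiom, not what this commit does):

    @pytest.fixture(autouse=True)
    def disable_test_telemetry(monkeypatch):
        # monkeypatch.setenv undoes the change automatically at test teardown
        monkeypatch.setenv("DISABLE_TELEMETRY", "1")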


 @pytest_asyncio.fixture
 async def testmanager():
     """
@@ -98,14 +98,15 @@ def test_inc_ignores_unknown_data_field(mock_settings):
     assert "unknown_field" not in telemetry.data


+@patch("core.telemetry.getenv")
 @patch("core.telemetry.time")
 @patch("core.telemetry.settings")
-def test_start_with_telemetry_enabled(mock_settings, mock_time):
+def test_start_with_telemetry_enabled(mock_settings, mock_time, mock_getenv):
     mock_settings.telemetry = MagicMock(id="test-id", endpoint="test-endpoint", enabled=True)
     mock_time.time.return_value = 1234.0
+    mock_getenv.return_value = None  # override DISABLE_TELEMETRY test env var

     telemetry = Telemetry()

     telemetry.start()
     assert telemetry.start_time == 1234.0
@@ -134,9 +135,11 @@ def test_stop_calculates_elapsed_time(mock_settings, mock_time):


 @pytest.mark.asyncio
+@patch("core.telemetry.getenv")
 @patch("core.telemetry.settings")
-async def test_send_enabled_and_successful(mock_settings, mock_httpx_post):
+async def test_send_enabled_and_successful(mock_settings, mock_getenv, mock_httpx_post):
     mock_settings.telemetry = MagicMock(id="test-id", endpoint="test-endpoint", enabled=True)
+    mock_getenv.return_value = None  # override DISABLE_TELEMETRY test env var

     telemetry = Telemetry()
     with patch.object(telemetry, "calculate_statistics"):
@@ -151,10 +154,12 @@ async def test_send_enabled_and_successful(mock_settings, mock_httpx_post):


 @pytest.mark.asyncio
+@patch("core.telemetry.getenv")
 @patch("core.telemetry.settings")
-async def test_send_enabled_but_post_fails(mock_settings, mock_httpx_post):
+async def test_send_enabled_but_post_fails(mock_settings, mock_getenv, mock_httpx_post):
     mock_settings.telemetry = MagicMock(id="test-id", endpoint="test-endpoint", enabled=True)
     mock_httpx_post.side_effect = httpx.RequestError("Connection error")
+    mock_getenv.return_value = None  # override DISABLE_TELEMETRY test env var

     telemetry = Telemetry()
     with patch.object(telemetry, "calculate_statistics"):
@@ -180,9 +185,11 @@ async def test_send_not_enabled(mock_settings, mock_httpx_post):


 @pytest.mark.asyncio
+@patch("core.telemetry.getenv")
 @patch("core.telemetry.settings")
-async def test_send_no_endpoint_configured(mock_settings, mock_httpx_post):
+async def test_send_no_endpoint_configured(mock_settings, mock_getenv, mock_httpx_post):
     mock_settings.telemetry = MagicMock(id="test-id", endpoint=None, enabled=True)
+    mock_getenv.return_value = None  # override DISABLE_TELEMETRY test env var

     telemetry = Telemetry()
     await telemetry.send()
@@ -191,9 +198,11 @@ async def test_send_no_endpoint_configured(mock_settings, mock_httpx_post):


 @pytest.mark.asyncio
+@patch("core.telemetry.getenv")
 @patch("core.telemetry.settings")
-async def test_send_clears_counters_after_sending(mock_settings, mock_httpx_post):
+async def test_send_clears_counters_after_sending(mock_settings, mock_getenv, mock_httpx_post):
     mock_settings.telemetry = MagicMock(id="test-id", endpoint="test-endpoint", enabled=True)
+    mock_getenv.return_value = None  # override DISABLE_TELEMETRY test env var

     telemetry = Telemetry()
     telemetry.data["model"] = "test-model"