diff --git a/docs/docs.json b/docs/docs.json
index d3e442be6..8c9677706 100644
--- a/docs/docs.json
+++ b/docs/docs.json
@@ -308,6 +308,7 @@
"en/learn/hierarchical-process",
"en/learn/human-input-on-execution",
"en/learn/human-in-the-loop",
+ "en/learn/human-feedback-in-flows",
"en/learn/kickoff-async",
"en/learn/kickoff-for-each",
"en/learn/llm-connections",
@@ -735,6 +736,7 @@
"pt-BR/learn/hierarchical-process",
"pt-BR/learn/human-input-on-execution",
"pt-BR/learn/human-in-the-loop",
+ "pt-BR/learn/human-feedback-in-flows",
"pt-BR/learn/kickoff-async",
"pt-BR/learn/kickoff-for-each",
"pt-BR/learn/llm-connections",
@@ -1171,6 +1173,7 @@
"ko/learn/hierarchical-process",
"ko/learn/human-input-on-execution",
"ko/learn/human-in-the-loop",
+ "ko/learn/human-feedback-in-flows",
"ko/learn/kickoff-async",
"ko/learn/kickoff-for-each",
"ko/learn/llm-connections",
diff --git a/docs/en/concepts/flows.mdx b/docs/en/concepts/flows.mdx
index 067918c21..6e9977512 100644
--- a/docs/en/concepts/flows.mdx
+++ b/docs/en/concepts/flows.mdx
@@ -572,6 +572,55 @@ The `third_method` and `fourth_method` listen to the output of the `second_metho
When you run this Flow, the output will change based on the random boolean value generated by the `start_method`.
+### Human in the Loop (human feedback)
+
+The `@human_feedback` decorator enables human-in-the-loop workflows by pausing flow execution to collect feedback from a human. This is useful for approval gates, quality review, and decision points that require human judgment.
+
+```python Code
+from crewai.flow.flow import Flow, start, listen
+from crewai.flow.human_feedback import human_feedback, HumanFeedbackResult
+
+class ReviewFlow(Flow):
+ @start()
+ @human_feedback(
+ message="Do you approve this content?",
+ emit=["approved", "rejected", "needs_revision"],
+ llm="gpt-4o-mini",
+ default_outcome="needs_revision",
+ )
+ def generate_content(self):
+ return "Content to be reviewed..."
+
+ @listen("approved")
+ def on_approval(self, result: HumanFeedbackResult):
+ print(f"Approved! Feedback: {result.feedback}")
+
+ @listen("rejected")
+ def on_rejection(self, result: HumanFeedbackResult):
+ print(f"Rejected. Reason: {result.feedback}")
+```
+
+When `emit` is specified, the human's free-form feedback is interpreted by an LLM and collapsed into one of the specified outcomes, which then triggers the corresponding `@listen` listener.
+
+You can also use `@human_feedback` without routing to simply collect feedback:
+
+```python Code
+@start()
+@human_feedback(message="Any comments on this output?")
+def my_method(self):
+ return "Output for review"
+
+@listen(my_method)
+def next_step(self, result: HumanFeedbackResult):
+ # Access feedback via result.feedback
+ # Access original output via result.output
+ pass
+```
+
+Access all feedback collected during a flow via `self.last_human_feedback` (most recent) or `self.human_feedback_history` (all feedback as a list).
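+
+For example, a later step can read both attributes. Here is a minimal sketch that continues the snippet above:
+
+```python Code
+@listen(next_step)
+def summarize_feedback(self):
+    # Most recent HumanFeedbackResult (None if no feedback has been collected yet)
+    if self.last_human_feedback:
+        print(f"Latest feedback: {self.last_human_feedback.feedback}")
+
+    # Every HumanFeedbackResult collected so far, in order
+    for fb in self.human_feedback_history:
+        print(f"{fb.method_name} -> {fb.outcome or 'no routing'}")
+```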
+
+For a complete guide on human feedback in flows, including **async/non-blocking feedback** with custom providers (Slack, webhooks, etc.), see [Human Feedback in Flows](/en/learn/human-feedback-in-flows).
+
## Adding Agents to Flows
Agents can be seamlessly integrated into your flows, providing a lightweight alternative to full Crews when you need simpler, focused task execution. Here's an example of how to use an Agent within a flow to perform market research:
diff --git a/docs/en/learn/human-feedback-in-flows.mdx b/docs/en/learn/human-feedback-in-flows.mdx
new file mode 100644
index 000000000..3c90c0730
--- /dev/null
+++ b/docs/en/learn/human-feedback-in-flows.mdx
@@ -0,0 +1,581 @@
+---
+title: Human Feedback in Flows
+description: Learn how to integrate human feedback directly into your CrewAI Flows using the @human_feedback decorator
+icon: user-check
+mode: "wide"
+---
+
+## Overview
+
+The `@human_feedback` decorator enables human-in-the-loop (HITL) workflows directly within CrewAI Flows. It allows you to pause flow execution, present output to a human for review, collect their feedback, and optionally route to different listeners based on the feedback outcome.
+
+This is particularly valuable for:
+
+- **Quality assurance**: Review AI-generated content before it's used downstream
+- **Decision gates**: Let humans make critical decisions in automated workflows
+- **Approval workflows**: Implement approve/reject/revise patterns
+- **Interactive refinement**: Collect feedback to improve outputs iteratively
+
+```mermaid
+flowchart LR
+ A[Flow Method] --> B[Output Generated]
+ B --> C[Human Reviews]
+ C --> D{Feedback}
+ D -->|emit specified| E[LLM Collapses to Outcome]
+ D -->|no emit| F[HumanFeedbackResult]
+ E --> G["@listen('approved')"]
+ E --> H["@listen('rejected')"]
+ F --> I[Next Listener]
+```
+
+## Quick Start
+
+Here's the simplest way to add human feedback to a flow:
+
+```python Code
+from crewai.flow.flow import Flow, start, listen
+from crewai.flow.human_feedback import human_feedback
+
+class SimpleReviewFlow(Flow):
+ @start()
+ @human_feedback(message="Please review this content:")
+ def generate_content(self):
+ return "This is AI-generated content that needs review."
+
+ @listen(generate_content)
+ def process_feedback(self, result):
+ print(f"Content: {result.output}")
+ print(f"Human said: {result.feedback}")
+
+flow = SimpleReviewFlow()
+flow.kickoff()
+```
+
+When this flow runs, it will:
+1. Execute `generate_content` and return the string
+2. Display the output to the user with the request message
+3. Wait for the user to type feedback (or press Enter to skip)
+4. Pass a `HumanFeedbackResult` object to `process_feedback`
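+
+With the default console provider, the terminal interaction looks roughly like this (the exact formatting may vary slightly between versions):
+
+```text Output
+==================================================
+OUTPUT FOR REVIEW:
+==================================================
+This is AI-generated content that needs review.
+==================================================
+
+Please review this content:
+(Press Enter to skip, or type your feedback)
+
+Your feedback: Looks good to me.
+
+Content: This is AI-generated content that needs review.
+Human said: Looks good to me.
+```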
+
+## The @human_feedback Decorator
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+|-----------|------|----------|-------------|
+| `message` | `str` | Yes | The message shown to the human alongside the method output |
+| `emit` | `Sequence[str]` | No | List of possible outcomes. Feedback is collapsed to one of these, which triggers `@listen` decorators |
+| `llm` | `str \| BaseLLM` | When `emit` is specified | LLM used to interpret feedback and map it to an outcome |
+| `default_outcome` | `str` | No | Outcome to use if no feedback provided. Must be in `emit` |
+| `metadata` | `dict` | No | Additional data for enterprise integrations |
+| `provider` | `HumanFeedbackProvider` | No | Custom provider for async/non-blocking feedback. See [Async Human Feedback](#async-human-feedback-non-blocking) |
+
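+The `metadata` parameter has no dedicated section below, so here is a hedged sketch of how it can be used; the dictionary contents are arbitrary illustrative values, and the round-trip onto `result.metadata` follows the `HumanFeedbackResult` fields described later in this guide:
+
+```python Code
+@start()
+@human_feedback(
+    message="Approve this summary?",
+    metadata={"team": "content-review", "priority": "high"},  # illustrative values
+)
+def summarize(self):
+    return "Quarterly summary draft..."
+
+@listen(summarize)
+def record(self, result):
+    # Metadata passed to the decorator is available on the result
+    print(result.metadata)  # {"team": "content-review", "priority": "high"}
+```
+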
+### Basic Usage (No Routing)
+
+When you don't specify `emit`, the decorator simply collects feedback and passes a `HumanFeedbackResult` to the next listener:
+
+```python Code
+@start()
+@human_feedback(message="What do you think of this analysis?")
+def analyze_data(self):
+ return "Analysis results: Revenue up 15%, costs down 8%"
+
+@listen(analyze_data)
+def handle_feedback(self, result):
+ # result is a HumanFeedbackResult
+ print(f"Analysis: {result.output}")
+ print(f"Feedback: {result.feedback}")
+```
+
+### Routing with emit
+
+When you specify `emit`, the decorator becomes a router. The human's free-form feedback is interpreted by an LLM and collapsed into one of the specified outcomes:
+
+```python Code
+@start()
+@human_feedback(
+ message="Do you approve this content for publication?",
+ emit=["approved", "rejected", "needs_revision"],
+ llm="gpt-4o-mini",
+ default_outcome="needs_revision",
+)
+def review_content(self):
+ return "Draft blog post content here..."
+
+@listen("approved")
+def publish(self, result):
+ print(f"Publishing! User said: {result.feedback}")
+
+@listen("rejected")
+def discard(self, result):
+ print(f"Discarding. Reason: {result.feedback}")
+
+@listen("needs_revision")
+def revise(self, result):
+ print(f"Revising based on: {result.feedback}")
+```
+
+
+The LLM uses structured outputs (function calling) when available to guarantee the response is one of your specified outcomes. This makes routing reliable and predictable.
+
+
+## HumanFeedbackResult
+
+The `HumanFeedbackResult` dataclass contains all information about a human feedback interaction:
+
+```python Code
+from crewai.flow.human_feedback import HumanFeedbackResult
+
+@dataclass
+class HumanFeedbackResult:
+ output: Any # The original method output shown to the human
+ feedback: str # The raw feedback text from the human
+ outcome: str | None # The collapsed outcome (if emit was specified)
+ timestamp: datetime # When the feedback was received
+ method_name: str # Name of the decorated method
+ metadata: dict # Any metadata passed to the decorator
+```
+
+### Accessing in Listeners
+
+When a listener is triggered by a `@human_feedback` method with `emit`, it receives the `HumanFeedbackResult`:
+
+```python Code
+@listen("approved")
+def on_approval(self, result: HumanFeedbackResult):
+ print(f"Original output: {result.output}")
+ print(f"User feedback: {result.feedback}")
+ print(f"Outcome: {result.outcome}") # "approved"
+ print(f"Received at: {result.timestamp}")
+```
+
+## Accessing Feedback History
+
+The `Flow` class provides two attributes for accessing human feedback:
+
+### last_human_feedback
+
+Returns the most recent `HumanFeedbackResult`:
+
+```python Code
+@listen(some_method)
+def check_feedback(self):
+ if self.last_human_feedback:
+ print(f"Last feedback: {self.last_human_feedback.feedback}")
+```
+
+### human_feedback_history
+
+A list of all `HumanFeedbackResult` objects collected during the flow:
+
+```python Code
+@listen(final_step)
+def summarize(self):
+ print(f"Total feedback collected: {len(self.human_feedback_history)}")
+ for i, fb in enumerate(self.human_feedback_history):
+ print(f"{i+1}. {fb.method_name}: {fb.outcome or 'no routing'}")
+```
+
+
+Each `HumanFeedbackResult` is appended to `human_feedback_history`, so multiple feedback steps won't overwrite each other. Use this list to access all feedback collected during the flow.
+
+
+## Complete Example: Content Approval Workflow
+
+Here's a full example implementing a content review and approval workflow:
+
+
+
+```python Code
+from crewai.flow.flow import Flow, start, listen
+from crewai.flow.human_feedback import human_feedback, HumanFeedbackResult
+from pydantic import BaseModel
+
+
+class ContentState(BaseModel):
+ topic: str = ""
+ draft: str = ""
+ final_content: str = ""
+ revision_count: int = 0
+
+
+class ContentApprovalFlow(Flow[ContentState]):
+ """A flow that generates content and gets human approval."""
+
+ @start()
+ def get_topic(self):
+ self.state.topic = input("What topic should I write about? ")
+ return self.state.topic
+
+ @listen(get_topic)
+ def generate_draft(self, topic):
+ # In real use, this would call an LLM
+ self.state.draft = f"# {topic}\n\nThis is a draft about {topic}..."
+ return self.state.draft
+
+ @listen(generate_draft)
+ @human_feedback(
+ message="Please review this draft. Reply 'approved', 'rejected', or provide revision feedback:",
+ emit=["approved", "rejected", "needs_revision"],
+ llm="gpt-4o-mini",
+ default_outcome="needs_revision",
+ )
+ def review_draft(self, draft):
+ return draft
+
+ @listen("approved")
+ def publish_content(self, result: HumanFeedbackResult):
+ self.state.final_content = result.output
+        print("\n✅ Content approved and published!")
+ print(f"Reviewer comment: {result.feedback}")
+ return "published"
+
+ @listen("rejected")
+ def handle_rejection(self, result: HumanFeedbackResult):
+        print("\n❌ Content rejected")
+ print(f"Reason: {result.feedback}")
+ return "rejected"
+
+ @listen("needs_revision")
+ def revise_content(self, result: HumanFeedbackResult):
+ self.state.revision_count += 1
+        print(f"\n🔄 Revision #{self.state.revision_count} requested")
+ print(f"Feedback: {result.feedback}")
+
+ # In a real flow, you might loop back to generate_draft
+ # For this example, we just acknowledge
+ return "revision_requested"
+
+
+# Run the flow
+flow = ContentApprovalFlow()
+result = flow.kickoff()
+print(f"\nFlow completed. Revisions requested: {flow.state.revision_count}")
+```
+
+```text Output
+What topic should I write about? AI Safety
+
+==================================================
+OUTPUT FOR REVIEW:
+==================================================
+# AI Safety
+
+This is a draft about AI Safety...
+==================================================
+
+Please review this draft. Reply 'approved', 'rejected', or provide revision feedback:
+(Press Enter to skip, or type your feedback)
+
+Your feedback: Looks good, approved!
+
+✅ Content approved and published!
+Reviewer comment: Looks good, approved!
+
+Flow completed. Revisions requested: 0
+```
+
+
+
+## Combining with Other Decorators
+
+The `@human_feedback` decorator works with other flow decorators. Place it as the innermost decorator (closest to the function):
+
+```python Code
+# Correct: @human_feedback is innermost (closest to the function)
+@start()
+@human_feedback(message="Review this:")
+def my_start_method(self):
+ return "content"
+
+@listen(other_method)
+@human_feedback(message="Review this too:")
+def my_listener(self, data):
+ return f"processed: {data}"
+```
+
+
+Place `@human_feedback` as the innermost decorator (last/closest to the function) so it wraps the method directly and can capture the return value before it is passed to the flow system.
+
+
+## Best Practices
+
+### 1. Write Clear Request Messages
+
+The `message` parameter is what the human sees. Make it actionable:
+
+```python Code
+# ✅ Good - clear and actionable
+@human_feedback(message="Does this summary accurately capture the key points? Reply 'yes' or explain what's missing:")
+
+# ❌ Bad - vague
+@human_feedback(message="Review this:")
+```
+
+### 2. Choose Meaningful Outcomes
+
+When using `emit`, pick outcomes that map naturally to human responses:
+
+```python Code
+# ✅ Good - natural language outcomes
+emit=["approved", "rejected", "needs_more_detail"]
+
+# ❌ Bad - technical or unclear
+emit=["state_1", "state_2", "state_3"]
+```
+
+### 3. Always Provide a Default Outcome
+
+Use `default_outcome` to handle cases where users press Enter without typing:
+
+```python Code
+@human_feedback(
+ message="Approve? (press Enter to request revision)",
+ emit=["approved", "needs_revision"],
+ llm="gpt-4o-mini",
+ default_outcome="needs_revision", # Safe default
+)
+```
+
+### 4. Use Feedback History for Audit Trails
+
+Access `human_feedback_history` to create audit logs:
+
+```python Code
+@listen(final_step)
+def create_audit_log(self):
+ log = []
+ for fb in self.human_feedback_history:
+ log.append({
+ "step": fb.method_name,
+ "outcome": fb.outcome,
+ "feedback": fb.feedback,
+ "timestamp": fb.timestamp.isoformat(),
+ })
+ return log
+```
+
+### 5. Handle Both Routed and Non-Routed Feedback
+
+When designing flows, consider whether you need routing:
+
+| Scenario | Use |
+|----------|-----|
+| Simple review, just need the feedback text | No `emit` |
+| Need to branch to different paths based on response | Use `emit` |
+| Approval gates with approve/reject/revise | Use `emit` |
+| Collecting comments for logging only | No `emit` |
+
+## Async Human Feedback (Non-Blocking)
+
+By default, `@human_feedback` blocks execution waiting for console input. For production applications, you may need **async/non-blocking** feedback that integrates with external systems like Slack, email, webhooks, or APIs.
+
+### The Provider Abstraction
+
+Use the `provider` parameter to specify a custom feedback collection strategy:
+
+```python Code
+from crewai.flow import Flow, start, human_feedback, HumanFeedbackProvider, HumanFeedbackPending, PendingFeedbackContext
+
+class WebhookProvider(HumanFeedbackProvider):
+ """Provider that pauses flow and waits for webhook callback."""
+
+ def __init__(self, webhook_url: str):
+ self.webhook_url = webhook_url
+
+ def request_feedback(self, context: PendingFeedbackContext, flow: Flow) -> str:
+ # Notify external system (e.g., send Slack message, create ticket)
+ self.send_notification(context)
+
+ # Pause execution - framework handles persistence automatically
+ raise HumanFeedbackPending(
+ context=context,
+ callback_info={"webhook_url": f"{self.webhook_url}/{context.flow_id}"}
+ )
+
+class ReviewFlow(Flow):
+ @start()
+ @human_feedback(
+ message="Review this content:",
+ emit=["approved", "rejected"],
+ llm="gpt-4o-mini",
+ provider=WebhookProvider("https://myapp.com/api"),
+ )
+ def generate_content(self):
+ return "AI-generated content..."
+
+ @listen("approved")
+ def publish(self, result):
+ return "Published!"
+```
+
+
+The flow framework **automatically persists state** when `HumanFeedbackPending` is raised. Your provider only needs to notify the external system and raise the exception; no manual persistence calls are required.
+
+
+### Handling Paused Flows
+
+When using an async provider, `kickoff()` returns a `HumanFeedbackPending` object instead of raising an exception:
+
+```python Code
+flow = ReviewFlow()
+result = flow.kickoff()
+
+if isinstance(result, HumanFeedbackPending):
+ # Flow is paused, state is automatically persisted
+ print(f"Waiting for feedback at: {result.callback_info['webhook_url']}")
+ print(f"Flow ID: {result.context.flow_id}")
+else:
+ # Normal completion
+ print(f"Flow completed: {result}")
+```
+
+### Resuming a Paused Flow
+
+When feedback arrives (e.g., via webhook), resume the flow:
+
+```python Code
+# Sync handler:
+def handle_feedback_webhook(flow_id: str, feedback: str):
+ flow = ReviewFlow.from_pending(flow_id)
+ result = flow.resume(feedback)
+ return result
+
+# Async handler (FastAPI, aiohttp, etc.):
+async def handle_feedback_webhook(flow_id: str, feedback: str):
+ flow = ReviewFlow.from_pending(flow_id)
+ result = await flow.resume_async(feedback)
+ return result
+```
+
+### Key Types
+
+| Type | Description |
+|------|-------------|
+| `HumanFeedbackProvider` | Protocol for custom feedback providers |
+| `PendingFeedbackContext` | Contains all info needed to resume a paused flow |
+| `HumanFeedbackPending` | Returned by `kickoff()` when flow is paused for feedback |
+| `ConsoleProvider` | Default blocking console input provider |
+
+### PendingFeedbackContext
+
+The context contains everything needed to resume:
+
+```python Code
+@dataclass
+class PendingFeedbackContext:
+ flow_id: str # Unique identifier for this flow execution
+ flow_class: str # Fully qualified class name
+ method_name: str # Method that triggered feedback
+ method_output: Any # Output shown to the human
+ message: str # The request message
+ emit: list[str] | None # Possible outcomes for routing
+ default_outcome: str | None
+ metadata: dict # Custom metadata
+ llm: str | None # LLM for outcome collapsing
+ requested_at: datetime
+```
+
+### Complete Async Flow Example
+
+```python Code
+from crewai.flow import (
+ Flow, start, listen, human_feedback,
+ HumanFeedbackProvider, HumanFeedbackPending, PendingFeedbackContext
+)
+
+class SlackNotificationProvider(HumanFeedbackProvider):
+ """Provider that sends Slack notifications and pauses for async feedback."""
+
+ def __init__(self, channel: str):
+ self.channel = channel
+
+ def request_feedback(self, context: PendingFeedbackContext, flow: Flow) -> str:
+ # Send Slack notification (implement your own)
+ slack_thread_id = self.post_to_slack(
+ channel=self.channel,
+ message=f"Review needed:\n\n{context.method_output}\n\n{context.message}",
+ )
+
+ # Pause execution - framework handles persistence automatically
+ raise HumanFeedbackPending(
+ context=context,
+ callback_info={
+ "slack_channel": self.channel,
+ "thread_id": slack_thread_id,
+ }
+ )
+
+class ContentPipeline(Flow):
+ @start()
+ @human_feedback(
+ message="Approve this content for publication?",
+ emit=["approved", "rejected", "needs_revision"],
+ llm="gpt-4o-mini",
+ default_outcome="needs_revision",
+ provider=SlackNotificationProvider("#content-reviews"),
+ )
+ def generate_content(self):
+ return "AI-generated blog post content..."
+
+ @listen("approved")
+ def publish(self, result):
+ print(f"Publishing! Reviewer said: {result.feedback}")
+ return {"status": "published"}
+
+ @listen("rejected")
+ def archive(self, result):
+ print(f"Archived. Reason: {result.feedback}")
+ return {"status": "archived"}
+
+ @listen("needs_revision")
+ def queue_revision(self, result):
+ print(f"Queued for revision: {result.feedback}")
+ return {"status": "revision_needed"}
+
+
+# Starting the flow (will pause and wait for Slack response)
+def start_content_pipeline():
+ flow = ContentPipeline()
+ result = flow.kickoff()
+
+ if isinstance(result, HumanFeedbackPending):
+ return {"status": "pending", "flow_id": result.context.flow_id}
+
+ return result
+
+
+# Resuming when Slack webhook fires (sync handler)
+def on_slack_feedback(flow_id: str, slack_message: str):
+ flow = ContentPipeline.from_pending(flow_id)
+ result = flow.resume(slack_message)
+ return result
+
+
+# If your handler is async (FastAPI, aiohttp, Slack Bolt async, etc.)
+async def on_slack_feedback_async(flow_id: str, slack_message: str):
+ flow = ContentPipeline.from_pending(flow_id)
+ result = await flow.resume_async(slack_message)
+ return result
+```
+
+
+If you're using an async web framework (FastAPI, aiohttp, Slack Bolt async mode), use `await flow.resume_async()` instead of `flow.resume()`. Calling `resume()` from within a running event loop will raise a `RuntimeError`.
+
+
+### Best Practices for Async Feedback
+
+1. **Check the return type**: `kickoff()` returns `HumanFeedbackPending` when paused; no try/except is needed
+2. **Use the right resume method**: Use `resume()` in sync code, `await resume_async()` in async code
+3. **Store callback info**: Use `callback_info` to store webhook URLs, ticket IDs, etc.
+4. **Implement idempotency**: Your resume handler should be idempotent for safety (see the sketch below)
+5. **Automatic persistence**: State is saved automatically when `HumanFeedbackPending` is raised, using `SQLiteFlowPersistence` by default
+6. **Custom persistence**: Pass a custom persistence instance to `from_pending()` if needed (also illustrated below)
+
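+As a sketch of points 4 and 6, the handler below guards against duplicate webhook deliveries and passes a custom persistence instance to `from_pending()`. The `already_resumed`/`mark_resumed` helpers are hypothetical stand-ins for your own datastore, and the exact keyword for passing persistence is an assumption; adjust it to the real signature.
+
+```python Code
+from crewai.flow.persistence import SQLiteFlowPersistence
+
+# Hypothetical idempotency store; replace with your own database or cache
+resumed_flow_ids: set[str] = set()
+
+def already_resumed(flow_id: str) -> bool:
+    return flow_id in resumed_flow_ids
+
+def mark_resumed(flow_id: str) -> None:
+    resumed_flow_ids.add(flow_id)
+
+def handle_feedback_webhook(flow_id: str, feedback: str):
+    # Webhooks can be delivered more than once; make the handler idempotent
+    if already_resumed(flow_id):
+        return {"status": "already_processed"}
+
+    # Passing a persistence instance here assumes from_pending() accepts one;
+    # by default SQLiteFlowPersistence is used automatically
+    persistence = SQLiteFlowPersistence()
+    flow = ContentPipeline.from_pending(flow_id, persistence=persistence)
+    result = flow.resume(feedback)
+
+    mark_resumed(flow_id)
+    return result
+```
+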
+## Related Documentation
+
+- [Flows Overview](/en/concepts/flows) - Learn about CrewAI Flows
+- [Flow State Management](/en/guides/flows/mastering-flow-state) - Managing state in flows
+- [Flow Persistence](/en/concepts/flows#persistence) - Persisting flow state
+- [Routing with @router](/en/concepts/flows#router) - More about conditional routing
+- [Human Input on Execution](/en/learn/human-input-on-execution) - Task-level human input
diff --git a/docs/en/learn/human-in-the-loop.mdx b/docs/en/learn/human-in-the-loop.mdx
index f1413aec8..7a74ebb08 100644
--- a/docs/en/learn/human-in-the-loop.mdx
+++ b/docs/en/learn/human-in-the-loop.mdx
@@ -5,9 +5,22 @@ icon: "user-check"
mode: "wide"
---
-Human-in-the-Loop (HITL) is a powerful approach that combines artificial intelligence with human expertise to enhance decision-making and improve task outcomes. This guide shows you how to implement HITL within CrewAI.
+Human-in-the-Loop (HITL) is a powerful approach that combines artificial intelligence with human expertise to enhance decision-making and improve task outcomes. CrewAI provides multiple ways to implement HITL depending on your needs.
-## Setting Up HITL Workflows
+## Choosing Your HITL Approach
+
+CrewAI offers two main approaches for implementing human-in-the-loop workflows:
+
+| Approach | Best For | Integration |
+|----------|----------|-------------|
+| **Flow-based** (`@human_feedback` decorator) | Local development, console-based review, synchronous workflows | [Human Feedback in Flows](/en/learn/human-feedback-in-flows) |
+| **Webhook-based** (Enterprise) | Production deployments, async workflows, external integrations (Slack, Teams, etc.) | This guide |
+
+
+If you're building flows and want to add human review steps with routing based on feedback, check out the [Human Feedback in Flows](/en/learn/human-feedback-in-flows) guide for the `@human_feedback` decorator.
+
+
+## Setting Up Webhook-Based HITL Workflows
diff --git a/docs/ko/concepts/flows.mdx b/docs/ko/concepts/flows.mdx
index 11caea5f3..328211e85 100644
--- a/docs/ko/concepts/flows.mdx
+++ b/docs/ko/concepts/flows.mdx
@@ -565,6 +565,55 @@ Fourth method running
이 Flow를 실행하면, `start_method`에서 생성된 랜덤 불리언 값에 따라 출력이 달라집니다.
+### Human in the Loop (인간 피드백)
+
+`@human_feedback` 데코레이터는 인간의 피드백을 수집하기 위해 플로우 실행을 일시 중지하는 human-in-the-loop 워크플로우를 가능하게 합니다. 이는 승인 게이트, 품질 검토, 인간의 판단이 필요한 결정 지점에 유용합니다.
+
+```python Code
+from crewai.flow.flow import Flow, start, listen
+from crewai.flow.human_feedback import human_feedback, HumanFeedbackResult
+
+class ReviewFlow(Flow):
+    @start()
+    @human_feedback(
+        message="이 콘텐츠를 승인하시겠습니까?",
+        emit=["approved", "rejected", "needs_revision"],
+        llm="gpt-4o-mini",
+        default_outcome="needs_revision",
+    )
+    def generate_content(self):
+        return "검토할 콘텐츠..."
+
+    @listen("approved")
+    def on_approval(self, result: HumanFeedbackResult):
+        print(f"승인됨! 피드백: {result.feedback}")
+
+    @listen("rejected")
+    def on_rejection(self, result: HumanFeedbackResult):
+        print(f"거부됨. 이유: {result.feedback}")
+```
+
+`emit`이 지정되면, 인간의 자유 형식 피드백이 LLM에 의해 해석되어 지정된 outcome 중 하나로 매핑되고, 해당 `@listen` 리스너가 트리거됩니다.
+
+라우팅 없이 단순히 피드백만 수집할 수도 있습니다:
+
+```python Code
+@start()
+@human_feedback(message="이 출력에 대한 코멘트가 있으신가요?")
+def my_method(self):
+    return "검토할 출력"
+
+@listen(my_method)
+def next_step(self, result: HumanFeedbackResult):
+    # result.feedback으로 피드백에 접근
+    # result.output으로 원래 출력에 접근
+    pass
+```
+
+플로우 실행 중 수집된 모든 피드백은 `self.last_human_feedback`(가장 최근) 또는 `self.human_feedback_history`(리스트 형태의 모든 피드백)를 통해 접근할 수 있습니다.
+
+비동기/논블로킹 피드백과 커스텀 프로바이더(Slack, 웹훅 등)를 포함한, 플로우에서의 인간 피드백에 대한 전체 가이드는 [Flow에서 인간 피드백](/ko/learn/human-feedback-in-flows)을 참조하세요.
+
## 플로우에 에이전트 추가하기
에이전트는 플로우에 원활하게 통합할 수 있으며, 단순하고 집중된 작업 실행이 필요할 때 전체 Crew의 경량 대안이 됩니다. 아래는 플로우 내에서 에이전트를 사용하여 시장 조사를 수행하는 예시입니다:
diff --git a/docs/ko/learn/human-feedback-in-flows.mdx b/docs/ko/learn/human-feedback-in-flows.mdx
new file mode 100644
index 000000000..89cccde1f
--- /dev/null
+++ b/docs/ko/learn/human-feedback-in-flows.mdx
@@ -0,0 +1,581 @@
+---
+title: Flow์์ ์ธ๊ฐ ํผ๋๋ฐฑ
+description: "@human_feedback ๋ฐ์ฝ๋ ์ดํฐ๋ฅผ ์ฌ์ฉํ์ฌ CrewAI Flow์ ์ธ๊ฐ ํผ๋๋ฐฑ์ ์ง์ ํตํฉํ๋ ๋ฐฉ๋ฒ์ ์์๋ณด์ธ์"
+icon: user-check
+mode: "wide"
+---
+
+## ๊ฐ์
+
+`@human_feedback` ๋ฐ์ฝ๋ ์ดํฐ๋ CrewAI Flow ๋ด์์ ์ง์ human-in-the-loop(HITL) ์ํฌํ๋ก์ฐ๋ฅผ ๊ฐ๋ฅํ๊ฒ ํฉ๋๋ค. Flow ์คํ์ ์ผ์ ์ค์งํ๊ณ , ์ธ๊ฐ์๊ฒ ๊ฒํ ๋ฅผ ์ํด ์ถ๋ ฅ์ ์ ์ํ๊ณ , ํผ๋๋ฐฑ์ ์์งํ๊ณ , ์ ํ์ ์ผ๋ก ํผ๋๋ฐฑ ๊ฒฐ๊ณผ์ ๋ฐ๋ผ ๋ค๋ฅธ ๋ฆฌ์ค๋๋ก ๋ผ์ฐํ
ํ ์ ์์ต๋๋ค.
+
+์ด๋ ํนํ ๋ค์๊ณผ ๊ฐ์ ๊ฒฝ์ฐ์ ์ ์ฉํฉ๋๋ค:
+
+- **ํ์ง ๋ณด์ฆ**: AI๊ฐ ์์ฑํ ์ฝํ
์ธ ๋ฅผ ๋ค์ด์คํธ๋ฆผ์์ ์ฌ์ฉํ๊ธฐ ์ ์ ๊ฒํ
+- **๊ฒฐ์ ๊ฒ์ดํธ**: ์๋ํ๋ ์ํฌํ๋ก์ฐ์์ ์ธ๊ฐ์ด ์ค์ํ ๊ฒฐ์ ์ ๋ด๋ฆฌ๋๋ก ํ์ฉ
+- **์น์ธ ์ํฌํ๋ก์ฐ**: ์น์ธ/๊ฑฐ๋ถ/์์ ํจํด ๊ตฌํ
+- **๋ํํ ๊ฐ์ **: ์ถ๋ ฅ์ ๋ฐ๋ณต์ ์ผ๋ก ๊ฐ์ ํ๊ธฐ ์ํด ํผ๋๋ฐฑ ์์ง
+
+```mermaid
+flowchart LR
+ A[Flow ๋ฉ์๋] --> B[์ถ๋ ฅ ์์ฑ๋จ]
+ B --> C[์ธ๊ฐ์ด ๊ฒํ ]
+ C --> D{ํผ๋๋ฐฑ}
+ D -->|emit ์ง์ ๋จ| E[LLM์ด Outcome์ผ๋ก ๋งคํ]
+ D -->|emit ์์| F[HumanFeedbackResult]
+ E --> G["@listen('approved')"]
+ E --> H["@listen('rejected')"]
+ F --> I[๋ค์ ๋ฆฌ์ค๋]
+```
+
+## ๋น ๋ฅธ ์์
+
+Flow์ ์ธ๊ฐ ํผ๋๋ฐฑ์ ์ถ๊ฐํ๋ ๊ฐ์ฅ ๊ฐ๋จํ ๋ฐฉ๋ฒ์ ๋ค์๊ณผ ๊ฐ์ต๋๋ค:
+
+```python Code
+from crewai.flow.flow import Flow, start, listen
+from crewai.flow.human_feedback import human_feedback
+
+class SimpleReviewFlow(Flow):
+ @start()
+ @human_feedback(message="์ด ์ฝํ
์ธ ๋ฅผ ๊ฒํ ํด ์ฃผ์ธ์:")
+ def generate_content(self):
+ return "๊ฒํ ๊ฐ ํ์ํ AI ์์ฑ ์ฝํ
์ธ ์
๋๋ค."
+
+ @listen(generate_content)
+ def process_feedback(self, result):
+ print(f"์ฝํ
์ธ : {result.output}")
+ print(f"์ธ๊ฐ์ ์๊ฒฌ: {result.feedback}")
+
+flow = SimpleReviewFlow()
+flow.kickoff()
+```
+
+์ด Flow๋ฅผ ์คํํ๋ฉด:
+1. `generate_content`๋ฅผ ์คํํ๊ณ ๋ฌธ์์ด์ ๋ฐํํฉ๋๋ค
+2. ์์ฒญ ๋ฉ์์ง์ ํจ๊ป ์ฌ์ฉ์์๊ฒ ์ถ๋ ฅ์ ํ์ํฉ๋๋ค
+3. ์ฌ์ฉ์๊ฐ ํผ๋๋ฐฑ์ ์
๋ ฅํ ๋๊น์ง ๋๊ธฐํฉ๋๋ค (๋๋ Enter๋ฅผ ๋๋ฌ ๊ฑด๋๋๋๋ค)
+4. `HumanFeedbackResult` ๊ฐ์ฒด๋ฅผ `process_feedback`์ ์ ๋ฌํฉ๋๋ค
+
+## @human_feedback ๋ฐ์ฝ๋ ์ดํฐ
+
+### ๋งค๊ฐ๋ณ์
+
+| ๋งค๊ฐ๋ณ์ | ํ์
| ํ์ | ์ค๋ช
|
+|----------|------|------|------|
+| `message` | `str` | ์ | ๋ฉ์๋ ์ถ๋ ฅ๊ณผ ํจ๊ป ์ธ๊ฐ์๊ฒ ํ์๋๋ ๋ฉ์์ง |
+| `emit` | `Sequence[str]` | ์๋์ค | ๊ฐ๋ฅํ outcome ๋ชฉ๋ก. ํผ๋๋ฐฑ์ด ์ด ์ค ํ๋๋ก ๋งคํ๋์ด `@listen` ๋ฐ์ฝ๋ ์ดํฐ๋ฅผ ํธ๋ฆฌ๊ฑฐํฉ๋๋ค |
+| `llm` | `str \| BaseLLM` | `emit` ์ง์ ์ | ํผ๋๋ฐฑ์ ํด์ํ๊ณ outcome์ ๋งคํํ๋ ๋ฐ ์ฌ์ฉ๋๋ LLM |
+| `default_outcome` | `str` | ์๋์ค | ํผ๋๋ฐฑ์ด ์ ๊ณต๋์ง ์์ ๋ ์ฌ์ฉํ outcome. `emit`์ ์์ด์ผ ํฉ๋๋ค |
+| `metadata` | `dict` | ์๋์ค | ์ํฐํ๋ผ์ด์ฆ ํตํฉ์ ์ํ ์ถ๊ฐ ๋ฐ์ดํฐ |
+| `provider` | `HumanFeedbackProvider` | ์๋์ค | ๋น๋๊ธฐ/๋
ผ๋ธ๋กํน ํผ๋๋ฐฑ์ ์ํ ์ปค์คํ
ํ๋ก๋ฐ์ด๋. [๋น๋๊ธฐ ์ธ๊ฐ ํผ๋๋ฐฑ](#๋น๋๊ธฐ-์ธ๊ฐ-ํผ๋๋ฐฑ-๋
ผ๋ธ๋กํน) ์ฐธ์กฐ |
+
+### ๊ธฐ๋ณธ ์ฌ์ฉ๋ฒ (๋ผ์ฐํ
์์)
+
+`emit`์ ์ง์ ํ์ง ์์ผ๋ฉด, ๋ฐ์ฝ๋ ์ดํฐ๋ ๋จ์ํ ํผ๋๋ฐฑ์ ์์งํ๊ณ ๋ค์ ๋ฆฌ์ค๋์ `HumanFeedbackResult`๋ฅผ ์ ๋ฌํฉ๋๋ค:
+
+```python Code
+@start()
+@human_feedback(message="์ด ๋ถ์์ ๋ํด ์ด๋ป๊ฒ ์๊ฐํ์๋์?")
+def analyze_data(self):
+ return "๋ถ์ ๊ฒฐ๊ณผ: ๋งค์ถ 15% ์ฆ๊ฐ, ๋น์ฉ 8% ๊ฐ์"
+
+@listen(analyze_data)
+def handle_feedback(self, result):
+ # result๋ HumanFeedbackResult์
๋๋ค
+ print(f"๋ถ์: {result.output}")
+ print(f"ํผ๋๋ฐฑ: {result.feedback}")
+```
+
+### emit์ ์ฌ์ฉํ ๋ผ์ฐํ
+
+`emit`์ ์ง์ ํ๋ฉด, ๋ฐ์ฝ๋ ์ดํฐ๋ ๋ผ์ฐํฐ๊ฐ ๋ฉ๋๋ค. ์ธ๊ฐ์ ์์ ํ์ ํผ๋๋ฐฑ์ด LLM์ ์ํด ํด์๋์ด ์ง์ ๋ outcome ์ค ํ๋๋ก ๋งคํ๋ฉ๋๋ค:
+
+```python Code
+@start()
+@human_feedback(
+ message="์ด ์ฝํ
์ธ ์ ์ถํ์ ์น์ธํ์๊ฒ ์ต๋๊น?",
+ emit=["approved", "rejected", "needs_revision"],
+ llm="gpt-4o-mini",
+ default_outcome="needs_revision",
+)
+def review_content(self):
+ return "๋ธ๋ก๊ทธ ๊ฒ์๋ฌผ ์ด์ ๋ด์ฉ..."
+
+@listen("approved")
+def publish(self, result):
+ print(f"์ถํ ์ค! ์ฌ์ฉ์ ์๊ฒฌ: {result.feedback}")
+
+@listen("rejected")
+def discard(self, result):
+ print(f"ํ๊ธฐ๋จ. ์ด์ : {result.feedback}")
+
+@listen("needs_revision")
+def revise(self, result):
+ print(f"๋ค์์ ๊ธฐ๋ฐ์ผ๋ก ์์ ์ค: {result.feedback}")
+```
+
+
+LLM์ ๊ฐ๋ฅํ ๊ฒฝ์ฐ ๊ตฌ์กฐํ๋ ์ถ๋ ฅ(function calling)์ ์ฌ์ฉํ์ฌ ์๋ต์ด ์ง์ ๋ outcome ์ค ํ๋์์ ๋ณด์ฅํฉ๋๋ค. ์ด๋ก ์ธํด ๋ผ์ฐํ
์ด ์ ๋ขฐํ ์ ์๊ณ ์์ธก ๊ฐ๋ฅํด์ง๋๋ค.
+
+
+## HumanFeedbackResult
+
+`HumanFeedbackResult` ๋ฐ์ดํฐํด๋์ค๋ ์ธ๊ฐ ํผ๋๋ฐฑ ์ํธ์์ฉ์ ๋ํ ๋ชจ๋ ์ ๋ณด๋ฅผ ํฌํจํฉ๋๋ค:
+
+```python Code
+from crewai.flow.human_feedback import HumanFeedbackResult
+
+@dataclass
+class HumanFeedbackResult:
+ output: Any # ์ธ๊ฐ์๊ฒ ํ์๋ ์๋ ๋ฉ์๋ ์ถ๋ ฅ
+ feedback: str # ์ธ๊ฐ์ ์์ ํผ๋๋ฐฑ ํ
์คํธ
+ outcome: str | None # ๋งคํ๋ outcome (emit์ด ์ง์ ๋ ๊ฒฝ์ฐ)
+ timestamp: datetime # ํผ๋๋ฐฑ์ด ์์ ๋ ์๊ฐ
+ method_name: str # ๋ฐ์ฝ๋ ์ดํฐ๋ ๋ฉ์๋์ ์ด๋ฆ
+ metadata: dict # ๋ฐ์ฝ๋ ์ดํฐ์ ์ ๋ฌ๋ ๋ชจ๋ ๋ฉํ๋ฐ์ดํฐ
+```
+
+### ๋ฆฌ์ค๋์์ ์ ๊ทผํ๊ธฐ
+
+`emit`์ด ์๋ `@human_feedback` ๋ฉ์๋์ ์ํด ๋ฆฌ์ค๋๊ฐ ํธ๋ฆฌ๊ฑฐ๋๋ฉด, `HumanFeedbackResult`๋ฅผ ๋ฐ์ต๋๋ค:
+
+```python Code
+@listen("approved")
+def on_approval(self, result: HumanFeedbackResult):
+ print(f"์๋ ์ถ๋ ฅ: {result.output}")
+ print(f"์ฌ์ฉ์ ํผ๋๋ฐฑ: {result.feedback}")
+ print(f"Outcome: {result.outcome}") # "approved"
+ print(f"์์ ์๊ฐ: {result.timestamp}")
+```
+
+## ํผ๋๋ฐฑ ํ์คํ ๋ฆฌ ์ ๊ทผํ๊ธฐ
+
+`Flow` ํด๋์ค๋ ์ธ๊ฐ ํผ๋๋ฐฑ์ ์ ๊ทผํ๊ธฐ ์ํ ๋ ๊ฐ์ง ์์ฑ์ ์ ๊ณตํฉ๋๋ค:
+
+### last_human_feedback
+
+๊ฐ์ฅ ์ต๊ทผ์ `HumanFeedbackResult`๋ฅผ ๋ฐํํฉ๋๋ค:
+
+```python Code
+@listen(some_method)
+def check_feedback(self):
+ if self.last_human_feedback:
+ print(f"๋ง์ง๋ง ํผ๋๋ฐฑ: {self.last_human_feedback.feedback}")
+```
+
+### human_feedback_history
+
+Flow ๋์ ์์ง๋ ๋ชจ๋ `HumanFeedbackResult` ๊ฐ์ฒด์ ๋ฆฌ์คํธ์
๋๋ค:
+
+```python Code
+@listen(final_step)
+def summarize(self):
+ print(f"์์ง๋ ์ด ํผ๋๋ฐฑ: {len(self.human_feedback_history)}")
+ for i, fb in enumerate(self.human_feedback_history):
+ print(f"{i+1}. {fb.method_name}: {fb.outcome or '๋ผ์ฐํ
์์'}")
+```
+
+
+๊ฐ `HumanFeedbackResult`๋ `human_feedback_history`์ ์ถ๊ฐ๋๋ฏ๋ก, ์ฌ๋ฌ ํผ๋๋ฐฑ ๋จ๊ณ๊ฐ ์๋ก ๋ฎ์ด์ฐ์ง ์์ต๋๋ค. ์ด ๋ฆฌ์คํธ๋ฅผ ์ฌ์ฉํ์ฌ Flow ๋์ ์์ง๋ ๋ชจ๋ ํผ๋๋ฐฑ์ ์ ๊ทผํ์ธ์.
+
+
+## ์์ ํ ์์ : ์ฝํ
์ธ ์น์ธ ์ํฌํ๋ก์ฐ
+
+์ฝํ
์ธ ๊ฒํ ๋ฐ ์น์ธ ์ํฌํ๋ก์ฐ๋ฅผ ๊ตฌํํ๋ ์ ์ฒด ์์ ์
๋๋ค:
+
+
+
+```python Code
+from crewai.flow.flow import Flow, start, listen
+from crewai.flow.human_feedback import human_feedback, HumanFeedbackResult
+from pydantic import BaseModel
+
+
+class ContentState(BaseModel):
+ topic: str = ""
+ draft: str = ""
+ final_content: str = ""
+ revision_count: int = 0
+
+
+class ContentApprovalFlow(Flow[ContentState]):
+ """์ฝํ
์ธ ๋ฅผ ์์ฑํ๊ณ ์ธ๊ฐ์ ์น์ธ์ ๋ฐ๋ Flow์
๋๋ค."""
+
+ @start()
+ def get_topic(self):
+ self.state.topic = input("์ด๋ค ์ฃผ์ ์ ๋ํด ๊ธ์ ์ธ๊น์? ")
+ return self.state.topic
+
+ @listen(get_topic)
+ def generate_draft(self, topic):
+ # ์ค์ ์ฌ์ฉ์์๋ LLM์ ํธ์ถํฉ๋๋ค
+ self.state.draft = f"# {topic}\n\n{topic}์ ๋ํ ์ด์์
๋๋ค..."
+ return self.state.draft
+
+ @listen(generate_draft)
+ @human_feedback(
+ message="์ด ์ด์์ ๊ฒํ ํด ์ฃผ์ธ์. 'approved', 'rejected'๋ก ๋ตํ๊ฑฐ๋ ์์ ํผ๋๋ฐฑ์ ์ ๊ณตํด ์ฃผ์ธ์:",
+ emit=["approved", "rejected", "needs_revision"],
+ llm="gpt-4o-mini",
+ default_outcome="needs_revision",
+ )
+ def review_draft(self, draft):
+ return draft
+
+ @listen("approved")
+ def publish_content(self, result: HumanFeedbackResult):
+ self.state.final_content = result.output
+ print("\nโ
์ฝํ
์ธ ๊ฐ ์น์ธ๋์ด ์ถํ๋์์ต๋๋ค!")
+ print(f"๊ฒํ ์ ์ฝ๋ฉํธ: {result.feedback}")
+ return "published"
+
+ @listen("rejected")
+ def handle_rejection(self, result: HumanFeedbackResult):
+ print("\nโ ์ฝํ
์ธ ๊ฐ ๊ฑฐ๋ถ๋์์ต๋๋ค")
+ print(f"์ด์ : {result.feedback}")
+ return "rejected"
+
+ @listen("needs_revision")
+ def revise_content(self, result: HumanFeedbackResult):
+ self.state.revision_count += 1
+ print(f"\n๐ ์์ #{self.state.revision_count} ์์ฒญ๋จ")
+ print(f"ํผ๋๋ฐฑ: {result.feedback}")
+
+ # ์ค์ Flow์์๋ generate_draft๋ก ๋์๊ฐ ์ ์์ต๋๋ค
+ # ์ด ์์ ์์๋ ๋จ์ํ ํ์ธํฉ๋๋ค
+ return "revision_requested"
+
+
+# Flow ์คํ
+flow = ContentApprovalFlow()
+result = flow.kickoff()
+print(f"\nFlow ์๋ฃ. ์์ฒญ๋ ์์ : {flow.state.revision_count}")
+```
+
+```text Output
+์ด๋ค ์ฃผ์ ์ ๋ํด ๊ธ์ ์ธ๊น์? AI ์์
+
+==================================================
+OUTPUT FOR REVIEW:
+==================================================
+# AI ์์
+
+AI ์์ ์ ๋ํ ์ด์์
๋๋ค...
+==================================================
+
+์ด ์ด์์ ๊ฒํ ํด ์ฃผ์ธ์. 'approved', 'rejected'๋ก ๋ตํ๊ฑฐ๋ ์์ ํผ๋๋ฐฑ์ ์ ๊ณตํด ์ฃผ์ธ์:
+(Press Enter to skip, or type your feedback)
+
+Your feedback: ์ข์ ๋ณด์
๋๋ค, ์น์ธ!
+
+โ
์ฝํ
์ธ ๊ฐ ์น์ธ๋์ด ์ถํ๋์์ต๋๋ค!
+๊ฒํ ์ ์ฝ๋ฉํธ: ์ข์ ๋ณด์
๋๋ค, ์น์ธ!
+
+Flow ์๋ฃ. ์์ฒญ๋ ์์ : 0
+```
+
+
+
+## ๋ค๋ฅธ ๋ฐ์ฝ๋ ์ดํฐ์ ๊ฒฐํฉํ๊ธฐ
+
+`@human_feedback` ๋ฐ์ฝ๋ ์ดํฐ๋ ๋ค๋ฅธ Flow ๋ฐ์ฝ๋ ์ดํฐ์ ํจ๊ป ์๋ํฉ๋๋ค. ๊ฐ์ฅ ์์ชฝ ๋ฐ์ฝ๋ ์ดํฐ(ํจ์์ ๊ฐ์ฅ ๊ฐ๊น์ด)๋ก ๋ฐฐ์นํ์ธ์:
+
+```python Code
+# ์ฌ๋ฐ๋ฆ: @human_feedback์ด ๊ฐ์ฅ ์์ชฝ(ํจ์์ ๊ฐ์ฅ ๊ฐ๊น์)
+@start()
+@human_feedback(message="์ด๊ฒ์ ๊ฒํ ํด ์ฃผ์ธ์:")
+def my_start_method(self):
+ return "content"
+
+@listen(other_method)
+@human_feedback(message="์ด๊ฒ๋ ๊ฒํ ํด ์ฃผ์ธ์:")
+def my_listener(self, data):
+ return f"processed: {data}"
+```
+
+
+`@human_feedback`๋ฅผ ๊ฐ์ฅ ์์ชฝ ๋ฐ์ฝ๋ ์ดํฐ(๋ง์ง๋ง/ํจ์์ ๊ฐ์ฅ ๊ฐ๊น์)๋ก ๋ฐฐ์นํ์ฌ ๋ฉ์๋๋ฅผ ์ง์ ๋ํํ๊ณ Flow ์์คํ
์ ์ ๋ฌํ๊ธฐ ์ ์ ๋ฐํ ๊ฐ์ ์บก์ฒํ ์ ์๋๋ก ํ์ธ์.
+
+
+## ๋ชจ๋ฒ ์ฌ๋ก
+
+### 1. ๋ช
ํํ ์์ฒญ ๋ฉ์์ง ์์ฑ
+
+`message` ๋งค๊ฐ๋ณ์๋ ์ธ๊ฐ์ด ๋ณด๋ ๊ฒ์
๋๋ค. ์คํ ๊ฐ๋ฅํ๊ฒ ๋ง๋์ธ์:
+
+```python Code
+# โ
์ข์ - ๋ช
ํํ๊ณ ์คํ ๊ฐ๋ฅ
+@human_feedback(message="์ด ์์ฝ์ด ํต์ฌ ํฌ์ธํธ๋ฅผ ์ ํํ๊ฒ ์บก์ฒํ๋์? '์'๋ก ๋ตํ๊ฑฐ๋ ๋ฌด์์ด ๋น ์ก๋์ง ์ค๋ช
ํด ์ฃผ์ธ์:")
+
+# โ ๋์จ - ๋ชจํธํจ
+@human_feedback(message="์ด๊ฒ์ ๊ฒํ ํด ์ฃผ์ธ์:")
+```
+
+### 2. ์๋ฏธ ์๋ Outcome ์ ํ
+
+`emit`์ ์ฌ์ฉํ ๋, ์ธ๊ฐ์ ์๋ต์ ์์ฐ์ค๋ฝ๊ฒ ๋งคํ๋๋ outcome์ ์ ํํ์ธ์:
+
+```python Code
+# โ
์ข์ - ์์ฐ์ด outcome
+emit=["approved", "rejected", "needs_more_detail"]
+
+# โ ๋์จ - ๊ธฐ์ ์ ์ด๊ฑฐ๋ ๋ถ๋ช
ํ
+emit=["state_1", "state_2", "state_3"]
+```
+
+### 3. ํญ์ ๊ธฐ๋ณธ Outcome ์ ๊ณต
+
+์ฌ์ฉ์๊ฐ ์
๋ ฅ ์์ด Enter๋ฅผ ๋๋ฅด๋ ๊ฒฝ์ฐ๋ฅผ ์ฒ๋ฆฌํ๊ธฐ ์ํด `default_outcome`์ ์ฌ์ฉํ์ธ์:
+
+```python Code
+@human_feedback(
+ message="์น์ธํ์๊ฒ ์ต๋๊น? (์์ ์์ฒญํ๋ ค๋ฉด Enter ๋๋ฅด์ธ์)",
+ emit=["approved", "needs_revision"],
+ llm="gpt-4o-mini",
+ default_outcome="needs_revision", # ์์ ํ ๊ธฐ๋ณธ๊ฐ
+)
+```
+
+### 4. ๊ฐ์ฌ ์ถ์ ์ ์ํ ํผ๋๋ฐฑ ํ์คํ ๋ฆฌ ์ฌ์ฉ
+
+๊ฐ์ฌ ๋ก๊ทธ๋ฅผ ์์ฑํ๊ธฐ ์ํด `human_feedback_history`์ ์ ๊ทผํ์ธ์:
+
+```python Code
+@listen(final_step)
+def create_audit_log(self):
+ log = []
+ for fb in self.human_feedback_history:
+ log.append({
+ "step": fb.method_name,
+ "outcome": fb.outcome,
+ "feedback": fb.feedback,
+ "timestamp": fb.timestamp.isoformat(),
+ })
+ return log
+```
+
+### 5. ๋ผ์ฐํ
๋ ํผ๋๋ฐฑ๊ณผ ๋ผ์ฐํ
๋์ง ์์ ํผ๋๋ฐฑ ๋ชจ๋ ์ฒ๋ฆฌ
+
+Flow๋ฅผ ์ค๊ณํ ๋, ๋ผ์ฐํ
์ด ํ์ํ์ง ๊ณ ๋ คํ์ธ์:
+
+| ์๋๋ฆฌ์ค | ์ฌ์ฉ |
+|----------|------|
+| ๊ฐ๋จํ ๊ฒํ , ํผ๋๋ฐฑ ํ
์คํธ๋ง ํ์ | `emit` ์์ |
+| ์๋ต์ ๋ฐ๋ผ ๋ค๋ฅธ ๊ฒฝ๋ก๋ก ๋ถ๊ธฐ ํ์ | `emit` ์ฌ์ฉ |
+| ์น์ธ/๊ฑฐ๋ถ/์์ ์ด ์๋ ์น์ธ ๊ฒ์ดํธ | `emit` ์ฌ์ฉ |
+| ๋ก๊น
๋ง์ ์ํ ์ฝ๋ฉํธ ์์ง | `emit` ์์ |
+
+## ๋น๋๊ธฐ ์ธ๊ฐ ํผ๋๋ฐฑ (๋
ผ๋ธ๋กํน)
+
+๊ธฐ๋ณธ์ ์ผ๋ก `@human_feedback`์ ์ฝ์ ์
๋ ฅ์ ๊ธฐ๋ค๋ฆฌ๋ฉฐ ์คํ์ ์ฐจ๋จํฉ๋๋ค. ํ๋ก๋์
์ ํ๋ฆฌ์ผ์ด์
์์๋ Slack, ์ด๋ฉ์ผ, ์นํ
๋๋ API์ ๊ฐ์ ์ธ๋ถ ์์คํ
๊ณผ ํตํฉ๋๋ **๋น๋๊ธฐ/๋
ผ๋ธ๋กํน** ํผ๋๋ฐฑ์ด ํ์ํ ์ ์์ต๋๋ค.
+
+### Provider ์ถ์ํ
+
+์ปค์คํ
ํผ๋๋ฐฑ ์์ง ์ ๋ต์ ์ง์ ํ๋ ค๋ฉด `provider` ๋งค๊ฐ๋ณ์๋ฅผ ์ฌ์ฉํ์ธ์:
+
+```python Code
+from crewai.flow import Flow, start, human_feedback, HumanFeedbackProvider, HumanFeedbackPending, PendingFeedbackContext
+
+class WebhookProvider(HumanFeedbackProvider):
+ """์นํ
์ฝ๋ฐฑ์ ๊ธฐ๋ค๋ฆฌ๋ฉฐ Flow๋ฅผ ์ผ์ ์ค์งํ๋ Provider."""
+
+ def __init__(self, webhook_url: str):
+ self.webhook_url = webhook_url
+
+ def request_feedback(self, context: PendingFeedbackContext, flow: Flow) -> str:
+ # ์ธ๋ถ ์์คํ
์ ์๋ฆผ (์: Slack ๋ฉ์์ง ์ ์ก, ํฐ์ผ ์์ฑ)
+ self.send_notification(context)
+
+ # ์คํ ์ผ์ ์ค์ง - ํ๋ ์์ํฌ๊ฐ ์๋์ผ๋ก ์์์ฑ ์ฒ๋ฆฌ
+ raise HumanFeedbackPending(
+ context=context,
+ callback_info={"webhook_url": f"{self.webhook_url}/{context.flow_id}"}
+ )
+
+class ReviewFlow(Flow):
+ @start()
+ @human_feedback(
+ message="์ด ์ฝํ
์ธ ๋ฅผ ๊ฒํ ํด ์ฃผ์ธ์:",
+ emit=["approved", "rejected"],
+ llm="gpt-4o-mini",
+ provider=WebhookProvider("https://myapp.com/api"),
+ )
+ def generate_content(self):
+ return "AI๊ฐ ์์ฑํ ์ฝํ
์ธ ..."
+
+ @listen("approved")
+ def publish(self, result):
+ return "์ถํ๋จ!"
+```
+
+
+Flow ํ๋ ์์ํฌ๋ `HumanFeedbackPending`์ด ๋ฐ์ํ๋ฉด **์๋์ผ๋ก ์ํ๋ฅผ ์์ํ**ํฉ๋๋ค. Provider๋ ์ธ๋ถ ์์คํ
์ ์๋ฆฌ๊ณ ์์ธ๋ฅผ ๋ฐ์์ํค๊ธฐ๋ง ํ๋ฉด ๋ฉ๋๋คโ์๋ ์์์ฑ ํธ์ถ์ด ํ์ํ์ง ์์ต๋๋ค.
+
+
+### ์ผ์ ์ค์ง๋ Flow ์ฒ๋ฆฌ
+
+๋น๋๊ธฐ provider๋ฅผ ์ฌ์ฉํ๋ฉด `kickoff()`๋ ์์ธ๋ฅผ ๋ฐ์์ํค๋ ๋์ `HumanFeedbackPending` ๊ฐ์ฒด๋ฅผ ๋ฐํํฉ๋๋ค:
+
+```python Code
+flow = ReviewFlow()
+result = flow.kickoff()
+
+if isinstance(result, HumanFeedbackPending):
+ # Flow๊ฐ ์ผ์ ์ค์ง๋จ, ์ํ๊ฐ ์๋์ผ๋ก ์์ํ๋จ
+ print(f"ํผ๋๋ฐฑ ๋๊ธฐ ์ค: {result.callback_info['webhook_url']}")
+ print(f"Flow ID: {result.context.flow_id}")
+else:
+ # ์ ์ ์๋ฃ
+ print(f"Flow ์๋ฃ: {result}")
+```
+
+### ์ผ์ ์ค์ง๋ Flow ์ฌ๊ฐ
+
+ํผ๋๋ฐฑ์ด ๋์ฐฉํ๋ฉด (์: ์นํ
์ ํตํด) Flow๋ฅผ ์ฌ๊ฐํฉ๋๋ค:
+
+```python Code
+# ๋๊ธฐ ํธ๋ค๋ฌ:
+def handle_feedback_webhook(flow_id: str, feedback: str):
+ flow = ReviewFlow.from_pending(flow_id)
+ result = flow.resume(feedback)
+ return result
+
+# ๋น๋๊ธฐ ํธ๋ค๋ฌ (FastAPI, aiohttp ๋ฑ):
+async def handle_feedback_webhook(flow_id: str, feedback: str):
+ flow = ReviewFlow.from_pending(flow_id)
+ result = await flow.resume_async(feedback)
+ return result
+```
+
+### ์ฃผ์ ํ์
+
+| ํ์
| ์ค๋ช
|
+|------|------|
+| `HumanFeedbackProvider` | ์ปค์คํ
ํผ๋๋ฐฑ provider๋ฅผ ์ํ ํ๋กํ ์ฝ |
+| `PendingFeedbackContext` | ์ผ์ ์ค์ง๋ Flow๋ฅผ ์ฌ๊ฐํ๋ ๋ฐ ํ์ํ ๋ชจ๋ ์ ๋ณด ํฌํจ |
+| `HumanFeedbackPending` | Flow๊ฐ ํผ๋๋ฐฑ์ ์ํด ์ผ์ ์ค์ง๋๋ฉด `kickoff()`์์ ๋ฐํ๋จ |
+| `ConsoleProvider` | ๊ธฐ๋ณธ ๋ธ๋กํน ์ฝ์ ์
๋ ฅ provider |
+
+### PendingFeedbackContext
+
+์ปจํ
์คํธ๋ ์ฌ๊ฐ์ ํ์ํ ๋ชจ๋ ๊ฒ์ ํฌํจํฉ๋๋ค:
+
+```python Code
+@dataclass
+class PendingFeedbackContext:
+ flow_id: str # ์ด Flow ์คํ์ ๊ณ ์ ์๋ณ์
+ flow_class: str # ์ ๊ทํ๋ ํด๋์ค ์ด๋ฆ
+ method_name: str # ํผ๋๋ฐฑ์ ํธ๋ฆฌ๊ฑฐํ ๋ฉ์๋
+ method_output: Any # ์ธ๊ฐ์๊ฒ ํ์๋ ์ถ๋ ฅ
+ message: str # ์์ฒญ ๋ฉ์์ง
+ emit: list[str] | None # ๋ผ์ฐํ
์ ์ํ ๊ฐ๋ฅํ outcome
+ default_outcome: str | None
+ metadata: dict # ์ปค์คํ
๋ฉํ๋ฐ์ดํฐ
+ llm: str | None # outcome ๋งคํ์ ์ํ LLM
+ requested_at: datetime
+```
+
+### ์์ ํ ๋น๋๊ธฐ Flow ์์
+
+```python Code
+from crewai.flow import (
+ Flow, start, listen, human_feedback,
+ HumanFeedbackProvider, HumanFeedbackPending, PendingFeedbackContext
+)
+
+class SlackNotificationProvider(HumanFeedbackProvider):
+ """Slack ์๋ฆผ์ ๋ณด๋ด๊ณ ๋น๋๊ธฐ ํผ๋๋ฐฑ์ ์ํด ์ผ์ ์ค์งํ๋ Provider."""
+
+ def __init__(self, channel: str):
+ self.channel = channel
+
+ def request_feedback(self, context: PendingFeedbackContext, flow: Flow) -> str:
+ # Slack ์๋ฆผ ์ ์ก (์ง์ ๊ตฌํ)
+ slack_thread_id = self.post_to_slack(
+ channel=self.channel,
+ message=f"๊ฒํ ํ์:\n\n{context.method_output}\n\n{context.message}",
+ )
+
+ # ์คํ ์ผ์ ์ค์ง - ํ๋ ์์ํฌ๊ฐ ์๋์ผ๋ก ์์์ฑ ์ฒ๋ฆฌ
+ raise HumanFeedbackPending(
+ context=context,
+ callback_info={
+ "slack_channel": self.channel,
+ "thread_id": slack_thread_id,
+ }
+ )
+
+class ContentPipeline(Flow):
+ @start()
+ @human_feedback(
+ message="์ด ์ฝํ
์ธ ์ ์ถํ์ ์น์ธํ์๊ฒ ์ต๋๊น?",
+ emit=["approved", "rejected", "needs_revision"],
+ llm="gpt-4o-mini",
+ default_outcome="needs_revision",
+ provider=SlackNotificationProvider("#content-reviews"),
+ )
+ def generate_content(self):
+ return "AI๊ฐ ์์ฑํ ๋ธ๋ก๊ทธ ๊ฒ์๋ฌผ ์ฝํ
์ธ ..."
+
+ @listen("approved")
+ def publish(self, result):
+ print(f"์ถํ ์ค! ๊ฒํ ์ ์๊ฒฌ: {result.feedback}")
+ return {"status": "published"}
+
+ @listen("rejected")
+ def archive(self, result):
+ print(f"๋ณด๊ด๋จ. ์ด์ : {result.feedback}")
+ return {"status": "archived"}
+
+ @listen("needs_revision")
+ def queue_revision(self, result):
+ print(f"์์ ๋๊ธฐ์ด์ ์ถ๊ฐ๋จ: {result.feedback}")
+ return {"status": "revision_needed"}
+
+
+# Flow ์์ (Slack ์๋ต์ ๊ธฐ๋ค๋ฆฌ๋ฉฐ ์ผ์ ์ค์ง)
+def start_content_pipeline():
+ flow = ContentPipeline()
+ result = flow.kickoff()
+
+ if isinstance(result, HumanFeedbackPending):
+ return {"status": "pending", "flow_id": result.context.flow_id}
+
+ return result
+
+
+# Slack ์นํ
์ด ์คํ๋ ๋ ์ฌ๊ฐ (๋๊ธฐ ํธ๋ค๋ฌ)
+def on_slack_feedback(flow_id: str, slack_message: str):
+ flow = ContentPipeline.from_pending(flow_id)
+ result = flow.resume(slack_message)
+ return result
+
+
+# ํธ๋ค๋ฌ๊ฐ ๋น๋๊ธฐ์ธ ๊ฒฝ์ฐ (FastAPI, aiohttp, Slack Bolt ๋น๋๊ธฐ ๋ฑ)
+async def on_slack_feedback_async(flow_id: str, slack_message: str):
+ flow = ContentPipeline.from_pending(flow_id)
+ result = await flow.resume_async(slack_message)
+ return result
+```
+
+
+๋น๋๊ธฐ ์น ํ๋ ์์ํฌ(FastAPI, aiohttp, Slack Bolt ๋น๋๊ธฐ ๋ชจ๋)๋ฅผ ์ฌ์ฉํ๋ ๊ฒฝ์ฐ `flow.resume()` ๋์ `await flow.resume_async()`๋ฅผ ์ฌ์ฉํ์ธ์. ์คํ ์ค์ธ ์ด๋ฒคํธ ๋ฃจํ ๋ด์์ `resume()`์ ํธ์ถํ๋ฉด `RuntimeError`๊ฐ ๋ฐ์ํฉ๋๋ค.
+
+
+### ๋น๋๊ธฐ ํผ๋๋ฐฑ ๋ชจ๋ฒ ์ฌ๋ก
+
+1. **๋ฐํ ํ์
ํ์ธ**: `kickoff()`๋ ์ผ์ ์ค์ง๋๋ฉด `HumanFeedbackPending`์ ๋ฐํํฉ๋๋คโtry/except๊ฐ ํ์ํ์ง ์์ต๋๋ค
+2. **์ฌ๋ฐ๋ฅธ resume ๋ฉ์๋ ์ฌ์ฉ**: ๋๊ธฐ ์ฝ๋์์๋ `resume()`, ๋น๋๊ธฐ ์ฝ๋์์๋ `await resume_async()` ์ฌ์ฉ
+3. **์ฝ๋ฐฑ ์ ๋ณด ์ ์ฅ**: `callback_info`๋ฅผ ์ฌ์ฉํ์ฌ ์นํ
URL, ํฐ์ผ ID ๋ฑ์ ์ ์ฅ
+4. **๋ฉฑ๋ฑ์ฑ ๊ตฌํ**: ์์ ์ ์ํด resume ํธ๋ค๋ฌ๋ ๋ฉฑ๋ฑํด์ผ ํฉ๋๋ค
+5. **์๋ ์์์ฑ**: `HumanFeedbackPending`์ด ๋ฐ์ํ๋ฉด ์ํ๊ฐ ์๋์ผ๋ก ์ ์ฅ๋๋ฉฐ ๊ธฐ๋ณธ์ ์ผ๋ก `SQLiteFlowPersistence` ์ฌ์ฉ
+6. **์ปค์คํ
์์์ฑ**: ํ์ํ ๊ฒฝ์ฐ `from_pending()`์ ์ปค์คํ
์์์ฑ ์ธ์คํด์ค ์ ๋ฌ
+
+## ๊ด๋ จ ๋ฌธ์
+
+- [Flow ๊ฐ์](/ko/concepts/flows) - CrewAI Flow์ ๋ํด ์์๋ณด๊ธฐ
+- [Flow ์ํ ๊ด๋ฆฌ](/ko/guides/flows/mastering-flow-state) - Flow์์ ์ํ ๊ด๋ฆฌํ๊ธฐ
+- [Flow ์์์ฑ](/ko/concepts/flows#persistence) - Flow ์ํ ์์ํ
+- [@router๋ฅผ ์ฌ์ฉํ ๋ผ์ฐํ
](/ko/concepts/flows#router) - ์กฐ๊ฑด๋ถ ๋ผ์ฐํ
์ ๋ํด ๋ ์์๋ณด๊ธฐ
+- [์คํ ์ ์ธ๊ฐ ์
๋ ฅ](/ko/learn/human-input-on-execution) - ํ์คํฌ ์์ค ์ธ๊ฐ ์
๋ ฅ
diff --git a/docs/pt-BR/concepts/flows.mdx b/docs/pt-BR/concepts/flows.mdx
index c1c8ee695..c32642d6e 100644
--- a/docs/pt-BR/concepts/flows.mdx
+++ b/docs/pt-BR/concepts/flows.mdx
@@ -307,6 +307,55 @@ Os mรฉtodos `third_method` e `fourth_method` escutam a saรญda do `second_method`
Ao executar esse Flow, a saída será diferente dependendo do valor booleano aleatório gerado pelo `start_method`.
+### Human in the Loop (feedback humano)
+
+O decorador `@human_feedback` permite fluxos de trabalho human-in-the-loop, pausando a execução do flow para coletar feedback de um humano. Isso é útil para portões de aprovação, revisão de qualidade e pontos de decisão que requerem julgamento humano.
+
+```python Code
+from crewai.flow.flow import Flow, start, listen
+from crewai.flow.human_feedback import human_feedback, HumanFeedbackResult
+
+class ReviewFlow(Flow):
+ @start()
+ @human_feedback(
+        message="Você aprova este conteúdo?",
+ emit=["approved", "rejected", "needs_revision"],
+ llm="gpt-4o-mini",
+ default_outcome="needs_revision",
+ )
+ def generate_content(self):
+        return "Conteúdo para revisão..."
+
+ @listen("approved")
+ def on_approval(self, result: HumanFeedbackResult):
+ print(f"Aprovado! Feedback: {result.feedback}")
+
+ @listen("rejected")
+ def on_rejection(self, result: HumanFeedbackResult):
+ print(f"Rejeitado. Motivo: {result.feedback}")
+```
+
+Quando `emit` é especificado, o feedback livre do humano é interpretado por um LLM e mapeado para um dos outcomes especificados, que então dispara o decorador `@listen` correspondente.
+
+Você também pode usar `@human_feedback` sem roteamento para simplesmente coletar feedback:
+
+```python Code
+@start()
+@human_feedback(message="Algum comentário sobre esta saída?")
+def my_method(self):
+    return "Saída para revisão"
+
+@listen(my_method)
+def next_step(self, result: HumanFeedbackResult):
+ # Acesse o feedback via result.feedback
+ # Acesse a saรญda original via result.output
+ pass
+```
+
+Acesse todo o feedback coletado durante um flow via `self.last_human_feedback` (mais recente) ou `self.human_feedback_history` (todo o feedback em uma lista).
+
+Para um guia completo sobre feedback humano em flows, incluindo feedback assíncrono/não-bloqueante com providers customizados (Slack, webhooks, etc.), veja [Feedback Humano em Flows](/pt-BR/learn/human-feedback-in-flows).
+
## Adicionando Agentes aos Flows
Os agentes podem ser integrados facilmente aos seus flows, oferecendo uma alternativa leve às crews completas quando você precisar executar tarefas simples e focadas. Veja um exemplo de como utilizar um agente em um flow para realizar uma pesquisa de mercado:
diff --git a/docs/pt-BR/learn/human-feedback-in-flows.mdx b/docs/pt-BR/learn/human-feedback-in-flows.mdx
new file mode 100644
index 000000000..fbe9512dc
--- /dev/null
+++ b/docs/pt-BR/learn/human-feedback-in-flows.mdx
@@ -0,0 +1,581 @@
+---
+title: Feedback Humano em Flows
+description: Aprenda como integrar feedback humano diretamente nos seus CrewAI Flows usando o decorador @human_feedback
+icon: user-check
+mode: "wide"
+---
+
+## Visรฃo Geral
+
+O decorador `@human_feedback` permite fluxos de trabalho human-in-the-loop (HITL) diretamente nos CrewAI Flows. Ele permite pausar a execuรงรฃo do flow, apresentar a saรญda para um humano revisar, coletar seu feedback e, opcionalmente, rotear para diferentes listeners com base no resultado do feedback.
+
+Isso รฉ particularmente valioso para:
+
+- **Garantia de qualidade**: Revisar conteรบdo gerado por IA antes de ser usado downstream
+- **Portรตes de decisรฃo**: Deixar humanos tomarem decisรตes crรญticas em fluxos automatizados
+- **Fluxos de aprovaรงรฃo**: Implementar padrรตes de aprovar/rejeitar/revisar
+- **Refinamento interativo**: Coletar feedback para melhorar saรญdas iterativamente
+
+```mermaid
+flowchart LR
+ A[Mรฉtodo do Flow] --> B[Saรญda Gerada]
+ B --> C[Humano Revisa]
+ C --> D{Feedback}
+ D -->|emit especificado| E[LLM Mapeia para Outcome]
+ D -->|sem emit| F[HumanFeedbackResult]
+ E --> G["@listen('approved')"]
+ E --> H["@listen('rejected')"]
+ F --> I[Prรณximo Listener]
+```
+
+## Inรญcio Rรกpido
+
+Aqui estรก a maneira mais simples de adicionar feedback humano a um flow:
+
+```python Code
+from crewai.flow.flow import Flow, start, listen
+from crewai.flow.human_feedback import human_feedback
+
+class SimpleReviewFlow(Flow):
+ @start()
+ @human_feedback(message="Por favor, revise este conteรบdo:")
+ def generate_content(self):
+ return "Este รฉ um conteรบdo gerado por IA que precisa de revisรฃo."
+
+ @listen(generate_content)
+ def process_feedback(self, result):
+ print(f"Conteรบdo: {result.output}")
+ print(f"Humano disse: {result.feedback}")
+
+flow = SimpleReviewFlow()
+flow.kickoff()
+```
+
+Quando este flow รฉ executado, ele irรก:
+1. Executar `generate_content` e retornar a string
+2. Exibir a saรญda para o usuรกrio com a mensagem de solicitaรงรฃo
+3. Aguardar o usuรกrio digitar o feedback (ou pressionar Enter para pular)
+4. Passar um objeto `HumanFeedbackResult` para `process_feedback`
+
+## O Decorador @human_feedback
+
+### Parรขmetros
+
+| Parรขmetro | Tipo | Obrigatรณrio | Descriรงรฃo |
+|-----------|------|-------------|-----------|
+| `message` | `str` | Sim | A mensagem mostrada ao humano junto com a saรญda do mรฉtodo |
+| `emit` | `Sequence[str]` | Nรฃo | Lista de possรญveis outcomes. O feedback รฉ mapeado para um destes, que dispara decoradores `@listen` |
+| `llm` | `str \| BaseLLM` | Quando `emit` especificado | LLM usado para interpretar o feedback e mapear para um outcome |
+| `default_outcome` | `str` | Nรฃo | Outcome a usar se nenhum feedback for fornecido. Deve estar em `emit` |
+| `metadata` | `dict` | Nรฃo | Dados adicionais para integraรงรตes enterprise |
+| `provider` | `HumanFeedbackProvider` | Nรฃo | Provider customizado para feedback assรญncrono/nรฃo-bloqueante. Veja [Feedback Humano Assรญncrono](#feedback-humano-assรญncrono-nรฃo-bloqueante) |
+
+### Uso Bรกsico (Sem Roteamento)
+
+Quando vocรช nรฃo especifica `emit`, o decorador simplesmente coleta o feedback e passa um `HumanFeedbackResult` para o prรณximo listener:
+
+```python Code
+@start()
+@human_feedback(message="O que vocรช acha desta anรกlise?")
+def analyze_data(self):
+ return "Resultados da anรกlise: Receita aumentou 15%, custos diminuรญram 8%"
+
+@listen(analyze_data)
+def handle_feedback(self, result):
+ # result รฉ um HumanFeedbackResult
+ print(f"Anรกlise: {result.output}")
+ print(f"Feedback: {result.feedback}")
+```
+
+### Roteamento com emit
+
+Quando vocรช especifica `emit`, o decorador se torna um roteador. O feedback livre do humano รฉ interpretado por um LLM e mapeado para um dos outcomes especificados:
+
+```python Code
+@start()
+@human_feedback(
+ message="Vocรช aprova este conteรบdo para publicaรงรฃo?",
+ emit=["approved", "rejected", "needs_revision"],
+ llm="gpt-4o-mini",
+ default_outcome="needs_revision",
+)
+def review_content(self):
+ return "Rascunho do post do blog aqui..."
+
+@listen("approved")
+def publish(self, result):
+ print(f"Publicando! Usuรกrio disse: {result.feedback}")
+
+@listen("rejected")
+def discard(self, result):
+ print(f"Descartando. Motivo: {result.feedback}")
+
+@listen("needs_revision")
+def revise(self, result):
+ print(f"Revisando baseado em: {result.feedback}")
+```
+
+
+O LLM usa saรญdas estruturadas (function calling) quando disponรญvel para garantir que a resposta seja um dos seus outcomes especificados. Isso torna o roteamento confiรกvel e previsรญvel.
+
+
+## HumanFeedbackResult
+
+O dataclass `HumanFeedbackResult` contรฉm todas as informaรงรตes sobre uma interaรงรฃo de feedback humano:
+
+```python Code
+from crewai.flow.human_feedback import HumanFeedbackResult
+
+@dataclass
+class HumanFeedbackResult:
+ output: Any # A saรญda original do mรฉtodo mostrada ao humano
+ feedback: str # O texto bruto do feedback do humano
+ outcome: str | None # O outcome mapeado (se emit foi especificado)
+ timestamp: datetime # Quando o feedback foi recebido
+ method_name: str # Nome do mรฉtodo decorado
+ metadata: dict # Qualquer metadata passado ao decorador
+```
+
+### Acessando em Listeners
+
+Quando um listener รฉ disparado por um mรฉtodo `@human_feedback` com `emit`, ele recebe o `HumanFeedbackResult`:
+
+```python Code
+@listen("approved")
+def on_approval(self, result: HumanFeedbackResult):
+ print(f"Saรญda original: {result.output}")
+ print(f"Feedback do usuรกrio: {result.feedback}")
+ print(f"Outcome: {result.outcome}") # "approved"
+ print(f"Recebido em: {result.timestamp}")
+```
+
+## Acessando o Histรณrico de Feedback
+
+A classe `Flow` fornece dois atributos para acessar o feedback humano:
+
+### last_human_feedback
+
+Retorna o `HumanFeedbackResult` mais recente:
+
+```python Code
+@listen(some_method)
+def check_feedback(self):
+ if self.last_human_feedback:
+ print(f"รltimo feedback: {self.last_human_feedback.feedback}")
+```
+
+### human_feedback_history
+
+Uma lista de todos os objetos `HumanFeedbackResult` coletados durante o flow:
+
+```python Code
+@listen(final_step)
+def summarize(self):
+ print(f"Total de feedbacks coletados: {len(self.human_feedback_history)}")
+ for i, fb in enumerate(self.human_feedback_history):
+ print(f"{i+1}. {fb.method_name}: {fb.outcome or 'sem roteamento'}")
+```
+
+
+Cada `HumanFeedbackResult` รฉ adicionado a `human_feedback_history`, entรฃo mรบltiplos passos de feedback nรฃo sobrescrevem uns aos outros. Use esta lista para acessar todo o feedback coletado durante o flow.
+
+
+## Exemplo Completo: Fluxo de Aprovaรงรฃo de Conteรบdo
+
+Aqui estรก um exemplo completo implementando um fluxo de revisรฃo e aprovaรงรฃo de conteรบdo:
+
+
+
+```python Code
+from crewai.flow.flow import Flow, start, listen
+from crewai.flow.human_feedback import human_feedback, HumanFeedbackResult
+from pydantic import BaseModel
+
+
+class ContentState(BaseModel):
+ topic: str = ""
+ draft: str = ""
+ final_content: str = ""
+ revision_count: int = 0
+
+
+class ContentApprovalFlow(Flow[ContentState]):
+ """Um flow que gera conteรบdo e obtรฉm aprovaรงรฃo humana."""
+
+ @start()
+ def get_topic(self):
+ self.state.topic = input("Sobre qual tรณpico devo escrever? ")
+ return self.state.topic
+
+ @listen(get_topic)
+ def generate_draft(self, topic):
+ # Em uso real, isso chamaria um LLM
+ self.state.draft = f"# {topic}\n\nEste รฉ um rascunho sobre {topic}..."
+ return self.state.draft
+
+ @listen(generate_draft)
+ @human_feedback(
+ message="Por favor, revise este rascunho. Responda 'approved', 'rejected', ou forneรงa feedback de revisรฃo:",
+ emit=["approved", "rejected", "needs_revision"],
+ llm="gpt-4o-mini",
+ default_outcome="needs_revision",
+ )
+ def review_draft(self, draft):
+ return draft
+
+ @listen("approved")
+ def publish_content(self, result: HumanFeedbackResult):
+ self.state.final_content = result.output
+        print("\n✅ Conteúdo aprovado e publicado!")
+ print(f"Comentรกrio do revisor: {result.feedback}")
+ return "published"
+
+ @listen("rejected")
+ def handle_rejection(self, result: HumanFeedbackResult):
+        print("\n❌ Conteúdo rejeitado")
+ print(f"Motivo: {result.feedback}")
+ return "rejected"
+
+ @listen("needs_revision")
+ def revise_content(self, result: HumanFeedbackResult):
+ self.state.revision_count += 1
+        print(f"\n🔄 Revisão #{self.state.revision_count} solicitada")
+ print(f"Feedback: {result.feedback}")
+
+ # Em um flow real, vocรช pode voltar para generate_draft
+ # Para este exemplo, apenas reconhecemos
+ return "revision_requested"
+
+
+# Executar o flow
+flow = ContentApprovalFlow()
+result = flow.kickoff()
+print(f"\nFlow concluรญdo. Revisรตes solicitadas: {flow.state.revision_count}")
+```
+
+```text Output
+Sobre qual tรณpico devo escrever? Seguranรงa em IA
+
+==================================================
+OUTPUT FOR REVIEW:
+==================================================
+# Seguranรงa em IA
+
+Este รฉ um rascunho sobre Seguranรงa em IA...
+==================================================
+
+Por favor, revise este rascunho. Responda 'approved', 'rejected', ou forneรงa feedback de revisรฃo:
+(Press Enter to skip, or type your feedback)
+
+Your feedback: Parece bom, aprovado!
+
+✅ Conteúdo aprovado e publicado!
+Comentรกrio do revisor: Parece bom, aprovado!
+
+Flow concluรญdo. Revisรตes solicitadas: 0
+```
+
+
+
+## Combinando com Outros Decoradores
+
+O decorador `@human_feedback` funciona com outros decoradores de flow. Coloque-o como o decorador mais interno (mais prรณximo da funรงรฃo):
+
+```python Code
+# Correto: @human_feedback รฉ o mais interno (mais prรณximo da funรงรฃo)
+@start()
+@human_feedback(message="Revise isto:")
+def my_start_method(self):
+ return "content"
+
+@listen(other_method)
+@human_feedback(message="Revise isto tambรฉm:")
+def my_listener(self, data):
+ return f"processed: {data}"
+```
+
+
+Coloque `@human_feedback` como o decorador mais interno (รบltimo/mais prรณximo da funรงรฃo) para que ele envolva o mรฉtodo diretamente e possa capturar o valor de retorno antes de passar para o sistema de flow.
+
+
+## Melhores Prรกticas
+
+### 1. Escreva Mensagens de Solicitaรงรฃo Claras
+
+O parรขmetro `message` รฉ o que o humano vรช. Torne-o acionรกvel:
+
+```python Code
+# ✅ Bom - claro e acionável
+@human_feedback(message="Este resumo captura com precisรฃo os pontos-chave? Responda 'sim' ou explique o que estรก faltando:")
+
+# โ Ruim - vago
+@human_feedback(message="Revise isto:")
+```
+
+### 2. Escolha Outcomes Significativos
+
+Ao usar `emit`, escolha outcomes que mapeiem naturalmente para respostas humanas:
+
+```python Code
+# โ
Bom - outcomes em linguagem natural
+emit=["approved", "rejected", "needs_more_detail"]
+
+# โ Ruim - tรฉcnico ou pouco claro
+emit=["state_1", "state_2", "state_3"]
+```
+
+### 3. Sempre Forneรงa um Outcome Padrรฃo
+
+Use `default_outcome` para lidar com casos onde usuรกrios pressionam Enter sem digitar:
+
+```python Code
+@human_feedback(
+ message="Aprovar? (pressione Enter para solicitar revisรฃo)",
+ emit=["approved", "needs_revision"],
+ llm="gpt-4o-mini",
+ default_outcome="needs_revision", # Padrรฃo seguro
+)
+```
+
+### 4. Use o Histรณrico de Feedback para Trilhas de Auditoria
+
+Acesse `human_feedback_history` para criar logs de auditoria:
+
+```python Code
+@listen(final_step)
+def create_audit_log(self):
+    log = []
+    for fb in self.human_feedback_history:
+        log.append({
+            "step": fb.method_name,
+            "outcome": fb.outcome,
+            "feedback": fb.feedback,
+            "timestamp": fb.timestamp.isoformat(),
+        })
+    return log
+```
+
+### 5. Handle Routed and Unrouted Feedback
+
+When designing flows, consider whether you need routing; a minimal unrouted sketch follows the table below.
+
+| Scenario | Use |
+|----------|-----|
+| Simple review, you only need the feedback text | No `emit` |
+| Need to branch to different paths based on the response | Use `emit` |
+| Approval gates with approve/reject/revise | Use `emit` |
+| Collecting comments for logging only | No `emit` |
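+
+When routing is not needed, a plain `@human_feedback` without `emit` keeps the flow simple. Here is a minimal sketch of the "logging only" row above; the class name and string values are illustrative, not part of the API:
+
+```python Code
+from crewai.flow.flow import Flow, start, listen
+from crewai.flow.human_feedback import human_feedback, HumanFeedbackResult
+
+class CommentFlow(Flow):
+    @start()
+    @human_feedback(message="Any comments on this report?")
+    def build_report(self):
+        return "Quarterly report draft..."
+
+    @listen(build_report)
+    def log_comments(self, result: HumanFeedbackResult):
+        # No routing: the listener receives the feedback alongside the original output
+        print(f"Output: {result.output}")
+        print(f"Comment: {result.feedback}")
+```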
+
+## Async Human Feedback (Non-Blocking Human in the Loop)
+
+By default, `@human_feedback` blocks execution while it waits for console input. For production applications, you may need **async/non-blocking** feedback that integrates with external systems such as Slack, email, webhooks, or APIs.
+
+### The Provider Abstraction
+
+Use the `provider` parameter to specify a custom feedback collection strategy:
+
+```python Code
+from crewai.flow import Flow, start, human_feedback, HumanFeedbackProvider, HumanFeedbackPending, PendingFeedbackContext
+
+class WebhookProvider(HumanFeedbackProvider):
+    """Provider that pauses the flow and waits for a webhook callback."""
+
+    def __init__(self, webhook_url: str):
+        self.webhook_url = webhook_url
+
+    def request_feedback(self, context: PendingFeedbackContext, flow: Flow) -> str:
+        # Notify an external system (e.g., send a Slack message, create a ticket)
+        self.send_notification(context)
+
+        # Pause execution - the framework handles persistence automatically
+        raise HumanFeedbackPending(
+            context=context,
+            callback_info={"webhook_url": f"{self.webhook_url}/{context.flow_id}"}
+        )
+
+class ReviewFlow(Flow):
+    @start()
+    @human_feedback(
+        message="Review this content:",
+        emit=["approved", "rejected"],
+        llm="gpt-4o-mini",
+        provider=WebhookProvider("https://myapp.com/api"),
+    )
+    def generate_content(self):
+        return "AI-generated content..."
+
+    @listen("approved")
+    def publish(self, result):
+        return "Published!"
+```
+
+
+The flow framework **automatically persists state** when `HumanFeedbackPending` is raised. Your provider only needs to notify the external system and raise the exception; no manual persistence calls are required.
+
+
+### Handling Paused Flows
+
+When using an async provider, `kickoff()` returns a `HumanFeedbackPending` object instead of raising an exception:
+
+```python Code
+flow = ReviewFlow()
+result = flow.kickoff()
+
+if isinstance(result, HumanFeedbackPending):
+    # The flow is paused; its state has been persisted automatically
+    print(f"Waiting for feedback at: {result.callback_info['webhook_url']}")
+    print(f"Flow ID: {result.context.flow_id}")
+else:
+    # Normal completion
+    print(f"Flow completed: {result}")
+```
+
+### Resuming a Paused Flow
+
+When the feedback arrives (e.g., via webhook), resume the flow:
+
+```python Code
+# Synchronous handler:
+def handle_feedback_webhook(flow_id: str, feedback: str):
+    flow = ReviewFlow.from_pending(flow_id)
+    result = flow.resume(feedback)
+    return result
+
+# Asynchronous handler (FastAPI, aiohttp, etc.):
+async def handle_feedback_webhook(flow_id: str, feedback: str):
+    flow = ReviewFlow.from_pending(flow_id)
+    result = await flow.resume_async(feedback)
+    return result
+```
+
+### Key Types
+
+| Type | Description |
+|------|-------------|
+| `HumanFeedbackProvider` | Protocol for custom feedback providers |
+| `PendingFeedbackContext` | Holds all the information needed to resume a paused flow |
+| `HumanFeedbackPending` | Returned by `kickoff()` when the flow is paused for feedback |
+| `ConsoleProvider` | Default blocking console-input provider |
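+
+All of these types are exported from the top-level `crewai.flow` package, so a single import covers them:
+
+```python Code
+from crewai.flow import (
+    ConsoleProvider,
+    HumanFeedbackPending,
+    HumanFeedbackProvider,
+    PendingFeedbackContext,
+)
+```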
+
+### PendingFeedbackContext
+
+The context holds everything needed to resume:
+
+```python Code
+@dataclass
+class PendingFeedbackContext:
+    flow_id: str                # Unique identifier for this flow execution
+    flow_class: str             # Fully qualified class name
+    method_name: str            # Method that triggered the feedback request
+    method_output: Any          # Output shown to the human
+    message: str                # The prompt message
+    emit: list[str] | None      # Possible outcomes for routing
+    default_outcome: str | None
+    metadata: dict              # Custom metadata
+    llm: str | None             # LLM used for outcome mapping
+    requested_at: datetime
+```
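+
+The context can also be round-tripped through plain dictionaries via `to_dict()` and `from_dict()`, which is handy if you store pending requests in your own datastore. A minimal sketch (the field values are illustrative):
+
+```python Code
+from crewai.flow import PendingFeedbackContext
+
+context = PendingFeedbackContext(
+    flow_id="abc-123",
+    flow_class="myapp.flows.ReviewFlow",
+    method_name="review_draft",
+    method_output="Draft text...",
+    message="Please review this draft:",
+    emit=["approved", "rejected"],
+)
+
+payload = context.to_dict()  # JSON-serializable dict
+restored = PendingFeedbackContext.from_dict(payload)
+assert restored.flow_id == context.flow_id
+```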
+
+### Complete Async Flow Example
+
+```python Code
+from crewai.flow import (
+    Flow, start, listen, human_feedback,
+    HumanFeedbackProvider, HumanFeedbackPending, PendingFeedbackContext
+)
+
+class SlackNotificationProvider(HumanFeedbackProvider):
+    """Provider that sends Slack notifications and pauses for async feedback."""
+
+    def __init__(self, channel: str):
+        self.channel = channel
+
+    def request_feedback(self, context: PendingFeedbackContext, flow: Flow) -> str:
+        # Send the Slack notification (implement this yourself)
+        slack_thread_id = self.post_to_slack(
+            channel=self.channel,
+            message=f"Review needed:\n\n{context.method_output}\n\n{context.message}",
+        )
+
+        # Pause execution - the framework handles persistence automatically
+        raise HumanFeedbackPending(
+            context=context,
+            callback_info={
+                "slack_channel": self.channel,
+                "thread_id": slack_thread_id,
+            }
+        )
+
+class ContentPipeline(Flow):
+    @start()
+    @human_feedback(
+        message="Do you approve this content for publication?",
+        emit=["approved", "rejected", "needs_revision"],
+        llm="gpt-4o-mini",
+        default_outcome="needs_revision",
+        provider=SlackNotificationProvider("#content-reviews"),
+    )
+    def generate_content(self):
+        return "AI-generated blog post content..."
+
+    @listen("approved")
+    def publish(self, result):
+        print(f"Publishing! Reviewer said: {result.feedback}")
+        return {"status": "published"}
+
+    @listen("rejected")
+    def archive(self, result):
+        print(f"Archived. Reason: {result.feedback}")
+        return {"status": "archived"}
+
+    @listen("needs_revision")
+    def queue_revision(self, result):
+        print(f"Queued for revision: {result.feedback}")
+        return {"status": "revision_needed"}
+
+
+# Start the flow (it will pause and wait for the Slack response)
+def start_content_pipeline():
+    flow = ContentPipeline()
+    result = flow.kickoff()
+
+    if isinstance(result, HumanFeedbackPending):
+        return {"status": "pending", "flow_id": result.context.flow_id}
+
+    return result
+
+
+# Resume when the Slack webhook fires (synchronous handler)
+def on_slack_feedback(flow_id: str, slack_message: str):
+    flow = ContentPipeline.from_pending(flow_id)
+    result = flow.resume(slack_message)
+    return result
+
+
+# If your handler is async (FastAPI, aiohttp, Slack Bolt async, etc.)
+async def on_slack_feedback_async(flow_id: str, slack_message: str):
+    flow = ContentPipeline.from_pending(flow_id)
+    result = await flow.resume_async(slack_message)
+    return result
+```
+
+
+If you are using an async web framework (FastAPI, aiohttp, Slack Bolt async mode), use `await flow.resume_async()` instead of `flow.resume()`. Calling `resume()` from inside a running event loop raises a `RuntimeError`.
+
+
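+For example, a webhook endpoint in an async framework might look like the following sketch. FastAPI is used purely for illustration; the endpoint path and payload shape are assumptions, not part of CrewAI:
+
+```python Code
+from fastapi import FastAPI
+from pydantic import BaseModel
+
+app = FastAPI()
+
+class FeedbackPayload(BaseModel):
+    flow_id: str
+    feedback: str
+
+@app.post("/feedback")  # hypothetical endpoint wired to your notification system
+async def receive_feedback(payload: FeedbackPayload):
+    # Restore the paused flow and resume it inside the running event loop
+    flow = ContentPipeline.from_pending(payload.flow_id)
+    result = await flow.resume_async(payload.feedback)
+    return {"status": "resumed", "result": str(result)}
+```
+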
+### Best Practices for Async Feedback
+
+1. **Check the return type**: `kickoff()` returns `HumanFeedbackPending` when the flow is paused, so no try/except is needed
+2. **Use the right resume method**: call `resume()` from sync code and `await resume_async()` from async code
+3. **Store callback information**: use `callback_info` to keep webhook URLs, ticket IDs, etc.
+4. **Implement idempotency**: your resume handler should be idempotent for safety
+5. **Automatic persistence**: state is saved automatically when `HumanFeedbackPending` is raised, using `SQLiteFlowPersistence` by default
+6. **Custom persistence**: pass a custom persistence instance to `from_pending()` if needed, as shown in the sketch below
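+
+A minimal sketch of item 6, assuming a custom SQLite database file (the `"reviews.db"` path is illustrative):
+
+```python Code
+from crewai.flow.persistence import SQLiteFlowPersistence
+
+persistence = SQLiteFlowPersistence("reviews.db")
+
+def resume_with_custom_persistence(flow_id: str, feedback: str):
+    # Load the paused flow from the custom store, then resume it with the feedback
+    flow = ContentPipeline.from_pending(flow_id, persistence=persistence)
+    return flow.resume(feedback)
+```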
+
+## Related Documentation
+
+- [Flows Overview](/pt-BR/concepts/flows) - Learn about CrewAI Flows
+- [Mastering Flow State](/pt-BR/guides/flows/mastering-flow-state) - Managing state in flows
+- [Flow Persistence](/pt-BR/concepts/flows#persistence) - Persisting flow state
+- [Routing with @router](/pt-BR/concepts/flows#router) - More on conditional routing
+- [Human Input on Execution](/pt-BR/learn/human-input-on-execution) - Task-level human input
diff --git a/lib/crewai/src/crewai/events/event_listener.py b/lib/crewai/src/crewai/events/event_listener.py
index 820e5dc99..1c7602587 100644
--- a/lib/crewai/src/crewai/events/event_listener.py
+++ b/lib/crewai/src/crewai/events/event_listener.py
@@ -38,9 +38,11 @@ from crewai.events.types.crew_events import (
from crewai.events.types.flow_events import (
FlowCreatedEvent,
FlowFinishedEvent,
+ FlowPausedEvent,
FlowStartedEvent,
MethodExecutionFailedEvent,
MethodExecutionFinishedEvent,
+ MethodExecutionPausedEvent,
MethodExecutionStartedEvent,
)
from crewai.events.types.knowledge_events import (
@@ -363,6 +365,28 @@ class EventListener(BaseEventListener):
)
self.method_branches[event.method_name] = updated_branch
+ @crewai_event_bus.on(MethodExecutionPausedEvent)
+ def on_method_execution_paused(
+ _: Any, event: MethodExecutionPausedEvent
+ ) -> None:
+ method_branch = self.method_branches.get(event.method_name)
+ updated_branch = self.formatter.update_method_status(
+ method_branch,
+ self.formatter.current_flow_tree,
+ event.method_name,
+ "paused",
+ )
+ self.method_branches[event.method_name] = updated_branch
+
+ @crewai_event_bus.on(FlowPausedEvent)
+ def on_flow_paused(_: Any, event: FlowPausedEvent) -> None:
+ self.formatter.update_flow_status(
+ self.formatter.current_flow_tree,
+ event.flow_name,
+ event.flow_id,
+ "paused",
+ )
+
# ----------- TOOL USAGE EVENTS -----------
@crewai_event_bus.on(ToolUsageStartedEvent)
diff --git a/lib/crewai/src/crewai/events/types/flow_events.py b/lib/crewai/src/crewai/events/types/flow_events.py
index a35254192..826722762 100644
--- a/lib/crewai/src/crewai/events/types/flow_events.py
+++ b/lib/crewai/src/crewai/events/types/flow_events.py
@@ -58,6 +58,29 @@ class MethodExecutionFailedEvent(FlowEvent):
model_config = ConfigDict(arbitrary_types_allowed=True)
+class MethodExecutionPausedEvent(FlowEvent):
+ """Event emitted when a flow method is paused waiting for human feedback.
+
+ This event is emitted when a @human_feedback decorated method with an
+ async provider raises HumanFeedbackPending to pause execution.
+
+ Attributes:
+ flow_name: Name of the flow that is paused.
+ method_name: Name of the method waiting for feedback.
+ state: Current flow state when paused.
+ flow_id: Unique identifier for this flow execution.
+ message: The message shown when requesting feedback.
+ emit: Optional list of possible outcomes for routing.
+ """
+
+ method_name: str
+ state: dict[str, Any] | BaseModel
+ flow_id: str
+ message: str
+ emit: list[str] | None = None
+ type: str = "method_execution_paused"
+
+
class FlowFinishedEvent(FlowEvent):
"""Event emitted when a flow completes execution"""
@@ -67,8 +90,71 @@ class FlowFinishedEvent(FlowEvent):
state: dict[str, Any] | BaseModel
+class FlowPausedEvent(FlowEvent):
+ """Event emitted when a flow is paused waiting for human feedback.
+
+ This event is emitted when a flow is paused due to a @human_feedback
+ decorated method with an async provider raising HumanFeedbackPending.
+
+ Attributes:
+ flow_name: Name of the flow that is paused.
+ flow_id: Unique identifier for this flow execution.
+ method_name: Name of the method waiting for feedback.
+ state: Current flow state when paused.
+ message: The message shown when requesting feedback.
+ emit: Optional list of possible outcomes for routing.
+ """
+
+ flow_id: str
+ method_name: str
+ state: dict[str, Any] | BaseModel
+ message: str
+ emit: list[str] | None = None
+ type: str = "flow_paused"
+
+
class FlowPlotEvent(FlowEvent):
"""Event emitted when a flow plot is created"""
flow_name: str
type: str = "flow_plot"
+
+
+class HumanFeedbackRequestedEvent(FlowEvent):
+ """Event emitted when human feedback is requested.
+
+ This event is emitted when a @human_feedback decorated method
+ requires input from a human reviewer.
+
+ Attributes:
+ flow_name: Name of the flow requesting feedback.
+ method_name: Name of the method decorated with @human_feedback.
+ output: The method output shown to the human for review.
+ message: The message displayed when requesting feedback.
+ emit: Optional list of possible outcomes for routing.
+ """
+
+ method_name: str
+ output: Any
+ message: str
+ emit: list[str] | None = None
+ type: str = "human_feedback_requested"
+
+
+class HumanFeedbackReceivedEvent(FlowEvent):
+ """Event emitted when human feedback is received.
+
+ This event is emitted after a human provides feedback in response
+ to a @human_feedback decorated method.
+
+ Attributes:
+ flow_name: Name of the flow that received feedback.
+ method_name: Name of the method that received feedback.
+ feedback: The raw text feedback provided by the human.
+ outcome: The collapsed outcome string (if emit was specified).
+ """
+
+ method_name: str
+ feedback: str
+ outcome: str | None = None
+ type: str = "human_feedback_received"
diff --git a/lib/crewai/src/crewai/events/utils/console_formatter.py b/lib/crewai/src/crewai/events/utils/console_formatter.py
index 2ec0d69ce..b6a5c7d6b 100644
--- a/lib/crewai/src/crewai/events/utils/console_formatter.py
+++ b/lib/crewai/src/crewai/events/utils/console_formatter.py
@@ -453,41 +453,48 @@ To enable tracing, do any one of these:
if flow_tree is None:
return
+ # Determine status-specific labels and styles
+ if status == "completed":
+ label_prefix = "✅ Flow Finished:"
+ style = "green"
+ node_text = "✅ Flow Completed"
+ content_text = "Flow Execution Completed"
+ panel_title = "Flow Completion"
+ elif status == "paused":
+ label_prefix = "⏳ Flow Paused:"
+ style = "cyan"
+ node_text = "⏳ Waiting for Human Feedback"
+ content_text = "Flow Paused - Waiting for Feedback"
+ panel_title = "Flow Paused"
+ else:
+ label_prefix = "❌ Flow Failed:"
+ style = "red"
+ node_text = "❌ Flow Failed"
+ content_text = "Flow Execution Failed"
+ panel_title = "Flow Failure"
+
# Update main flow label
self.update_tree_label(
flow_tree,
- "โ
Flow Finished:" if status == "completed" else "โ Flow Failed:",
+ label_prefix,
flow_name,
- "green" if status == "completed" else "red",
+ style,
)
# Update initialization node status
for child in flow_tree.children:
if "Starting Flow" in str(child.label):
- child.label = Text(
- (
- "โ
Flow Completed"
- if status == "completed"
- else "โ Flow Failed"
- ),
- style="green" if status == "completed" else "red",
- )
+ child.label = Text(node_text, style=style)
break
content = self.create_status_content(
- (
- "Flow Execution Completed"
- if status == "completed"
- else "Flow Execution Failed"
- ),
+ content_text,
flow_name,
- "green" if status == "completed" else "red",
+ style,
ID=flow_id,
)
self.print(flow_tree)
- self.print_panel(
- content, "Flow Completion", "green" if status == "completed" else "red"
- )
+ self.print_panel(content, panel_title, style)
def update_method_status(
self,
@@ -508,6 +515,12 @@ To enable tracing, do any one of these:
if "Starting Flow" in str(child.label):
child.label = Text("Flow Method Step", style="white")
break
+ elif status == "paused":
+ prefix, style = "⏳ Paused:", "cyan"
+ for child in flow_tree.children:
+ if "Starting Flow" in str(child.label):
+ child.label = Text("⏳ Waiting for Feedback", style="cyan")
+ break
else:
prefix, style = "❌ Failed:", "red"
for child in flow_tree.children:
diff --git a/lib/crewai/src/crewai/flow/__init__.py b/lib/crewai/src/crewai/flow/__init__.py
index bdb28fabd..8a27685da 100644
--- a/lib/crewai/src/crewai/flow/__init__.py
+++ b/lib/crewai/src/crewai/flow/__init__.py
@@ -1,4 +1,11 @@
+from crewai.flow.async_feedback import (
+ ConsoleProvider,
+ HumanFeedbackPending,
+ HumanFeedbackProvider,
+ PendingFeedbackContext,
+)
from crewai.flow.flow import Flow, and_, listen, or_, router, start
+from crewai.flow.human_feedback import HumanFeedbackResult, human_feedback
from crewai.flow.persistence import persist
from crewai.flow.visualization import (
FlowStructure,
@@ -8,10 +15,16 @@ from crewai.flow.visualization import (
__all__ = [
+ "ConsoleProvider",
"Flow",
"FlowStructure",
+ "HumanFeedbackPending",
+ "HumanFeedbackProvider",
+ "HumanFeedbackResult",
+ "PendingFeedbackContext",
"and_",
"build_flow_structure",
+ "human_feedback",
"listen",
"or_",
"persist",
diff --git a/lib/crewai/src/crewai/flow/async_feedback/__init__.py b/lib/crewai/src/crewai/flow/async_feedback/__init__.py
new file mode 100644
index 000000000..612a54657
--- /dev/null
+++ b/lib/crewai/src/crewai/flow/async_feedback/__init__.py
@@ -0,0 +1,41 @@
+"""Async human feedback support for CrewAI Flows.
+
+This module provides abstractions for non-blocking human-in-the-loop workflows,
+allowing integration with external systems like Slack, Teams, webhooks, or APIs.
+
+Example:
+ ```python
+ from crewai.flow import Flow, start, human_feedback
+ from crewai.flow.async_feedback import HumanFeedbackProvider, HumanFeedbackPending
+
+ class SlackProvider(HumanFeedbackProvider):
+ def request_feedback(self, context, flow):
+ self.send_slack_notification(context)
+ raise HumanFeedbackPending(context=context)
+
+ class MyFlow(Flow):
+ @start()
+ @human_feedback(
+ message="Review this:",
+ emit=["approved", "rejected"],
+ llm="gpt-4o-mini",
+ provider=SlackProvider(),
+ )
+ def review(self):
+ return "Content to review"
+ ```
+"""
+
+from crewai.flow.async_feedback.types import (
+ HumanFeedbackPending,
+ HumanFeedbackProvider,
+ PendingFeedbackContext,
+)
+from crewai.flow.async_feedback.providers import ConsoleProvider
+
+__all__ = [
+ "ConsoleProvider",
+ "HumanFeedbackPending",
+ "HumanFeedbackProvider",
+ "PendingFeedbackContext",
+]
diff --git a/lib/crewai/src/crewai/flow/async_feedback/providers.py b/lib/crewai/src/crewai/flow/async_feedback/providers.py
new file mode 100644
index 000000000..19207c8ef
--- /dev/null
+++ b/lib/crewai/src/crewai/flow/async_feedback/providers.py
@@ -0,0 +1,124 @@
+"""Default provider implementations for human feedback.
+
+This module provides the ConsoleProvider, which is the default synchronous
+provider that collects feedback via console input.
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from crewai.flow.async_feedback.types import PendingFeedbackContext
+
+if TYPE_CHECKING:
+ from crewai.flow.flow import Flow
+
+
+class ConsoleProvider:
+ """Default synchronous console-based feedback provider.
+
+ This provider blocks execution and waits for console input from the user.
+ It displays the method output with formatting and prompts for feedback.
+
+ This is the default provider used when no custom provider is specified
+ in the @human_feedback decorator.
+
+ Example:
+ ```python
+ from crewai.flow.async_feedback import ConsoleProvider
+
+ # Explicitly use console provider
+ @human_feedback(
+ message="Review this:",
+ provider=ConsoleProvider(),
+ )
+ def my_method(self):
+ return "Content to review"
+ ```
+ """
+
+ def __init__(self, verbose: bool = True):
+ """Initialize the console provider.
+
+ Args:
+ verbose: Whether to display formatted output. If False, only
+ shows the prompt message.
+ """
+ self.verbose = verbose
+
+ def request_feedback(
+ self,
+ context: PendingFeedbackContext,
+ flow: Flow,
+ ) -> str:
+ """Request feedback via console input (blocking).
+
+ Displays the method output with formatting and waits for the user
+ to type their feedback. Press Enter to skip (returns empty string).
+
+ Args:
+ context: The pending feedback context with output and message.
+ flow: The Flow instance (used for event emission).
+
+ Returns:
+ The user's feedback as a string, or empty string if skipped.
+ """
+ from crewai.events.event_bus import crewai_event_bus
+ from crewai.events.event_listener import event_listener
+ from crewai.events.types.flow_events import (
+ HumanFeedbackReceivedEvent,
+ HumanFeedbackRequestedEvent,
+ )
+
+ # Emit feedback requested event
+ crewai_event_bus.emit(
+ flow,
+ HumanFeedbackRequestedEvent(
+ type="human_feedback_requested",
+ flow_name=flow.name or flow.__class__.__name__,
+ method_name=context.method_name,
+ output=context.method_output,
+ message=context.message,
+ emit=context.emit,
+ ),
+ )
+
+ # Pause live updates during human input
+ formatter = event_listener.formatter
+ formatter.pause_live_updates()
+
+ try:
+ console = formatter.console
+
+ if self.verbose:
+ # Display output with formatting using Rich console
+ console.print("\n" + "═" * 50, style="bold cyan")
+ console.print(" OUTPUT FOR REVIEW", style="bold cyan")
+ console.print("═" * 50 + "\n", style="bold cyan")
+ console.print(context.method_output)
+ console.print("\n" + "═" * 50 + "\n", style="bold cyan")
+
+ # Show message and prompt for feedback
+ console.print(context.message, style="yellow")
+ console.print(
+ "(Press Enter to skip, or type your feedback)\n", style="cyan"
+ )
+
+ feedback = input("Your feedback: ").strip()
+
+ # Emit feedback received event
+ crewai_event_bus.emit(
+ flow,
+ HumanFeedbackReceivedEvent(
+ type="human_feedback_received",
+ flow_name=flow.name or flow.__class__.__name__,
+ method_name=context.method_name,
+ feedback=feedback,
+ outcome=None, # Will be determined after collapsing
+ ),
+ )
+
+ return feedback
+ finally:
+ # Resume live updates
+ formatter.resume_live_updates()
diff --git a/lib/crewai/src/crewai/flow/async_feedback/types.py b/lib/crewai/src/crewai/flow/async_feedback/types.py
new file mode 100644
index 000000000..dc6cd91f7
--- /dev/null
+++ b/lib/crewai/src/crewai/flow/async_feedback/types.py
@@ -0,0 +1,264 @@
+"""Core types for async human feedback in Flows.
+
+This module defines the protocol, exception, and context types used for
+non-blocking human-in-the-loop workflows.
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from datetime import datetime
+from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable
+
+if TYPE_CHECKING:
+ from crewai.flow.flow import Flow
+
+
+@dataclass
+class PendingFeedbackContext:
+ """Context capturing everything needed to resume a paused flow.
+
+ When a flow is paused waiting for async human feedback, this dataclass
+ stores all the information needed to:
+ 1. Identify which flow execution is waiting
+ 2. What method triggered the feedback request
+ 3. What was shown to the human
+ 4. How to route the response when it arrives
+
+ Attributes:
+ flow_id: Unique identifier for the flow instance (from state.id)
+ flow_class: Fully qualified class name (e.g., "myapp.flows.ReviewFlow")
+ method_name: Name of the method that triggered feedback request
+ method_output: The output that was shown to the human for review
+ message: The message displayed when requesting feedback
+ emit: Optional list of outcome strings for routing
+ default_outcome: Outcome to use when no feedback is provided
+ metadata: Optional metadata for external system integration
+ llm: LLM model string for outcome collapsing
+ requested_at: When the feedback was requested
+
+ Example:
+ ```python
+ context = PendingFeedbackContext(
+ flow_id="abc-123",
+ flow_class="myapp.ReviewFlow",
+ method_name="review_content",
+ method_output={"title": "Draft", "body": "..."},
+ message="Please review and approve or reject:",
+ emit=["approved", "rejected"],
+ llm="gpt-4o-mini",
+ )
+ ```
+ """
+
+ flow_id: str
+ flow_class: str
+ method_name: str
+ method_output: Any
+ message: str
+ emit: list[str] | None = None
+ default_outcome: str | None = None
+ metadata: dict[str, Any] = field(default_factory=dict)
+ llm: str | None = None
+ requested_at: datetime = field(default_factory=datetime.now)
+
+ def to_dict(self) -> dict[str, Any]:
+ """Serialize context to a dictionary for persistence.
+
+ Returns:
+ Dictionary representation suitable for JSON serialization.
+ """
+ return {
+ "flow_id": self.flow_id,
+ "flow_class": self.flow_class,
+ "method_name": self.method_name,
+ "method_output": self.method_output,
+ "message": self.message,
+ "emit": self.emit,
+ "default_outcome": self.default_outcome,
+ "metadata": self.metadata,
+ "llm": self.llm,
+ "requested_at": self.requested_at.isoformat(),
+ }
+
+ @classmethod
+ def from_dict(cls, data: dict[str, Any]) -> PendingFeedbackContext:
+ """Deserialize context from a dictionary.
+
+ Args:
+ data: Dictionary representation of the context.
+
+ Returns:
+ Reconstructed PendingFeedbackContext instance.
+ """
+ requested_at = data.get("requested_at")
+ if isinstance(requested_at, str):
+ requested_at = datetime.fromisoformat(requested_at)
+ elif requested_at is None:
+ requested_at = datetime.now()
+
+ return cls(
+ flow_id=data["flow_id"],
+ flow_class=data["flow_class"],
+ method_name=data["method_name"],
+ method_output=data.get("method_output"),
+ message=data.get("message", ""),
+ emit=data.get("emit"),
+ default_outcome=data.get("default_outcome"),
+ metadata=data.get("metadata", {}),
+ llm=data.get("llm"),
+ requested_at=requested_at,
+ )
+
+
+class HumanFeedbackPending(Exception): # noqa: N818 - Not an error, a control flow signal
+ """Signal that flow execution should pause for async human feedback.
+
+ When raised by a provider, the flow framework will:
+ 1. Stop execution at the current method
+ 2. Automatically persist state and context (if persistence is configured)
+ 3. Return this object to the caller (not re-raise it)
+
+ The caller receives this as a return value from `flow.kickoff()`, enabling
+ graceful handling of the paused state without try/except blocks:
+
+ ```python
+ result = flow.kickoff()
+ if isinstance(result, HumanFeedbackPending):
+ # Flow is paused, handle async feedback
+ print(f"Waiting for feedback: {result.context.flow_id}")
+ else:
+ # Normal completion
+ print(f"Flow completed: {result}")
+ ```
+
+ Note:
+ The flow framework automatically saves pending feedback when this
+ exception is raised. Providers do NOT need to call `save_pending_feedback`
+ manually - just raise this exception and the framework handles persistence.
+
+ Attributes:
+ context: The PendingFeedbackContext with all details needed to resume
+ callback_info: Optional dict with information for external systems
+ (e.g., webhook URL, ticket ID, Slack thread ID)
+
+ Example:
+ ```python
+ class SlackProvider(HumanFeedbackProvider):
+ def request_feedback(self, context, flow):
+ # Send notification to external system
+ ticket_id = self.create_slack_thread(context)
+
+ # Raise to pause - framework handles persistence automatically
+ raise HumanFeedbackPending(
+ context=context,
+ callback_info={
+ "slack_channel": "#reviews",
+ "thread_id": ticket_id,
+ }
+ )
+ ```
+ """
+
+ def __init__(
+ self,
+ context: PendingFeedbackContext,
+ callback_info: dict[str, Any] | None = None,
+ message: str | None = None,
+ ):
+ """Initialize the pending feedback exception.
+
+ Args:
+ context: The pending feedback context with flow details
+ callback_info: Optional information for external system callbacks
+ message: Optional custom message (defaults to descriptive message)
+ """
+ self.context = context
+ self.callback_info = callback_info or {}
+
+ if message is None:
+ message = (
+ f"Human feedback pending for flow '{context.flow_id}' "
+ f"at method '{context.method_name}'"
+ )
+ super().__init__(message)
+
+
+@runtime_checkable
+class HumanFeedbackProvider(Protocol):
+ """Protocol for human feedback collection strategies.
+
+ Implement this protocol to create custom feedback providers that integrate
+ with external systems like Slack, Teams, email, or custom APIs.
+
+ Providers can be either:
+ - **Synchronous (blocking)**: Return feedback string directly
+ - **Asynchronous (non-blocking)**: Raise HumanFeedbackPending to pause
+
+ The default ConsoleProvider is synchronous and blocks waiting for input.
+ For async workflows, implement a provider that raises HumanFeedbackPending.
+
+ Note:
+ The flow framework automatically handles state persistence when
+ HumanFeedbackPending is raised. Providers only need to:
+ 1. Notify the external system (Slack, email, webhook, etc.)
+ 2. Raise HumanFeedbackPending with the context and callback info
+
+ Example synchronous provider:
+ ```python
+ class ConsoleProvider(HumanFeedbackProvider):
+ def request_feedback(self, context, flow):
+ print(context.method_output)
+ return input("Your feedback: ")
+ ```
+
+ Example async provider:
+ ```python
+ class SlackProvider(HumanFeedbackProvider):
+ def __init__(self, channel: str):
+ self.channel = channel
+
+ def request_feedback(self, context, flow):
+ # Send notification to Slack
+ thread_id = self.post_to_slack(
+ channel=self.channel,
+ message=context.message,
+ content=context.method_output,
+ )
+
+ # Raise to pause - framework handles persistence automatically
+ raise HumanFeedbackPending(
+ context=context,
+ callback_info={
+ "channel": self.channel,
+ "thread_id": thread_id,
+ }
+ )
+ ```
+ """
+
+ def request_feedback(
+ self,
+ context: PendingFeedbackContext,
+ flow: Flow,
+ ) -> str:
+ """Request feedback from a human.
+
+ For synchronous providers, block and return the feedback string.
+ For async providers, notify the external system and raise
+ HumanFeedbackPending to pause the flow.
+
+ Args:
+ context: The pending feedback context containing all details
+ about what feedback is needed and how to route the response.
+ flow: The Flow instance, providing access to state and name.
+
+ Returns:
+ The human's feedback as a string (synchronous providers only).
+
+ Raises:
+ HumanFeedbackPending: To signal that the flow should pause and
+ wait for external feedback. The framework will automatically
+ persist state when this is raised.
+ """
+ ...
diff --git a/lib/crewai/src/crewai/flow/flow.py b/lib/crewai/src/crewai/flow/flow.py
index b6467ba51..26b8c7190 100644
--- a/lib/crewai/src/crewai/flow/flow.py
+++ b/lib/crewai/src/crewai/flow/flow.py
@@ -7,12 +7,13 @@ for building event-driven workflows with conditional execution and routing.
from __future__ import annotations
import asyncio
-from collections.abc import Callable
+from collections.abc import Callable, Sequence
from concurrent.futures import Future
import copy
import inspect
import logging
from typing import (
+ TYPE_CHECKING,
Any,
ClassVar,
Generic,
@@ -41,10 +42,12 @@ from crewai.events.listeners.tracing.utils import (
from crewai.events.types.flow_events import (
FlowCreatedEvent,
FlowFinishedEvent,
+ FlowPausedEvent,
FlowPlotEvent,
FlowStartedEvent,
MethodExecutionFailedEvent,
MethodExecutionFinishedEvent,
+ MethodExecutionPausedEvent,
MethodExecutionStartedEvent,
)
from crewai.flow.constants import AND_CONDITION, OR_CONDITION
@@ -69,9 +72,14 @@ from crewai.flow.utils import (
is_flow_method_name,
is_simple_flow_condition,
)
+
+if TYPE_CHECKING:
+ from crewai.flow.async_feedback.types import PendingFeedbackContext
+ from crewai.flow.human_feedback import HumanFeedbackResult
+ from crewai.llms.base_llm import BaseLLM
+
from crewai.flow.visualization import build_flow_structure, render_interactive
from crewai.types.streaming import CrewStreamingOutput, FlowStreamingOutput
-from crewai.utilities.printer import Printer, PrinterColor
from crewai.utilities.streaming import (
TaskInfo,
create_async_chunk_generator,
@@ -443,6 +451,23 @@ class FlowMeta(type):
else:
router_paths[attr_name] = []
+ # Handle start methods that are also routers (e.g., @human_feedback with emit)
+ if (
+ hasattr(attr_value, "__is_start_method__")
+ and hasattr(attr_value, "__is_router__")
+ and attr_value.__is_router__
+ ):
+ routers.add(attr_name)
+ # Get router paths from the decorator attribute
+ if hasattr(attr_value, "__router_paths__") and attr_value.__router_paths__:
+ router_paths[attr_name] = attr_value.__router_paths__
+ else:
+ possible_returns = get_possible_return_constants(attr_value)
+ if possible_returns:
+ router_paths[attr_name] = possible_returns
+ else:
+ router_paths[attr_name] = []
+
cls._start_methods = start_methods # type: ignore[attr-defined]
cls._listeners = listeners # type: ignore[attr-defined]
cls._routers = routers # type: ignore[attr-defined]
@@ -456,8 +481,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
type parameter T must be either dict[str, Any] or a subclass of BaseModel."""
- _printer: ClassVar[Printer] = Printer()
-
_start_methods: ClassVar[list[FlowMethodName]] = []
_listeners: ClassVar[dict[FlowMethodName, SimpleFlowCondition | FlowCondition]] = {}
_routers: ClassVar[set[FlowMethodName]] = set()
@@ -499,6 +522,11 @@ class Flow(Generic[T], metaclass=FlowMeta):
self._is_execution_resuming: bool = False
self._event_futures: list[Future[None]] = []
+ # Human feedback storage
+ self.human_feedback_history: list[HumanFeedbackResult] = []
+ self.last_human_feedback: HumanFeedbackResult | None = None
+ self._pending_feedback_context: PendingFeedbackContext | None = None
+
# Initialize state with initial values
self._state = self._create_initial_state()
self.tracing = tracing
@@ -529,6 +557,295 @@ class Flow(Generic[T], metaclass=FlowMeta):
method = method.__get__(self, self.__class__)
self._methods[method.__name__] = method
+ @classmethod
+ def from_pending(
+ cls,
+ flow_id: str,
+ persistence: FlowPersistence | None = None,
+ **kwargs: Any,
+ ) -> "Flow[Any]":
+ """Create a Flow instance from a pending feedback state.
+
+ This classmethod is used to restore a flow that was paused waiting
+ for async human feedback. It loads the persisted state and pending
+ feedback context, then returns a flow instance ready to resume.
+
+ Args:
+ flow_id: The unique identifier of the paused flow (from state.id)
+ persistence: The persistence backend where the state was saved.
+ If not provided, defaults to SQLiteFlowPersistence().
+ **kwargs: Additional keyword arguments passed to the Flow constructor
+
+ Returns:
+ A new Flow instance with restored state, ready to call resume()
+
+ Raises:
+ ValueError: If no pending feedback exists for the given flow_id
+
+ Example:
+ ```python
+ # Simple usage with default persistence:
+ flow = MyFlow.from_pending("abc-123")
+ result = flow.resume("looks good!")
+
+ # Or with custom persistence:
+ persistence = SQLiteFlowPersistence("custom.db")
+ flow = MyFlow.from_pending("abc-123", persistence)
+ result = flow.resume("looks good!")
+ ```
+ """
+ if persistence is None:
+ from crewai.flow.persistence import SQLiteFlowPersistence
+
+ persistence = SQLiteFlowPersistence()
+
+ # Load pending feedback context and state
+ loaded = persistence.load_pending_feedback(flow_id)
+ if loaded is None:
+ raise ValueError(f"No pending feedback found for flow_id: {flow_id}")
+
+ state_data, pending_context = loaded
+
+ # Create flow instance with persistence
+ instance = cls(persistence=persistence, **kwargs)
+
+ # Restore state
+ instance._initialize_state(state_data)
+
+ # Store pending context for resume
+ instance._pending_feedback_context = pending_context
+
+ # Mark that we're resuming execution
+ instance._is_execution_resuming = True
+
+ # Mark the method as completed (it ran before pausing)
+ instance._completed_methods.add(FlowMethodName(pending_context.method_name))
+
+ return instance
+
+ @property
+ def pending_feedback(self) -> "PendingFeedbackContext | None":
+ """Get the pending feedback context if this flow is waiting for feedback.
+
+ Returns:
+ The PendingFeedbackContext if the flow is paused waiting for feedback,
+ None otherwise.
+
+ Example:
+ ```python
+ flow = MyFlow.from_pending("abc-123", persistence)
+ if flow.pending_feedback:
+ print(f"Waiting for feedback on: {flow.pending_feedback.method_name}")
+ ```
+ """
+ return self._pending_feedback_context
+
+ def resume(self, feedback: str = "") -> Any:
+ """Resume flow execution, optionally with human feedback.
+
+ This method continues flow execution after a flow was paused for
+ async human feedback. It processes the feedback (including LLM-based
+ outcome collapsing if emit was specified), stores the result, and
+ triggers downstream listeners.
+
+ Note:
+ If called from within an async context (running event loop),
+ use `await flow.resume_async(feedback)` instead.
+
+ Args:
+ feedback: The human's feedback as a string. If empty, uses
+ default_outcome or the first emit option.
+
+ Returns:
+ The final output from the flow execution, or HumanFeedbackPending
+ if another feedback point is reached.
+
+ Raises:
+ ValueError: If no pending feedback context exists (flow wasn't paused)
+ RuntimeError: If called from within a running event loop (use resume_async instead)
+
+ Example:
+ ```python
+ # In a sync webhook handler:
+ def handle_feedback(flow_id: str, feedback: str):
+ flow = MyFlow.from_pending(flow_id)
+ result = flow.resume(feedback)
+ return result
+
+ # In an async handler, use resume_async instead:
+ async def handle_feedback_async(flow_id: str, feedback: str):
+ flow = MyFlow.from_pending(flow_id)
+ result = await flow.resume_async(feedback)
+ return result
+ ```
+ """
+ try:
+ loop = asyncio.get_running_loop()
+ except RuntimeError:
+ loop = None
+
+ if loop is not None:
+ raise RuntimeError(
+ "resume() cannot be called from within an async context. "
+ "Use 'await flow.resume_async(feedback)' instead."
+ )
+
+ return asyncio.run(self.resume_async(feedback))
+
+ async def resume_async(self, feedback: str = "") -> Any:
+ """Async version of resume.
+
+ Resume flow execution, optionally with human feedback asynchronously.
+
+ Args:
+ feedback: The human's feedback as a string. If empty, uses
+ default_outcome or the first emit option.
+
+ Returns:
+ The final output from the flow execution, or HumanFeedbackPending
+ if another feedback point is reached.
+
+ Raises:
+ ValueError: If no pending feedback context exists
+ """
+ from crewai.flow.human_feedback import HumanFeedbackResult
+ from datetime import datetime
+
+ if self._pending_feedback_context is None:
+ raise ValueError(
+ "No pending feedback context. Use from_pending() to restore a paused flow."
+ )
+
+ context = self._pending_feedback_context
+ emit = context.emit
+ default_outcome = context.default_outcome
+ llm = context.llm
+
+ # Determine outcome
+ collapsed_outcome: str | None = None
+
+ if not feedback.strip():
+ # Empty feedback
+ if default_outcome:
+ collapsed_outcome = default_outcome
+ elif emit:
+ # No default and no feedback - use first outcome
+ collapsed_outcome = emit[0]
+ elif emit:
+ # Collapse feedback to outcome using LLM
+ collapsed_outcome = self._collapse_to_outcome(
+ feedback=feedback,
+ outcomes=emit,
+ llm=llm,
+ )
+
+ # Create result
+ result = HumanFeedbackResult(
+ output=context.method_output,
+ feedback=feedback,
+ outcome=collapsed_outcome,
+ timestamp=datetime.now(),
+ method_name=context.method_name,
+ metadata=context.metadata,
+ )
+
+ # Store in flow instance
+ self.human_feedback_history.append(result)
+ self.last_human_feedback = result
+
+ # Clear pending context after processing
+ self._pending_feedback_context = None
+
+ # Clear pending feedback from persistence
+ if self._persistence:
+ self._persistence.clear_pending_feedback(context.flow_id)
+
+ # Emit feedback received event
+ crewai_event_bus.emit(
+ self,
+ MethodExecutionFinishedEvent(
+ type="method_execution_finished",
+ flow_name=self.name or self.__class__.__name__,
+ method_name=context.method_name,
+ result=collapsed_outcome if emit else result,
+ state=self._state,
+ ),
+ )
+
+ # Clear resumption flag before triggering listeners
+ # This allows methods to re-execute in loops (e.g., implement_changes → suggest_changes → implement_changes)
+ self._is_execution_resuming = False
+
+ # Determine what to pass to listeners
+ try:
+ if emit and collapsed_outcome:
+ # Router behavior - the outcome itself triggers listeners
+ # First, add the outcome to method outputs as a router would
+ self._method_outputs.append(collapsed_outcome)
+
+ # Then trigger listeners for the outcome (e.g., "approved" triggers @listen("approved"))
+ final_result = await self._execute_listeners(
+ FlowMethodName(collapsed_outcome), # Use outcome as trigger
+ result, # Pass HumanFeedbackResult to listeners
+ )
+ else:
+ # Normal behavior - pass the HumanFeedbackResult
+ final_result = await self._execute_listeners(
+ FlowMethodName(context.method_name),
+ result,
+ )
+ except Exception as e:
+ # Check if flow was paused again for human feedback (loop case)
+ from crewai.flow.async_feedback.types import HumanFeedbackPending
+
+ if isinstance(e, HumanFeedbackPending):
+ # Auto-save pending feedback (create default persistence if needed)
+ if self._persistence is None:
+ from crewai.flow.persistence import SQLiteFlowPersistence
+
+ self._persistence = SQLiteFlowPersistence()
+
+ state_data = (
+ self._state
+ if isinstance(self._state, dict)
+ else self._state.model_dump()
+ )
+ self._persistence.save_pending_feedback(
+ flow_uuid=e.context.flow_id,
+ context=e.context,
+ state_data=state_data,
+ )
+
+ # Emit flow paused event
+ crewai_event_bus.emit(
+ self,
+ FlowPausedEvent(
+ type="flow_paused",
+ flow_name=self.name or self.__class__.__name__,
+ flow_id=e.context.flow_id,
+ method_name=e.context.method_name,
+ state=self._copy_and_serialize_state(),
+ message=e.context.message,
+ emit=e.context.emit,
+ ),
+ )
+ # Return the pending exception instead of raising
+ return e
+ raise
+
+ # Emit flow finished
+ crewai_event_bus.emit(
+ self,
+ FlowFinishedEvent(
+ type="flow_finished",
+ flow_name=self.name or self.__class__.__name__,
+ result=final_result,
+ state=self._state,
+ ),
+ )
+
+ return final_result
+
def _create_initial_state(self) -> T:
"""Create and initialize flow state with UUID and default values.
@@ -544,19 +861,21 @@ class Flow(Generic[T], metaclass=FlowMeta):
state_type = self._initial_state_t
if isinstance(state_type, type):
if issubclass(state_type, FlowState):
- # Create instance without id, then set it
+ # Create instance - FlowState auto-generates id via default_factory
instance = state_type()
- if not hasattr(instance, "id"):
- instance.id = str(uuid4())
+ # Ensure id is set - generate UUID if empty
+ if not getattr(instance, "id", None):
+ object.__setattr__(instance, "id", str(uuid4()))
return cast(T, instance)
if issubclass(state_type, BaseModel):
- # Create a new type that includes the ID field
- class StateWithId(state_type, FlowState): # type: ignore
+ # Create a new type with FlowState first for proper id default
+ class StateWithId(FlowState, state_type): # type: ignore
pass
instance = StateWithId()
- if not hasattr(instance, "id"):
- instance.id = str(uuid4())
+ # Ensure id is set - generate UUID if empty
+ if not getattr(instance, "id", None):
+ object.__setattr__(instance, "id", str(uuid4()))
return cast(T, instance)
if state_type is dict:
return cast(T, {"id": str(uuid4())})
@@ -574,7 +893,11 @@ class Flow(Generic[T], metaclass=FlowMeta):
model_fields = getattr(self.initial_state, "model_fields", None)
if not model_fields or "id" not in model_fields:
raise ValueError("Flow state model must have an 'id' field")
- return self.initial_state() # Uses model defaults
+ instance = self.initial_state()
+ # Ensure id is set - generate UUID if empty
+ if not getattr(instance, "id", None):
+ object.__setattr__(instance, "id", str(uuid4()))
+ return instance
if self.initial_state is dict:
return cast(T, {"id": str(uuid4())})
@@ -604,6 +927,10 @@ class Flow(Generic[T], metaclass=FlowMeta):
k: v for k, v in model.__dict__.items() if not k.startswith("_")
}
+ # Ensure id is set - generate UUID if empty
+ if not state_dict.get("id"):
+ state_dict["id"] = str(uuid4())
+
# Create new instance of the same class
model_class = type(model)
return cast(T, model_class(**state_dict))
@@ -686,16 +1013,22 @@ class Flow(Generic[T], metaclass=FlowMeta):
TypeError: If state is neither BaseModel nor dictionary
"""
if isinstance(self._state, dict):
- # For dict states, preserve existing fields unless overridden
+ # For dict states, update with inputs
+ # If inputs contains an id, use it (for restoring from persistence)
+ # Otherwise preserve the current id or generate a new one
current_id = self._state.get("id")
- # Only update specified fields
+ inputs_has_id = "id" in inputs
+
+ # Update specified fields
for k, v in inputs.items():
self._state[k] = v
- # Ensure ID is preserved or generated
- if current_id:
- self._state["id"] = current_id
- elif "id" not in self._state:
- self._state["id"] = str(uuid4())
+
+ # Ensure ID is set: prefer inputs id, then current id, then generate
+ if not inputs_has_id:
+ if current_id:
+ self._state["id"] = current_id
+ elif "id" not in self._state:
+ self._state["id"] = str(uuid4())
elif isinstance(self._state, BaseModel):
# For BaseModel states, preserve existing fields unless overridden
try:
@@ -985,17 +1318,73 @@ class Flow(Generic[T], metaclass=FlowMeta):
if future:
self._event_futures.append(future)
self._log_flow_event(
- f"Flow started with ID: {self.flow_id}", color="bold_magenta"
+ f"Flow started with ID: {self.flow_id}", color="bold magenta"
)
if inputs is not None and "id" not in inputs:
self._initialize_state(inputs)
- tasks = [
- self._execute_start_method(start_method)
- for start_method in self._start_methods
- ]
- await asyncio.gather(*tasks)
+ try:
+ tasks = [
+ self._execute_start_method(start_method)
+ for start_method in self._start_methods
+ ]
+ await asyncio.gather(*tasks)
+ except Exception as e:
+ # Check if flow was paused for human feedback
+ from crewai.flow.async_feedback.types import HumanFeedbackPending
+
+ if isinstance(e, HumanFeedbackPending):
+ # Auto-save pending feedback (create default persistence if needed)
+ if self._persistence is None:
+ from crewai.flow.persistence import SQLiteFlowPersistence
+
+ self._persistence = SQLiteFlowPersistence()
+
+ state_data = (
+ self._state
+ if isinstance(self._state, dict)
+ else self._state.model_dump()
+ )
+ self._persistence.save_pending_feedback(
+ flow_uuid=e.context.flow_id,
+ context=e.context,
+ state_data=state_data,
+ )
+
+ # Emit flow paused event
+ future = crewai_event_bus.emit(
+ self,
+ FlowPausedEvent(
+ type="flow_paused",
+ flow_name=self.name or self.__class__.__name__,
+ flow_id=e.context.flow_id,
+ method_name=e.context.method_name,
+ state=self._copy_and_serialize_state(),
+ message=e.context.message,
+ emit=e.context.emit,
+ ),
+ )
+ if future and isinstance(future, Future):
+ self._event_futures.append(future)
+
+ # Wait for events to be processed
+ if self._event_futures:
+ await asyncio.gather(
+ *[
+ asyncio.wrap_future(f)
+ for f in self._event_futures
+ if isinstance(f, Future)
+ ]
+ )
+ self._event_futures.clear()
+
+ # Return the pending exception instead of raising
+ # This allows the caller to handle the paused state gracefully
+ return e
+
+ # Re-raise other exceptions
+ raise
# Clear the resumption flag after initial execution completes
self._is_execution_resuming = False
@@ -1075,7 +1464,30 @@ class Flow(Generic[T], metaclass=FlowMeta):
enhanced_method = self._inject_trigger_payload_for_start_method(method)
result = await self._execute_method(start_method_name, enhanced_method)
- await self._execute_listeners(start_method_name, result)
+
+ # If start method is a router, use its result as an additional trigger
+ if start_method_name in self._routers and result is not None:
+ # Execute listeners for the start method name first
+ await self._execute_listeners(start_method_name, result)
+ # Then execute listeners for the router result (e.g., "approved")
+ router_result_trigger = FlowMethodName(str(result))
+ listeners_for_result = self._find_triggered_methods(
+ router_result_trigger, router_only=False
+ )
+ if listeners_for_result:
+ # Pass the HumanFeedbackResult if available
+ listener_result = (
+ self.last_human_feedback
+ if self.last_human_feedback is not None
+ else result
+ )
+ tasks = [
+ self._execute_single_listener(listener_name, listener_result)
+ for listener_name in listeners_for_result
+ ]
+ await asyncio.gather(*tasks)
+ else:
+ await self._execute_listeners(start_method_name, result)
def _inject_trigger_payload_for_start_method(
self, original_method: Callable[..., Any]
@@ -1166,6 +1578,28 @@ class Flow(Generic[T], metaclass=FlowMeta):
return result
except Exception as e:
+ # Check if this is a HumanFeedbackPending exception (paused, not failed)
+ from crewai.flow.async_feedback.types import HumanFeedbackPending
+
+ if isinstance(e, HumanFeedbackPending):
+ # Emit paused event instead of failed
+ future = crewai_event_bus.emit(
+ self,
+ MethodExecutionPausedEvent(
+ type="method_execution_paused",
+ method_name=method_name,
+ flow_name=self.name or self.__class__.__name__,
+ state=self._copy_and_serialize_state(),
+ flow_id=e.context.flow_id,
+ message=e.context.message,
+ emit=e.context.emit,
+ ),
+ )
+ if future:
+ self._event_futures.append(future)
+ raise e
+
+ # Regular failure
future = crewai_event_bus.emit(
self,
MethodExecutionFailedEvent(
@@ -1210,7 +1644,9 @@ class Flow(Generic[T], metaclass=FlowMeta):
"""
# First, handle routers repeatedly until no router triggers anymore
router_results = []
+ router_result_to_feedback: dict[str, Any] = {} # Map outcome -> HumanFeedbackResult
current_trigger = trigger_method
+ current_result = result # Track the result to pass to each router
while True:
routers_triggered = self._find_triggered_methods(
@@ -1220,13 +1656,22 @@ class Flow(Generic[T], metaclass=FlowMeta):
break
for router_name in routers_triggered:
- await self._execute_single_listener(router_name, result)
+ # For routers triggered by a router outcome, pass the HumanFeedbackResult
+ router_input = router_result_to_feedback.get(
+ str(current_trigger), current_result
+ )
+ await self._execute_single_listener(router_name, router_input)
# After executing router, the router's result is the path
router_result = (
self._method_outputs[-1] if self._method_outputs else None
)
if router_result: # Only add non-None results
router_results.append(router_result)
+ # If this was a human_feedback router, map the outcome to the feedback
+ if self.last_human_feedback is not None:
+ router_result_to_feedback[str(router_result)] = (
+ self.last_human_feedback
+ )
current_trigger = (
FlowMethodName(str(router_result))
if router_result is not None
@@ -1242,8 +1687,13 @@ class Flow(Generic[T], metaclass=FlowMeta):
current_trigger, router_only=False
)
if listeners_triggered:
+ # Determine what result to pass to listeners
+ # For router outcomes, pass the HumanFeedbackResult if available
+ listener_result = router_result_to_feedback.get(
+ str(current_trigger), result
+ )
tasks = [
- self._execute_single_listener(listener_name, result)
+ self._execute_single_listener(listener_name, listener_result)
for listener_name in listeners_triggered
]
await asyncio.gather(*tasks)
@@ -1435,14 +1885,223 @@ class Flow(Generic[T], metaclass=FlowMeta):
# Execute listeners (and possibly routers) of this listener
await self._execute_listeners(listener_name, listener_result)
+ # If this listener is also a router (e.g., has @human_feedback with emit),
+ # we need to trigger listeners for the router result as well
+ if listener_name in self._routers and listener_result is not None:
+ router_result_trigger = FlowMethodName(str(listener_result))
+ listeners_for_result = self._find_triggered_methods(
+ router_result_trigger, router_only=False
+ )
+ if listeners_for_result:
+ # Pass the HumanFeedbackResult if available
+ feedback_result = (
+ self.last_human_feedback
+ if self.last_human_feedback is not None
+ else listener_result
+ )
+ tasks = [
+ self._execute_single_listener(name, feedback_result)
+ for name in listeners_for_result
+ ]
+ await asyncio.gather(*tasks)
+
except Exception as e:
- logger.error(f"Error executing listener {listener_name}: {e}")
+ # Don't log HumanFeedbackPending as an error - it's expected control flow
+ from crewai.flow.async_feedback.types import HumanFeedbackPending
+
+ if not isinstance(e, HumanFeedbackPending):
+ logger.error(f"Error executing listener {listener_name}: {e}")
raise
+ def _request_human_feedback(
+ self,
+ message: str,
+ output: Any,
+ metadata: dict[str, Any] | None = None,
+ emit: Sequence[str] | None = None,
+ ) -> str:
+ """Request feedback from a human.
+ Args:
+ message: The message to display when requesting feedback.
+ output: The method output to show the human for review.
+ metadata: Optional metadata for enterprise integrations.
+ emit: Optional list of possible outcomes for routing.
+
+ Returns:
+ The human's feedback as a string. Empty string if no feedback provided.
+ """
+ from crewai.events.event_listener import event_listener
+ from crewai.events.types.flow_events import (
+ HumanFeedbackReceivedEvent,
+ HumanFeedbackRequestedEvent,
+ )
+
+ # Emit feedback requested event
+ crewai_event_bus.emit(
+ self,
+ HumanFeedbackRequestedEvent(
+ type="human_feedback_requested",
+ flow_name=self.name or self.__class__.__name__,
+ method_name="", # Will be set by decorator if needed
+ output=output,
+ message=message,
+ emit=list(emit) if emit else None,
+ ),
+ )
+
+ # Pause live updates during human input
+ formatter = event_listener.formatter
+ formatter.pause_live_updates()
+
+ try:
+ # Display output with formatting using centralized Rich console
+ formatter.console.print("\n" + "═" * 50, style="bold cyan")
+ formatter.console.print(" OUTPUT FOR REVIEW", style="bold cyan")
+ formatter.console.print("═" * 50 + "\n", style="bold cyan")
+ formatter.console.print(output)
+ formatter.console.print("\n" + "═" * 50 + "\n", style="bold cyan")
+
+ # Show message and prompt for feedback
+ formatter.console.print(message, style="yellow")
+ formatter.console.print("(Press Enter to skip, or type your feedback)\n", style="cyan")
+
+ feedback = input("Your feedback: ").strip()
+
+ # Emit feedback received event
+ crewai_event_bus.emit(
+ self,
+ HumanFeedbackReceivedEvent(
+ type="human_feedback_received",
+ flow_name=self.name or self.__class__.__name__,
+ method_name="", # Will be set by decorator if needed
+ feedback=feedback,
+ outcome=None, # Will be determined after collapsing
+ ),
+ )
+
+ return feedback
+ finally:
+ # Resume live updates
+ formatter.resume_live_updates()
+
+ def _collapse_to_outcome(
+ self,
+ feedback: str,
+ outcomes: Sequence[str],
+ llm: str | BaseLLM,
+ ) -> str:
+ """Collapse free-form feedback to a predefined outcome using LLM.
+
+ This method uses the specified LLM to interpret the human's feedback
+ and map it to one of the predefined outcomes for routing purposes.
+
+ Uses structured outputs (function calling) when supported by the LLM
+ to guarantee the response is one of the valid outcomes. Falls back
+ to simple prompting if structured outputs fail.
+
+ Args:
+ feedback: The raw human feedback text.
+ outcomes: Sequence of valid outcome strings to choose from.
+ llm: The LLM model to use. Can be a model string or BaseLLM instance.
+
+ Returns:
+ One of the outcome strings that best matches the feedback intent.
+ """
+ from typing import Literal
+
+ from pydantic import BaseModel, Field
+
+ from crewai.llm import LLM
+ from crewai.llms.base_llm import BaseLLM as BaseLLMClass
+ from crewai.utilities.i18n import get_i18n
+
+ # Get or create LLM instance
+ if isinstance(llm, str):
+ llm_instance = LLM(model=llm)
+ elif isinstance(llm, BaseLLMClass):
+ llm_instance = llm
+ else:
+ raise ValueError(f"Invalid llm type: {type(llm)}. Expected str or BaseLLM.")
+
+ # Dynamically create a Pydantic model with constrained outcomes
+ outcomes_tuple = tuple(outcomes)
+
+ class FeedbackOutcome(BaseModel):
+ """The outcome that best matches the human's feedback intent."""
+
+ outcome: Literal[outcomes_tuple] = Field( # type: ignore[valid-type]
+ description=f"The outcome that best matches the feedback. Must be one of: {', '.join(outcomes)}"
+ )
+
+ # Load prompt from translations (using cached instance)
+ i18n = get_i18n()
+ prompt_template = i18n.slice("human_feedback_collapse")
+
+ prompt = prompt_template.format(
+ feedback=feedback,
+ outcomes=", ".join(outcomes),
+ )
+
+ try:
+ # Try structured output first (function calling)
+ # Note: LLM.call with response_model returns JSON string, not Pydantic model
+ response = llm_instance.call(
+ messages=[{"role": "user", "content": prompt}],
+ response_model=FeedbackOutcome,
+ )
+
+ # Parse the response - LLM returns JSON string when using response_model
+ if isinstance(response, str):
+ import json
+
+ try:
+ parsed = json.loads(response)
+ return parsed.get("outcome", outcomes[0])
+ except json.JSONDecodeError:
+ # Not valid JSON, might be raw outcome string
+ response_clean = response.strip()
+ for outcome in outcomes:
+ if outcome.lower() == response_clean.lower():
+ return outcome
+ return outcomes[0]
+ elif isinstance(response, FeedbackOutcome):
+ return response.outcome
+ elif hasattr(response, "outcome"):
+ return response.outcome
+ else:
+ # Unexpected type, fall back to first outcome
+ logger.warning(f"Unexpected response type: {type(response)}")
+ return outcomes[0]
+
+ except Exception as e:
+ # Fallback to simple prompting if structured output fails
+ logger.warning(
+ f"Structured output failed, falling back to simple prompting: {e}"
+ )
+ response = llm_instance.call(messages=prompt)
+ response_clean = str(response).strip()
+
+ # Exact match (case-insensitive)
+ for outcome in outcomes:
+ if outcome.lower() == response_clean.lower():
+ return outcome
+
+ # Partial match
+ for outcome in outcomes:
+ if outcome.lower() in response_clean.lower():
+ return outcome
+
+ # Fallback to first outcome
+ logger.warning(
+ f"Could not match LLM response '{response_clean}' to outcomes {list(outcomes)}. "
+ f"Falling back to first outcome: {outcomes[0]}"
+ )
+ return outcomes[0]
+
@@ -1452,20 +2111,22 @@ class Flow(Generic[T], metaclass=FlowMeta):
def _log_flow_event(
self,
message: str,
- color: PrinterColor = "yellow",
+ color: str = "yellow",
level: Literal["info", "warning"] = "info",
) -> None:
"""Centralized logging method for flow events.
Args:
message: The message to log
- color: Color to use for console output (default: yellow)
- Available colors: purple, red, bold_green, bold_purple,
- bold_blue, yellow, yellow
+ color: Rich style for console output (default: "yellow")
+ Examples: "yellow", "red", "bold green", "bold magenta"
level: Log level to use (default: info)
Supported levels: info, warning
Note:
- This method uses the Printer utility for colored console output
+ This method uses the centralized Rich console formatter for output
and the standard logging module for log level support.
"""
- self._printer.print(message, color=color)
+ from crewai.events.event_listener import event_listener
+
+ event_listener.formatter.console.print(message, style=color)
if level == "info":
logger.info(message)
- logger.warning(message)
+ else:
+ logger.warning(message)
def plot(self, filename: str = "crewai_flow.html", show: bool = True) -> str:
"""Create interactive HTML visualization of Flow structure.
diff --git a/lib/crewai/src/crewai/flow/flow_wrappers.py b/lib/crewai/src/crewai/flow/flow_wrappers.py
index 8d81d677a..ace2fe727 100644
--- a/lib/crewai/src/crewai/flow/flow_wrappers.py
+++ b/lib/crewai/src/crewai/flow/flow_wrappers.py
@@ -70,6 +70,15 @@ class FlowMethod(Generic[P, R]):
self._is_coroutine = asyncio.coroutines._is_coroutine # type: ignore[attr-defined]
+ # Preserve flow-related attributes from wrapped method (e.g., from @human_feedback)
+ for attr in [
+ "__is_router__",
+ "__router_paths__",
+ "__human_feedback_config__",
+ ]:
+ if hasattr(meth, attr):
+ setattr(self, attr, getattr(meth, attr))
+
def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R:
"""Call the wrapped method.
diff --git a/lib/crewai/src/crewai/flow/human_feedback.py b/lib/crewai/src/crewai/flow/human_feedback.py
new file mode 100644
index 000000000..df433b5db
--- /dev/null
+++ b/lib/crewai/src/crewai/flow/human_feedback.py
@@ -0,0 +1,400 @@
+"""Human feedback decorator for Flow methods.
+
+This module provides the @human_feedback decorator that enables human-in-the-loop
+workflows within CrewAI Flows. It allows collecting human feedback on method outputs
+and optionally routing to different listeners based on the feedback.
+
+Supports both synchronous (blocking) and asynchronous (non-blocking) feedback
+collection through the provider parameter.
+
+Example (synchronous, default):
+ ```python
+ from crewai.flow import Flow, start, listen, human_feedback
+
+ class ReviewFlow(Flow):
+ @start()
+ @human_feedback(
+ message="Please review this content:",
+ emit=["approved", "rejected"],
+ llm="gpt-4o-mini",
+ )
+ def generate_content(self):
+ return {"title": "Article", "body": "Content..."}
+
+ @listen("approved")
+ def publish(self):
+ result = self.last_human_feedback
+ print(f"Publishing: {result.output}")
+ ```
+
+Example (asynchronous with custom provider):
+ ```python
+ from crewai.flow import Flow, start, human_feedback
+ from crewai.flow.async_feedback import HumanFeedbackProvider, HumanFeedbackPending
+
+ class SlackProvider(HumanFeedbackProvider):
+ def request_feedback(self, context, flow):
+ self.send_notification(context)
+ raise HumanFeedbackPending(context=context)
+
+ class ReviewFlow(Flow):
+ @start()
+ @human_feedback(
+ message="Review this:",
+ emit=["approved", "rejected"],
+ llm="gpt-4o-mini",
+ provider=SlackProvider(),
+ )
+ def generate_content(self):
+ return "Content..."
+ ```
+"""
+
+from __future__ import annotations
+
+import asyncio
+from collections.abc import Callable, Sequence
+from dataclasses import dataclass, field
+from datetime import datetime
+from functools import wraps
+from typing import TYPE_CHECKING, Any, TypeVar
+
+from crewai.flow.flow_wrappers import FlowMethod
+
+
+if TYPE_CHECKING:
+ from crewai.flow.async_feedback.types import HumanFeedbackProvider
+ from crewai.flow.flow import Flow
+ from crewai.llms.base_llm import BaseLLM
+
+
+F = TypeVar("F", bound=Callable[..., Any])
+
+
+@dataclass
+class HumanFeedbackResult:
+ """Result from a @human_feedback decorated method.
+
+ This dataclass captures all information about a human feedback interaction,
+ including the original method output, the human's feedback, and any
+ collapsed outcome for routing purposes.
+
+ Attributes:
+ output: The original return value from the decorated method that was
+ shown to the human for review.
+ feedback: The raw text feedback provided by the human. Empty string
+ if no feedback was provided.
+ outcome: The collapsed outcome string when emit is specified.
+ This is determined by the LLM based on the human's feedback.
+ None if emit was not specified.
+ timestamp: When the feedback was received.
+ method_name: The name of the decorated method that triggered feedback.
+ metadata: Optional metadata for enterprise integrations. Can be used
+ to pass additional context like channel, assignee, etc.
+
+ Example:
+ ```python
+ @listen("approved")
+ def handle_approval(self):
+ result = self.last_human_feedback
+ print(f"Output: {result.output}")
+ print(f"Feedback: {result.feedback}")
+ print(f"Outcome: {result.outcome}") # "approved"
+ ```
+ """
+
+ output: Any
+ feedback: str
+ outcome: str | None = None
+ timestamp: datetime = field(default_factory=datetime.now)
+ method_name: str = ""
+ metadata: dict[str, Any] = field(default_factory=dict)
+
+
+@dataclass
+class HumanFeedbackConfig:
+ """Configuration for the @human_feedback decorator.
+
+ Stores the parameters passed to the decorator for later use during
+ method execution and for introspection by visualization tools.
+
+ Attributes:
+ message: The message shown to the human when requesting feedback.
+ emit: Optional sequence of outcome strings for routing.
+ llm: The LLM model to use for collapsing feedback to outcomes.
+ default_outcome: The outcome to use when no feedback is provided.
+ metadata: Optional metadata for enterprise integrations.
+ provider: Optional custom feedback provider for async workflows.
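+
+ Example:
+ Illustrative introspection sketch; the decorator stores this config
+ on the wrapped method so tooling can read it back:
+ ```python
+ @human_feedback(
+ message="Review:",
+ emit=["approved", "rejected"],
+ llm="gpt-4o-mini",
+ )
+ def review(self):
+ return "content"
+
+ config = review.__human_feedback_config__
+ assert config.emit == ["approved", "rejected"]
+ ```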
+ """
+
+ message: str
+ emit: Sequence[str] | None = None
+ llm: str | BaseLLM | None = None
+ default_outcome: str | None = None
+ metadata: dict[str, Any] | None = None
+ provider: HumanFeedbackProvider | None = None
+
+
+class HumanFeedbackMethod(FlowMethod[Any, Any]):
+ """Wrapper for methods decorated with @human_feedback.
+
+ This wrapper extends FlowMethod to add human feedback specific attributes
+ that are used by FlowMeta for routing and by visualization tools.
+
+ Attributes:
+ __is_router__: True when emit is specified, enabling router behavior.
+ __router_paths__: List of possible outcomes when acting as a router.
+ __human_feedback_config__: The HumanFeedbackConfig for this method.
+ """
+
+ __is_router__: bool = False
+ __router_paths__: list[str] | None = None
+ __human_feedback_config__: HumanFeedbackConfig | None = None
+
+
+def human_feedback(
+ message: str,
+ emit: Sequence[str] | None = None,
+ llm: str | BaseLLM | None = None,
+ default_outcome: str | None = None,
+ metadata: dict[str, Any] | None = None,
+ provider: HumanFeedbackProvider | None = None,
+) -> Callable[[F], F]:
+ """Decorator for Flow methods that require human feedback.
+
+ This decorator wraps a Flow method to:
+ 1. Execute the method and capture its output
+ 2. Display the output to the human with a feedback request
+ 3. Collect the human's free-form feedback
+ 4. Optionally collapse the feedback to a predefined outcome using an LLM
+ 5. Store the result for access by downstream methods
+
+ When `emit` is specified, the decorator acts as a router, and the
+ collapsed outcome triggers the appropriate @listen decorated method.
+
+ Supports both synchronous (blocking) and asynchronous (non-blocking)
+ feedback collection through the `provider` parameter. If no provider
+ is specified, defaults to synchronous console input.
+
+ Args:
+ message: The message shown to the human when requesting feedback.
+ This should clearly explain what kind of feedback is expected.
+ emit: Optional sequence of outcome strings. When provided, the
+ human's feedback will be collapsed to one of these outcomes
+ using the specified LLM. The outcome then triggers @listen
+ methods that match.
+ llm: The LLM model to use for collapsing feedback to outcomes.
+ Required when emit is specified. Can be a model string
+ like "gpt-4o-mini" or a BaseLLM instance.
+ default_outcome: The outcome to use when the human provides no
+ feedback (empty input). Must be one of the emit values
+ if emit is specified.
+ metadata: Optional metadata for enterprise integrations. This is
+ passed through to the HumanFeedbackResult and can be used
+ by enterprise forks for features like Slack/Teams integration.
+ provider: Optional HumanFeedbackProvider for custom feedback
+ collection. Use this for async workflows that integrate with
+ external systems like Slack, Teams, or webhooks. When the
+ provider raises HumanFeedbackPending, the flow pauses and
+ can be resumed later with Flow.resume().
+
+ Returns:
+ A decorator function that wraps the method with human feedback
+ collection logic.
+
+ Raises:
+ ValueError: If emit is specified but llm is not provided.
+ ValueError: If default_outcome is specified but emit is not.
+ ValueError: If default_outcome is not in the emit list.
+ HumanFeedbackPending: When an async provider pauses execution.
+
+ Example:
+ Basic feedback without routing:
+ ```python
+ @start()
+ @human_feedback(message="Please review this output:")
+ def generate_content(self):
+ return "Generated content..."
+ ```
+
+ With routing based on feedback:
+ ```python
+ @start()
+ @human_feedback(
+ message="Review and approve or reject:",
+ emit=["approved", "rejected", "needs_revision"],
+ llm="gpt-4o-mini",
+ default_outcome="needs_revision",
+ )
+ def review_document(self):
+ return document_content
+
+ @listen("approved")
+ def publish(self):
+ print(f"Publishing: {self.last_human_feedback.output}")
+ ```
+
+ Async feedback with custom provider:
+ ```python
+ @start()
+ @human_feedback(
+ message="Review this content:",
+ emit=["approved", "rejected"],
+ llm="gpt-4o-mini",
+ provider=SlackProvider(channel="#reviews"),
+ )
+ def generate_content(self):
+ return "Content to review..."
+ ```
+ """
+ # Validation at decoration time
+ if emit is not None:
+ if not llm:
+ raise ValueError(
+ "llm is required when emit is specified. "
+ "Provide an LLM model string (e.g., 'gpt-4o-mini') or a BaseLLM instance."
+ )
+ if default_outcome is not None and default_outcome not in emit:
+ raise ValueError(
+ f"default_outcome '{default_outcome}' must be one of the "
+ f"emit options: {list(emit)}"
+ )
+ elif default_outcome is not None:
+ raise ValueError("default_outcome requires emit to be specified.")
+
+ def decorator(func: F) -> F:
+ """Inner decorator that wraps the function."""
+
+ def _request_feedback(flow_instance: Flow, method_output: Any) -> str:
+ """Request feedback using provider or default console."""
+ from crewai.flow.async_feedback.types import PendingFeedbackContext
+
+ # Build context for provider
+ # Use flow_id property which handles both dict and BaseModel states
+ context = PendingFeedbackContext(
+ flow_id=flow_instance.flow_id or "unknown",
+ flow_class=f"{flow_instance.__class__.__module__}.{flow_instance.__class__.__name__}",
+ method_name=func.__name__,
+ method_output=method_output,
+ message=message,
+ emit=list(emit) if emit else None,
+ default_outcome=default_outcome,
+ metadata=metadata or {},
+ llm=llm if isinstance(llm, str) else None,
+ )
+
+ if provider is not None:
+ # Use custom provider (may raise HumanFeedbackPending)
+ return provider.request_feedback(context, flow_instance)
+ else:
+ # Use default console input
+ return flow_instance._request_human_feedback(
+ message=message,
+ output=method_output,
+ metadata=metadata,
+ emit=emit,
+ )
+
+ def _process_feedback(
+ flow_instance: Flow,
+ method_output: Any,
+ raw_feedback: str,
+ ) -> HumanFeedbackResult | str:
+ """Process feedback and return result or outcome."""
+ # Determine outcome
+ collapsed_outcome: str | None = None
+
+ if not raw_feedback.strip():
+ # Empty feedback
+ if default_outcome:
+ collapsed_outcome = default_outcome
+ elif emit:
+ # No default and no feedback - use first outcome
+ collapsed_outcome = emit[0]
+ elif emit:
+ # Collapse feedback to outcome using LLM
+ collapsed_outcome = flow_instance._collapse_to_outcome(
+ feedback=raw_feedback,
+ outcomes=emit,
+ llm=llm,
+ )
+
+ # Create result
+ result = HumanFeedbackResult(
+ output=method_output,
+ feedback=raw_feedback,
+ outcome=collapsed_outcome,
+ timestamp=datetime.now(),
+ method_name=func.__name__,
+ metadata=metadata or {},
+ )
+
+ # Store in flow instance
+ flow_instance.human_feedback_history.append(result)
+ flow_instance.last_human_feedback = result
+
+ # Return based on mode
+ if emit:
+ # Return outcome for routing
+ return collapsed_outcome # type: ignore[return-value]
+ return result
+
+ if asyncio.iscoroutinefunction(func):
+ # Async wrapper
+ @wraps(func)
+ async def async_wrapper(self: Flow, *args: Any, **kwargs: Any) -> Any:
+ # Execute the original method
+ method_output = await func(self, *args, **kwargs)
+
+ # Request human feedback (may raise HumanFeedbackPending)
+ raw_feedback = _request_feedback(self, method_output)
+
+ # Process and return
+ return _process_feedback(self, method_output, raw_feedback)
+
+ wrapper: Any = async_wrapper
+ else:
+ # Sync wrapper
+ @wraps(func)
+ def sync_wrapper(self: Flow, *args: Any, **kwargs: Any) -> Any:
+ # Execute the original method
+ method_output = func(self, *args, **kwargs)
+
+ # Request human feedback (may raise HumanFeedbackPending)
+ raw_feedback = _request_feedback(self, method_output)
+
+ # Process and return
+ return _process_feedback(self, method_output, raw_feedback)
+
+ wrapper = sync_wrapper
+
+ # Preserve existing Flow decorator attributes
+ for attr in [
+ "__is_start_method__",
+ "__trigger_methods__",
+ "__condition_type__",
+ "__trigger_condition__",
+ "__is_flow_method__",
+ ]:
+ if hasattr(func, attr):
+ setattr(wrapper, attr, getattr(func, attr))
+
+ # Add human feedback specific attributes (create config inline to avoid race conditions)
+ wrapper.__human_feedback_config__ = HumanFeedbackConfig(
+ message=message,
+ emit=emit,
+ llm=llm,
+ default_outcome=default_outcome,
+ metadata=metadata,
+ provider=provider,
+ )
+ wrapper.__is_flow_method__ = True
+
+ # Make it a router if emit specified
+ if emit:
+ wrapper.__is_router__ = True
+ wrapper.__router_paths__ = list(emit)
+
+ return wrapper # type: ignore[return-value]
+
+ return decorator
diff --git a/lib/crewai/src/crewai/flow/persistence/base.py b/lib/crewai/src/crewai/flow/persistence/base.py
index fd7b27566..a2f66c7a9 100644
--- a/lib/crewai/src/crewai/flow/persistence/base.py
+++ b/lib/crewai/src/crewai/flow/persistence/base.py
@@ -1,16 +1,26 @@
"""Base class for flow state persistence."""
+from __future__ import annotations
+
from abc import ABC, abstractmethod
-from typing import Any
+from typing import TYPE_CHECKING, Any
from pydantic import BaseModel
+if TYPE_CHECKING:
+ from crewai.flow.async_feedback.types import PendingFeedbackContext
+
class FlowPersistence(ABC):
"""Abstract base class for flow state persistence.
This class defines the interface that all persistence implementations must follow.
It supports both structured (Pydantic BaseModel) and unstructured (dict) states.
+
+ For async human feedback support, implementations can optionally override:
+ - save_pending_feedback(): Saves state with pending feedback context
+ - load_pending_feedback(): Loads state and pending feedback context
+ - clear_pending_feedback(): Clears pending feedback after resume
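+
+ Example:
+ A minimal sketch of a hypothetical in-memory backend (illustrative;
+ only the pending-feedback hooks are shown, and the required abstract
+ methods such as save_state/load_state are omitted for brevity):
+ ```python
+ class InMemoryPendingFeedback(FlowPersistence):
+ def __init__(self):
+ self._pending = {}
+
+ def save_pending_feedback(self, flow_uuid, context, state_data):
+ state = state_data if isinstance(state_data, dict) else state_data.model_dump()
+ self._pending[flow_uuid] = (state, context)
+
+ def load_pending_feedback(self, flow_uuid):
+ return self._pending.get(flow_uuid)
+
+ def clear_pending_feedback(self, flow_uuid):
+ self._pending.pop(flow_uuid, None)
+ ```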
"""
@abstractmethod
@@ -45,3 +55,52 @@ class FlowPersistence(ABC):
Returns:
The most recent state as a dictionary, or None if no state exists
"""
+
+ def save_pending_feedback(
+ self,
+ flow_uuid: str,
+ context: PendingFeedbackContext,
+ state_data: dict[str, Any] | BaseModel,
+ ) -> None:
+ """Save state with a pending feedback marker.
+
+ This method is called when a flow is paused waiting for async human
+ feedback. The default implementation just saves the state without
+ the pending feedback context. Override to store the context.
+
+ Args:
+ flow_uuid: Unique identifier for the flow instance
+ context: The pending feedback context with all resume information
+ state_data: Current state data
+ """
+ # Default: just save the state without pending context
+ self.save_state(flow_uuid, context.method_name, state_data)
+
+ def load_pending_feedback(
+ self,
+ flow_uuid: str,
+ ) -> tuple[dict[str, Any], PendingFeedbackContext] | None:
+ """Load state and pending feedback context.
+
+ This method is called when resuming a paused flow. Override to
+ load both the state and the pending feedback context.
+
+ Args:
+ flow_uuid: Unique identifier for the flow instance
+
+ Returns:
+ Tuple of (state_data, pending_context) if pending feedback exists,
+ None otherwise.
+ """
+ return None
+
+ def clear_pending_feedback(self, flow_uuid: str) -> None: # noqa: B027
+ """Clear the pending feedback marker after successful resume.
+
+ This is called after feedback is received and the flow resumes.
+ Optional override to remove the pending feedback marker.
+
+ Args:
+ flow_uuid: Unique identifier for the flow instance
+ """
+ pass
diff --git a/lib/crewai/src/crewai/flow/persistence/sqlite.py b/lib/crewai/src/crewai/flow/persistence/sqlite.py
index a8016c606..6189e2043 100644
--- a/lib/crewai/src/crewai/flow/persistence/sqlite.py
+++ b/lib/crewai/src/crewai/flow/persistence/sqlite.py
@@ -2,17 +2,22 @@
SQLite-based implementation of flow state persistence.
"""
+from __future__ import annotations
+
from datetime import datetime, timezone
import json
from pathlib import Path
import sqlite3
-from typing import Any
+from typing import TYPE_CHECKING, Any
from pydantic import BaseModel
from crewai.flow.persistence.base import FlowPersistence
from crewai.utilities.paths import db_storage_path
+if TYPE_CHECKING:
+ from crewai.flow.async_feedback.types import PendingFeedbackContext
+
class SQLiteFlowPersistence(FlowPersistence):
"""SQLite-based implementation of flow state persistence.
@@ -20,6 +25,28 @@ class SQLiteFlowPersistence(FlowPersistence):
This class provides a simple, file-based persistence implementation using SQLite.
It's suitable for development and testing, or for production use cases with
moderate performance requirements.
+
+ This implementation supports async human feedback by storing pending feedback
+ context in a separate table. When a flow is paused waiting for feedback,
+ use save_pending_feedback() to persist the context. Later, use
+ load_pending_feedback() to retrieve it when resuming.
+
+ Example:
+ ```python
+ persistence = SQLiteFlowPersistence("flows.db")
+
+ # Start a flow with async feedback
+ try:
+ flow = MyFlow(persistence=persistence)
+ result = flow.kickoff()
+ except HumanFeedbackPending as e:
+ # Flow is paused, state is already persisted
+ print(f"Waiting for feedback: {e.context.flow_id}")
+
+ # Later, resume with feedback
+ flow = MyFlow.from_pending("abc-123", persistence)
+ result = flow.resume("looks good!")
+ ```
"""
def __init__(self, db_path: str | None = None) -> None:
@@ -45,6 +72,7 @@ class SQLiteFlowPersistence(FlowPersistence):
def init_db(self) -> None:
"""Create the necessary tables if they don't exist."""
with sqlite3.connect(self.db_path) as conn:
+ # Main state table
conn.execute(
"""
CREATE TABLE IF NOT EXISTS flow_states (
@@ -64,6 +92,26 @@ class SQLiteFlowPersistence(FlowPersistence):
"""
)
+ # Pending feedback table for async HITL
+ conn.execute(
+ """
+ CREATE TABLE IF NOT EXISTS pending_feedback (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ flow_uuid TEXT NOT NULL UNIQUE,
+ context_json TEXT NOT NULL,
+ state_json TEXT NOT NULL,
+ created_at DATETIME NOT NULL
+ )
+ """
+ )
+ # Add index for faster UUID lookups on pending feedback
+ conn.execute(
+ """
+ CREATE INDEX IF NOT EXISTS idx_pending_feedback_uuid
+ ON pending_feedback(flow_uuid)
+ """
+ )
+
def save_state(
self,
flow_uuid: str,
@@ -130,3 +178,104 @@ class SQLiteFlowPersistence(FlowPersistence):
if row:
return json.loads(row[0])
return None
+
+ def save_pending_feedback(
+ self,
+ flow_uuid: str,
+ context: PendingFeedbackContext,
+ state_data: dict[str, Any] | BaseModel,
+ ) -> None:
+ """Save state with a pending feedback marker.
+
+ This method stores both the flow state and the pending feedback context,
+ allowing the flow to be resumed later when feedback is received.
+
+ Args:
+ flow_uuid: Unique identifier for the flow instance
+ context: The pending feedback context with all resume information
+ state_data: Current state data
+ """
+ # Import here to avoid circular imports
+ from crewai.flow.async_feedback.types import PendingFeedbackContext
+
+ # Convert state_data to dict
+ if isinstance(state_data, BaseModel):
+ state_dict = state_data.model_dump()
+ elif isinstance(state_data, dict):
+ state_dict = state_data
+ else:
+ raise ValueError(
+ f"state_data must be either a Pydantic BaseModel or dict, got {type(state_data)}"
+ )
+
+ # Also save to regular state table for consistency
+ self.save_state(flow_uuid, context.method_name, state_data)
+
+ # Save pending feedback context
+ with sqlite3.connect(self.db_path) as conn:
+ # Use INSERT OR REPLACE to handle re-triggering feedback on same flow
+ conn.execute(
+ """
+ INSERT OR REPLACE INTO pending_feedback (
+ flow_uuid,
+ context_json,
+ state_json,
+ created_at
+ ) VALUES (?, ?, ?, ?)
+ """,
+ (
+ flow_uuid,
+ json.dumps(context.to_dict()),
+ json.dumps(state_dict),
+ datetime.now(timezone.utc).isoformat(),
+ ),
+ )
+
+ def load_pending_feedback(
+ self,
+ flow_uuid: str,
+ ) -> tuple[dict[str, Any], PendingFeedbackContext] | None:
+ """Load state and pending feedback context.
+
+ Args:
+ flow_uuid: Unique identifier for the flow instance
+
+ Returns:
+ Tuple of (state_data, pending_context) if pending feedback exists,
+ None otherwise.
+ """
+ # Import here to avoid circular imports
+ from crewai.flow.async_feedback.types import PendingFeedbackContext
+
+ with sqlite3.connect(self.db_path) as conn:
+ cursor = conn.execute(
+ """
+ SELECT state_json, context_json
+ FROM pending_feedback
+ WHERE flow_uuid = ?
+ """,
+ (flow_uuid,),
+ )
+ row = cursor.fetchone()
+
+ if row:
+ state_dict = json.loads(row[0])
+ context_dict = json.loads(row[1])
+ context = PendingFeedbackContext.from_dict(context_dict)
+ return (state_dict, context)
+ return None
+
+ def clear_pending_feedback(self, flow_uuid: str) -> None:
+ """Clear the pending feedback marker after successful resume.
+
+ Args:
+ flow_uuid: Unique identifier for the flow instance
+ """
+ with sqlite3.connect(self.db_path) as conn:
+ conn.execute(
+ """
+ DELETE FROM pending_feedback
+ WHERE flow_uuid = ?
+ """,
+ (flow_uuid,),
+ )
diff --git a/lib/crewai/src/crewai/translations/en.json b/lib/crewai/src/crewai/translations/en.json
index 47bec8af2..bed1407a5 100644
--- a/lib/crewai/src/crewai/translations/en.json
+++ b/lib/crewai/src/crewai/translations/en.json
@@ -29,7 +29,8 @@
"lite_agent_system_prompt_without_tools": "You are {role}. {backstory}\nYour personal goal is: {goal}\n\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
"lite_agent_response_format": "Ensure your final answer strictly adheres to the following OpenAPI schema: {response_format}\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python.",
"knowledge_search_query": "The original query is: {task_prompt}.",
- "knowledge_search_query_system_prompt": "Your goal is to rewrite the user query so that it is optimized for retrieval from a vector database. Consider how the query will be used to find relevant documents, and aim to make it more specific and context-aware. \n\n Do not include any other text than the rewritten query, especially any preamble or postamble and only add expected output format if its relevant to the rewritten query. \n\n Focus on the key words of the intended task and to retrieve the most relevant information. \n\n There will be some extra context provided that might need to be removed such as expected_output formats structured_outputs and other instructions."
+ "knowledge_search_query_system_prompt": "Your goal is to rewrite the user query so that it is optimized for retrieval from a vector database. Consider how the query will be used to find relevant documents, and aim to make it more specific and context-aware. \n\n Do not include any other text than the rewritten query, especially any preamble or postamble and only add expected output format if its relevant to the rewritten query. \n\n Focus on the key words of the intended task and to retrieve the most relevant information. \n\n There will be some extra context provided that might need to be removed such as expected_output formats structured_outputs and other instructions.",
+ "human_feedback_collapse": "Based on the following human feedback, determine which outcome best matches their intent.\n\nFeedback: {feedback}\n\nPossible outcomes: {outcomes}\n\nRespond with ONLY one of the exact outcome values listed above, nothing else."
},
"errors": {
"force_final_answer_error": "You can't keep going, here is the best final answer you generated:\n\n {formatted_answer}",
diff --git a/lib/crewai/tests/test_async_human_feedback.py b/lib/crewai/tests/test_async_human_feedback.py
new file mode 100644
index 000000000..9bb3d0045
--- /dev/null
+++ b/lib/crewai/tests/test_async_human_feedback.py
@@ -0,0 +1,1069 @@
+"""Tests for async human feedback functionality.
+
+This module tests the async/non-blocking human feedback flow, including:
+- PendingFeedbackContext creation and serialization
+- HumanFeedbackPending exception handling
+- HumanFeedbackProvider protocol
+- ConsoleProvider
+- Flow.from_pending() and Flow.resume()
+- SQLite persistence with pending feedback
+"""
+
+from __future__ import annotations
+
+import json
+import os
+import tempfile
+from datetime import datetime
+from typing import Any
+from unittest.mock import MagicMock, patch
+
+import pytest
+from pydantic import BaseModel
+
+from crewai.flow import Flow, start, listen, human_feedback
+from crewai.flow.async_feedback import (
+ ConsoleProvider,
+ HumanFeedbackPending,
+ HumanFeedbackProvider,
+ PendingFeedbackContext,
+)
+from crewai.flow.persistence import SQLiteFlowPersistence
+
+
+# =============================================================================
+# PendingFeedbackContext Tests
+# =============================================================================
+
+
+class TestPendingFeedbackContext:
+ """Tests for PendingFeedbackContext dataclass."""
+
+ def test_create_basic_context(self) -> None:
+ """Test creating a basic pending feedback context."""
+ context = PendingFeedbackContext(
+ flow_id="test-flow-123",
+ flow_class="myapp.flows.ReviewFlow",
+ method_name="review_content",
+ method_output="Content to review",
+ message="Please review this content:",
+ )
+
+ assert context.flow_id == "test-flow-123"
+ assert context.flow_class == "myapp.flows.ReviewFlow"
+ assert context.method_name == "review_content"
+ assert context.method_output == "Content to review"
+ assert context.message == "Please review this content:"
+ assert context.emit is None
+ assert context.default_outcome is None
+ assert context.metadata == {}
+ assert isinstance(context.requested_at, datetime)
+
+ def test_create_context_with_emit(self) -> None:
+ """Test creating context with routing outcomes."""
+ context = PendingFeedbackContext(
+ flow_id="test-flow-456",
+ flow_class="myapp.flows.ApprovalFlow",
+ method_name="submit_for_approval",
+ method_output={"document": "content"},
+ message="Approve or reject:",
+ emit=["approved", "rejected", "needs_revision"],
+ default_outcome="needs_revision",
+ llm="gpt-4o-mini",
+ )
+
+ assert context.emit == ["approved", "rejected", "needs_revision"]
+ assert context.default_outcome == "needs_revision"
+ assert context.llm == "gpt-4o-mini"
+
+ def test_to_dict_serialization(self) -> None:
+ """Test serializing context to dictionary."""
+ context = PendingFeedbackContext(
+ flow_id="test-flow-789",
+ flow_class="myapp.flows.TestFlow",
+ method_name="test_method",
+ method_output={"key": "value"},
+ message="Test message",
+ emit=["yes", "no"],
+ metadata={"channel": "#reviews"},
+ )
+
+ result = context.to_dict()
+
+ assert result["flow_id"] == "test-flow-789"
+ assert result["flow_class"] == "myapp.flows.TestFlow"
+ assert result["method_name"] == "test_method"
+ assert result["method_output"] == {"key": "value"}
+ assert result["message"] == "Test message"
+ assert result["emit"] == ["yes", "no"]
+ assert result["metadata"] == {"channel": "#reviews"}
+ assert "requested_at" in result
+
+ def test_from_dict_deserialization(self) -> None:
+ """Test deserializing context from dictionary."""
+ data = {
+ "flow_id": "test-flow-abc",
+ "flow_class": "myapp.flows.TestFlow",
+ "method_name": "my_method",
+ "method_output": "output value",
+ "message": "Feedback message",
+ "emit": ["option_a", "option_b"],
+ "default_outcome": "option_a",
+ "metadata": {"user_id": "123"},
+ "llm": "gpt-4o-mini",
+ "requested_at": "2024-01-15T10:30:00",
+ }
+
+ context = PendingFeedbackContext.from_dict(data)
+
+ assert context.flow_id == "test-flow-abc"
+ assert context.flow_class == "myapp.flows.TestFlow"
+ assert context.method_name == "my_method"
+ assert context.emit == ["option_a", "option_b"]
+ assert context.default_outcome == "option_a"
+ assert context.llm == "gpt-4o-mini"
+
+ def test_roundtrip_serialization(self) -> None:
+ """Test that to_dict/from_dict roundtrips correctly."""
+ original = PendingFeedbackContext(
+ flow_id="roundtrip-test",
+ flow_class="test.TestFlow",
+ method_name="test",
+ method_output={"nested": {"data": [1, 2, 3]}},
+ message="Test",
+ emit=["a", "b"],
+ metadata={"key": "value"},
+ )
+
+ serialized = original.to_dict()
+ restored = PendingFeedbackContext.from_dict(serialized)
+
+ assert restored.flow_id == original.flow_id
+ assert restored.flow_class == original.flow_class
+ assert restored.method_name == original.method_name
+ assert restored.method_output == original.method_output
+ assert restored.emit == original.emit
+ assert restored.metadata == original.metadata
+
+
+# =============================================================================
+# HumanFeedbackPending Exception Tests
+# =============================================================================
+
+
+class TestHumanFeedbackPending:
+ """Tests for HumanFeedbackPending exception."""
+
+ def test_basic_exception(self) -> None:
+ """Test creating basic pending exception."""
+ context = PendingFeedbackContext(
+ flow_id="exc-test",
+ flow_class="test.Flow",
+ method_name="method",
+ method_output="output",
+ message="message",
+ )
+
+ exc = HumanFeedbackPending(context=context)
+
+ assert exc.context == context
+ assert exc.callback_info == {}
+ assert "exc-test" in str(exc)
+ assert "method" in str(exc)
+
+ def test_exception_with_callback_info(self) -> None:
+ """Test pending exception with callback information."""
+ context = PendingFeedbackContext(
+ flow_id="callback-test",
+ flow_class="test.Flow",
+ method_name="method",
+ method_output="output",
+ message="message",
+ )
+
+ exc = HumanFeedbackPending(
+ context=context,
+ callback_info={
+ "webhook_url": "https://example.com/webhook",
+ "slack_thread": "123456",
+ },
+ )
+
+ assert exc.callback_info["webhook_url"] == "https://example.com/webhook"
+ assert exc.callback_info["slack_thread"] == "123456"
+
+ def test_exception_with_custom_message(self) -> None:
+ """Test pending exception with custom message."""
+ context = PendingFeedbackContext(
+ flow_id="msg-test",
+ flow_class="test.Flow",
+ method_name="method",
+ method_output="output",
+ message="message",
+ )
+
+ exc = HumanFeedbackPending(
+ context=context,
+ message="Custom pending message",
+ )
+
+ assert str(exc) == "Custom pending message"
+
+ def test_exception_is_catchable(self) -> None:
+ """Test that exception can be caught and handled."""
+ context = PendingFeedbackContext(
+ flow_id="catch-test",
+ flow_class="test.Flow",
+ method_name="method",
+ method_output="output",
+ message="message",
+ )
+
+ with pytest.raises(HumanFeedbackPending) as exc_info:
+ raise HumanFeedbackPending(context=context)
+
+ assert exc_info.value.context.flow_id == "catch-test"
+
+
+# =============================================================================
+# HumanFeedbackProvider Protocol Tests
+# =============================================================================
+
+
+class TestHumanFeedbackProvider:
+ """Tests for HumanFeedbackProvider protocol."""
+
+ def test_protocol_compliance_sync_provider(self) -> None:
+ """Test that sync provider complies with protocol."""
+
+ class SyncProvider:
+ def request_feedback(
+ self, context: PendingFeedbackContext, flow: Flow
+ ) -> str:
+ return "sync feedback"
+
+ provider = SyncProvider()
+ assert isinstance(provider, HumanFeedbackProvider)
+
+ def test_protocol_compliance_async_provider(self) -> None:
+ """Test that async provider complies with protocol."""
+
+ class AsyncProvider:
+ def request_feedback(
+ self, context: PendingFeedbackContext, flow: Flow
+ ) -> str:
+ raise HumanFeedbackPending(context=context)
+
+ provider = AsyncProvider()
+ assert isinstance(provider, HumanFeedbackProvider)
+
+
+# =============================================================================
+# ConsoleProvider Tests
+# =============================================================================
+
+
+class TestConsoleProvider:
+ """Tests for ConsoleProvider."""
+
+ def test_provider_initialization(self) -> None:
+ """Test console provider initialization."""
+ provider = ConsoleProvider()
+ assert provider.verbose is True
+
+ quiet_provider = ConsoleProvider(verbose=False)
+ assert quiet_provider.verbose is False
+
+
+
+# =============================================================================
+# SQLite Persistence Tests for Async Feedback
+# =============================================================================
+
+
+class TestSQLitePendingFeedback:
+ """Tests for SQLite persistence with pending feedback."""
+
+ def test_save_and_load_pending_feedback(self) -> None:
+ """Test saving and loading pending feedback context."""
+ with tempfile.TemporaryDirectory() as tmpdir:
+ db_path = os.path.join(tmpdir, "test_flows.db")
+ persistence = SQLiteFlowPersistence(db_path)
+
+ context = PendingFeedbackContext(
+ flow_id="persist-test-123",
+ flow_class="test.TestFlow",
+ method_name="review",
+ method_output={"data": "test"},
+ message="Review this:",
+ emit=["approved", "rejected"],
+ llm="gpt-4o-mini",
+ )
+
+ state_data = {"counter": 10, "items": ["a", "b"]}
+
+ # Save pending feedback
+ persistence.save_pending_feedback(
+ flow_uuid="persist-test-123",
+ context=context,
+ state_data=state_data,
+ )
+
+ # Load pending feedback
+ result = persistence.load_pending_feedback("persist-test-123")
+
+ assert result is not None
+ loaded_state, loaded_context = result
+ assert loaded_state["counter"] == 10
+ assert loaded_state["items"] == ["a", "b"]
+ assert loaded_context.flow_id == "persist-test-123"
+ assert loaded_context.emit == ["approved", "rejected"]
+
+ def test_load_nonexistent_pending_feedback(self) -> None:
+ """Test loading pending feedback that doesn't exist."""
+ with tempfile.TemporaryDirectory() as tmpdir:
+ db_path = os.path.join(tmpdir, "test_flows.db")
+ persistence = SQLiteFlowPersistence(db_path)
+
+ result = persistence.load_pending_feedback("nonexistent-id")
+ assert result is None
+
+ def test_clear_pending_feedback(self) -> None:
+ """Test clearing pending feedback after resume."""
+ with tempfile.TemporaryDirectory() as tmpdir:
+ db_path = os.path.join(tmpdir, "test_flows.db")
+ persistence = SQLiteFlowPersistence(db_path)
+
+ context = PendingFeedbackContext(
+ flow_id="clear-test",
+ flow_class="test.Flow",
+ method_name="method",
+ method_output="output",
+ message="message",
+ )
+
+ persistence.save_pending_feedback(
+ flow_uuid="clear-test",
+ context=context,
+ state_data={"key": "value"},
+ )
+
+ # Verify it exists
+ assert persistence.load_pending_feedback("clear-test") is not None
+
+ # Clear it
+ persistence.clear_pending_feedback("clear-test")
+
+ # Verify it's gone
+ assert persistence.load_pending_feedback("clear-test") is None
+
+ def test_replace_existing_pending_feedback(self) -> None:
+ """Test that saving pending feedback replaces existing entry."""
+ with tempfile.TemporaryDirectory() as tmpdir:
+ db_path = os.path.join(tmpdir, "test_flows.db")
+ persistence = SQLiteFlowPersistence(db_path)
+
+ flow_id = "replace-test"
+
+ # Save first version
+ context1 = PendingFeedbackContext(
+ flow_id=flow_id,
+ flow_class="test.Flow",
+ method_name="method1",
+ method_output="output1",
+ message="message1",
+ )
+ persistence.save_pending_feedback(
+ flow_uuid=flow_id,
+ context=context1,
+ state_data={"version": 1},
+ )
+
+ # Save second version (should replace)
+ context2 = PendingFeedbackContext(
+ flow_id=flow_id,
+ flow_class="test.Flow",
+ method_name="method2",
+ method_output="output2",
+ message="message2",
+ )
+ persistence.save_pending_feedback(
+ flow_uuid=flow_id,
+ context=context2,
+ state_data={"version": 2},
+ )
+
+ # Load and verify it's the second version
+ result = persistence.load_pending_feedback(flow_id)
+ assert result is not None
+ state, context = result
+ assert state["version"] == 2
+ assert context.method_name == "method2"
+
+
+# =============================================================================
+# Custom Async Provider Tests
+# =============================================================================
+
+
+class TestCustomAsyncProvider:
+ """Tests for custom async providers."""
+
+ def test_provider_raises_pending_exception(self) -> None:
+ """Test that async provider raises HumanFeedbackPending."""
+
+ class WebhookProvider:
+ def __init__(self, webhook_url: str):
+ self.webhook_url = webhook_url
+
+ def request_feedback(
+ self, context: PendingFeedbackContext, flow: Flow
+ ) -> str:
+ raise HumanFeedbackPending(
+ context=context,
+ callback_info={"url": f"{self.webhook_url}/{context.flow_id}"},
+ )
+
+ provider = WebhookProvider("https://example.com/api")
+ context = PendingFeedbackContext(
+ flow_id="webhook-test",
+ flow_class="test.Flow",
+ method_name="method",
+ method_output="output",
+ message="message",
+ )
+ mock_flow = MagicMock()
+
+ with pytest.raises(HumanFeedbackPending) as exc_info:
+ provider.request_feedback(context, mock_flow)
+
+ assert exc_info.value.callback_info["url"] == (
+ "https://example.com/api/webhook-test"
+ )
+
+
+# =============================================================================
+# Flow.from_pending and resume Tests
+# =============================================================================
+
+
+class TestFlowResumeWithFeedback:
+ """Tests for Flow.from_pending and resume."""
+
+ def test_from_pending_uses_default_persistence(self) -> None:
+ """Test that from_pending uses SQLiteFlowPersistence by default."""
+
+ class TestFlow(Flow):
+ @start()
+ def begin(self):
+ return "started"
+
+ # When no persistence is provided, it uses default SQLiteFlowPersistence
+ # This will raise "No pending feedback found" (not a persistence error)
+ with pytest.raises(ValueError, match="No pending feedback found"):
+ TestFlow.from_pending("nonexistent-id")
+
+ def test_from_pending_raises_for_missing_flow(self) -> None:
+ """Test that from_pending raises error for nonexistent flow."""
+ with tempfile.TemporaryDirectory() as tmpdir:
+ db_path = os.path.join(tmpdir, "test_flows.db")
+ persistence = SQLiteFlowPersistence(db_path)
+
+ class TestFlow(Flow):
+ @start()
+ def begin(self):
+ return "started"
+
+ with pytest.raises(ValueError, match="No pending feedback found"):
+ TestFlow.from_pending("nonexistent-id", persistence)
+
+ def test_from_pending_restores_state(self) -> None:
+ """Test that from_pending correctly restores flow state."""
+ with tempfile.TemporaryDirectory() as tmpdir:
+ db_path = os.path.join(tmpdir, "test_flows.db")
+ persistence = SQLiteFlowPersistence(db_path)
+
+ class TestState(BaseModel):
+ id: str = "test-restore-123"
+ counter: int = 0
+
+ class TestFlow(Flow[TestState]):
+ @start()
+ def begin(self):
+ return "started"
+
+ # Manually save pending feedback
+ context = PendingFeedbackContext(
+ flow_id="test-restore-123",
+ flow_class="test.TestFlow",
+ method_name="review",
+ method_output="content",
+ message="Review:",
+ )
+ persistence.save_pending_feedback(
+ flow_uuid="test-restore-123",
+ context=context,
+ state_data={"id": "test-restore-123", "counter": 42},
+ )
+
+ # Restore flow
+ flow = TestFlow.from_pending("test-restore-123", persistence)
+
+ assert flow._pending_feedback_context is not None
+ assert flow._pending_feedback_context.flow_id == "test-restore-123"
+ assert flow._is_execution_resuming is True
+ assert flow.state.counter == 42
+
+ def test_resume_without_pending_raises_error(self) -> None:
+ """Test that resume raises error without pending context."""
+
+ class TestFlow(Flow):
+ @start()
+ def begin(self):
+ return "started"
+
+ flow = TestFlow()
+
+ with pytest.raises(ValueError, match="No pending feedback context"):
+ flow.resume("some feedback")
+
+ def test_resume_from_async_context_raises_error(self) -> None:
+ """Test that resume() raises RuntimeError when called from async context."""
+ import asyncio
+
+ class TestFlow(Flow):
+ @start()
+ def begin(self):
+ return "started"
+
+ async def call_resume_from_async():
+ with tempfile.TemporaryDirectory() as tmpdir:
+ db_path = os.path.join(tmpdir, "test.db")
+ persistence = SQLiteFlowPersistence(db_path)
+
+ # Save pending feedback
+ context = PendingFeedbackContext(
+ flow_id="async-context-test",
+ flow_class="TestFlow",
+ method_name="begin",
+ method_output="output",
+ message="Review:",
+ )
+ persistence.save_pending_feedback(
+ flow_uuid="async-context-test",
+ context=context,
+ state_data={"id": "async-context-test"},
+ )
+
+ flow = TestFlow.from_pending("async-context-test", persistence)
+
+ # This should raise RuntimeError because we're in an async context
+ with pytest.raises(RuntimeError, match="cannot be called from within an async context"):
+ flow.resume("feedback")
+
+ asyncio.run(call_resume_from_async())
+
+ @pytest.mark.asyncio
+ async def test_resume_async_direct(self) -> None:
+ """Test resume_async() can be called directly in async context."""
+ with tempfile.TemporaryDirectory() as tmpdir:
+ db_path = os.path.join(tmpdir, "test.db")
+ persistence = SQLiteFlowPersistence(db_path)
+
+ class TestFlow(Flow):
+ @start()
+ @human_feedback(message="Review:")
+ def generate(self):
+ return "content"
+
+ @listen(generate)
+ def process(self, result):
+ return f"processed: {result.feedback}"
+
+ # Save pending feedback
+ context = PendingFeedbackContext(
+ flow_id="async-direct-test",
+ flow_class="TestFlow",
+ method_name="generate",
+ method_output="content",
+ message="Review:",
+ )
+ persistence.save_pending_feedback(
+ flow_uuid="async-direct-test",
+ context=context,
+ state_data={"id": "async-direct-test"},
+ )
+
+ flow = TestFlow.from_pending("async-direct-test", persistence)
+
+ with patch("crewai.flow.flow.crewai_event_bus.emit"):
+ result = await flow.resume_async("async feedback")
+
+ assert flow.last_human_feedback is not None
+ assert flow.last_human_feedback.feedback == "async feedback"
+
+ @patch("crewai.flow.flow.crewai_event_bus.emit")
+ def test_resume_basic(self, mock_emit: MagicMock) -> None:
+ """Test basic resume functionality."""
+ with tempfile.TemporaryDirectory() as tmpdir:
+ db_path = os.path.join(tmpdir, "test_flows.db")
+ persistence = SQLiteFlowPersistence(db_path)
+
+ class TestFlow(Flow):
+ @start()
+ @human_feedback(message="Review this:")
+ def generate(self):
+ return "generated content"
+
+ @listen(generate)
+ def process(self, feedback_result):
+ return f"Processed: {feedback_result.feedback}"
+
+ # Manually save pending feedback (simulating async pause)
+ context = PendingFeedbackContext(
+ flow_id="resume-test-123",
+ flow_class="test.TestFlow",
+ method_name="generate",
+ method_output="generated content",
+ message="Review this:",
+ )
+ persistence.save_pending_feedback(
+ flow_uuid="resume-test-123",
+ context=context,
+ state_data={"id": "resume-test-123"},
+ )
+
+ # Restore and resume
+ flow = TestFlow.from_pending("resume-test-123", persistence)
+ result = flow.resume("looks good!")
+
+ # Verify feedback was processed
+ assert flow.last_human_feedback is not None
+ assert flow.last_human_feedback.feedback == "looks good!"
+ assert flow.last_human_feedback.output == "generated content"
+
+ # Verify pending feedback was cleared
+ assert persistence.load_pending_feedback("resume-test-123") is None
+
+ @patch("crewai.flow.flow.crewai_event_bus.emit")
+ def test_resume_routing(self, mock_emit: MagicMock) -> None:
+ """Test resume with routing."""
+ with tempfile.TemporaryDirectory() as tmpdir:
+ db_path = os.path.join(tmpdir, "test_flows.db")
+ persistence = SQLiteFlowPersistence(db_path)
+
+ class TestFlow(Flow):
+ result_path: str = ""
+
+ @start()
+ @human_feedback(
+ message="Approve?",
+ emit=["approved", "rejected"],
+ llm="gpt-4o-mini",
+ )
+ def review(self):
+ return "content"
+
+ @listen("approved")
+ def handle_approved(self):
+ self.result_path = "approved"
+ return "Approved!"
+
+ @listen("rejected")
+ def handle_rejected(self):
+ self.result_path = "rejected"
+ return "Rejected!"
+
+ # Save pending feedback
+ context = PendingFeedbackContext(
+ flow_id="route-test-123",
+ flow_class="test.TestFlow",
+ method_name="review",
+ method_output="content",
+ message="Approve?",
+ emit=["approved", "rejected"],
+ llm="gpt-4o-mini",
+ )
+ persistence.save_pending_feedback(
+ flow_uuid="route-test-123",
+ context=context,
+ state_data={"id": "route-test-123"},
+ )
+
+ # Restore and resume - mock _collapse_to_outcome directly
+ flow = TestFlow.from_pending("route-test-123", persistence)
+
+ with patch.object(flow, "_collapse_to_outcome", return_value="approved"):
+ result = flow.resume("yes, this looks great")
+
+ # Verify routing worked
+ assert flow.last_human_feedback.outcome == "approved"
+ assert flow.result_path == "approved"
+
+
+# =============================================================================
+# Integration Tests with @human_feedback decorator
+# =============================================================================
+
+
+class TestAsyncHumanFeedbackIntegration:
+ """Integration tests for async human feedback with decorator."""
+
+ def test_decorator_with_provider_parameter(self) -> None:
+ """Test that decorator accepts provider parameter."""
+
+ class MockProvider:
+ def request_feedback(
+ self, context: PendingFeedbackContext, flow: Flow
+ ) -> str:
+ raise HumanFeedbackPending(context=context)
+
+ # This should not raise
+ class TestFlow(Flow):
+ @start()
+ @human_feedback(
+ message="Review:",
+ provider=MockProvider(),
+ )
+ def review(self):
+ return "content"
+
+ flow = TestFlow()
+ # Verify the method has the provider config
+ method = getattr(flow, "review")
+ assert hasattr(method, "__human_feedback_config__")
+ assert method.__human_feedback_config__.provider is not None
+
+ @patch("crewai.flow.flow.crewai_event_bus.emit")
+ def test_async_provider_pauses_flow(self, mock_emit: MagicMock) -> None:
+ """Test that async provider pauses flow execution."""
+ with tempfile.TemporaryDirectory() as tmpdir:
+ db_path = os.path.join(tmpdir, "test_flows.db")
+ persistence = SQLiteFlowPersistence(db_path)
+
+ class PausingProvider:
+ def __init__(self, persistence: SQLiteFlowPersistence):
+ self.persistence = persistence
+
+ def request_feedback(
+ self, context: PendingFeedbackContext, flow: Flow
+ ) -> str:
+ # Save pending state
+ self.persistence.save_pending_feedback(
+ flow_uuid=context.flow_id,
+ context=context,
+ state_data=flow.state if isinstance(flow.state, dict) else flow.state.model_dump(),
+ )
+ raise HumanFeedbackPending(
+ context=context,
+ callback_info={"saved": True},
+ )
+
+ class TestFlow(Flow):
+ @start()
+ @human_feedback(
+ message="Review:",
+ provider=PausingProvider(persistence),
+ )
+ def generate(self):
+ return "generated content"
+
+ flow = TestFlow(persistence=persistence)
+
+ # kickoff now returns HumanFeedbackPending instead of raising it
+ result = flow.kickoff()
+
+ assert isinstance(result, HumanFeedbackPending)
+ assert result.callback_info["saved"] is True
+
+ # Get flow ID from the returned pending context
+ flow_id = result.context.flow_id
+
+ # Verify state was persisted
+ persisted = persistence.load_pending_feedback(flow_id)
+ assert persisted is not None
+
+ @patch("crewai.flow.flow.crewai_event_bus.emit")
+ def test_full_async_flow_cycle(self, mock_emit: MagicMock) -> None:
+ """Test complete async flow: start -> pause -> resume."""
+ with tempfile.TemporaryDirectory() as tmpdir:
+ db_path = os.path.join(tmpdir, "test_flows.db")
+ persistence = SQLiteFlowPersistence(db_path)
+
+ flow_id_holder: list[str] = []
+
+ class SaveAndPauseProvider:
+ def __init__(self, persistence: SQLiteFlowPersistence):
+ self.persistence = persistence
+
+ def request_feedback(
+ self, context: PendingFeedbackContext, flow: Flow
+ ) -> str:
+ flow_id_holder.append(context.flow_id)
+ self.persistence.save_pending_feedback(
+ flow_uuid=context.flow_id,
+ context=context,
+ state_data=flow.state if isinstance(flow.state, dict) else flow.state.model_dump(),
+ )
+ raise HumanFeedbackPending(context=context)
+
+ class ReviewFlow(Flow):
+ processed_feedback: str = ""
+
+ @start()
+ @human_feedback(
+ message="Review this content:",
+ provider=SaveAndPauseProvider(persistence),
+ )
+ def generate(self):
+ return "AI generated content"
+
+ @listen(generate)
+ def process(self, feedback_result):
+ self.processed_feedback = feedback_result.feedback
+ return f"Final: {feedback_result.feedback}"
+
+ # Phase 1: Start flow (should pause)
+ flow1 = ReviewFlow(persistence=persistence)
+ result = flow1.kickoff()
+
+ # kickoff now returns HumanFeedbackPending instead of raising it
+ assert isinstance(result, HumanFeedbackPending)
+ assert len(flow_id_holder) == 1
+ paused_flow_id = flow_id_holder[0]
+
+ # Phase 2: Resume flow
+ flow2 = ReviewFlow.from_pending(paused_flow_id, persistence)
+ result = flow2.resume("This is my feedback")
+
+ # Verify feedback was processed
+ assert flow2.last_human_feedback.feedback == "This is my feedback"
+ assert flow2.processed_feedback == "This is my feedback"
+
+
+# =============================================================================
+# Edge Case Tests
+# =============================================================================
+
+
+class TestAutoPersistence:
+ """Tests for automatic persistence when no persistence is provided."""
+
+ @patch("crewai.flow.flow.crewai_event_bus.emit")
+ def test_auto_persistence_when_none_provided(self, mock_emit: MagicMock) -> None:
+ """Test that persistence is auto-created when HumanFeedbackPending is raised."""
+
+ class PausingProvider:
+ def request_feedback(
+ self, context: PendingFeedbackContext, flow: Flow
+ ) -> str:
+ raise HumanFeedbackPending(
+ context=context,
+ callback_info={"paused": True},
+ )
+
+ class TestFlow(Flow):
+ @start()
+ @human_feedback(
+ message="Review:",
+ provider=PausingProvider(),
+ )
+ def generate(self):
+ return "content"
+
+ # Create flow WITHOUT persistence
+ flow = TestFlow()
+ assert flow._persistence is None # No persistence initially
+
+ # kickoff should auto-create persistence when HumanFeedbackPending is raised
+ result = flow.kickoff()
+
+ # Should return HumanFeedbackPending (not raise it)
+ assert isinstance(result, HumanFeedbackPending)
+
+ # Persistence should have been auto-created
+ assert flow._persistence is not None
+
+ # The pending feedback should be saved
+ flow_id = result.context.flow_id
+ loaded = flow._persistence.load_pending_feedback(flow_id)
+ assert loaded is not None
+
+
+class TestCollapseToOutcomeJsonParsing:
+ """Tests for _collapse_to_outcome JSON parsing edge cases."""
+
+ def test_json_string_response_is_parsed(self) -> None:
+ """Test that JSON string response from LLM is correctly parsed."""
+ flow = Flow()
+
+ with patch("crewai.llm.LLM") as MockLLM:
+ mock_llm = MagicMock()
+ # Simulate LLM returning JSON string (the bug we fixed)
+ mock_llm.call.return_value = '{"outcome": "approved"}'
+ MockLLM.return_value = mock_llm
+
+ result = flow._collapse_to_outcome(
+ feedback="I approve this",
+ outcomes=["approved", "rejected"],
+ llm="gpt-4o-mini",
+ )
+
+ assert result == "approved"
+
+ def test_plain_string_response_is_matched(self) -> None:
+ """Test that plain string response is correctly matched."""
+ flow = Flow()
+
+ with patch("crewai.llm.LLM") as MockLLM:
+ mock_llm = MagicMock()
+ # Simulate LLM returning plain outcome string
+ mock_llm.call.return_value = "rejected"
+ MockLLM.return_value = mock_llm
+
+ result = flow._collapse_to_outcome(
+ feedback="This is not good",
+ outcomes=["approved", "rejected"],
+ llm="gpt-4o-mini",
+ )
+
+ assert result == "rejected"
+
+ def test_invalid_json_falls_back_to_matching(self) -> None:
+ """Test that invalid JSON falls back to string matching."""
+ flow = Flow()
+
+ with patch("crewai.llm.LLM") as MockLLM:
+ mock_llm = MagicMock()
+ # Invalid JSON that contains "approved"
+ mock_llm.call.return_value = "{invalid json but says approved"
+ MockLLM.return_value = mock_llm
+
+ result = flow._collapse_to_outcome(
+ feedback="looks good",
+ outcomes=["approved", "rejected"],
+ llm="gpt-4o-mini",
+ )
+
+ assert result == "approved"
+
+ def test_llm_exception_falls_back_to_simple_prompting(self) -> None:
+ """Test that LLM exception triggers fallback to simple prompting."""
+ flow = Flow()
+
+ with patch("crewai.llm.LLM") as MockLLM:
+ mock_llm = MagicMock()
+ # First call raises, second call succeeds (fallback)
+ mock_llm.call.side_effect = [
+ Exception("Structured output failed"),
+ "approved",
+ ]
+ MockLLM.return_value = mock_llm
+
+ result = flow._collapse_to_outcome(
+ feedback="I approve",
+ outcomes=["approved", "rejected"],
+ llm="gpt-4o-mini",
+ )
+
+ assert result == "approved"
+ # Verify it was called twice (initial + fallback)
+ assert mock_llm.call.call_count == 2
+
+
+class TestAsyncHumanFeedbackEdgeCases:
+ """Edge case tests for async human feedback."""
+
+ def test_pending_context_with_complex_output(self) -> None:
+ """Test context with complex nested output."""
+ complex_output = {
+ "items": [{"id": 1, "name": "Item 1"}, {"id": 2, "name": "Item 2"}],
+ "metadata": {"total": 2, "page": 1},
+ "nested": {"deep": {"value": "test"}},
+ }
+
+ context = PendingFeedbackContext(
+ flow_id="complex-test",
+ flow_class="test.Flow",
+ method_name="method",
+ method_output=complex_output,
+ message="Review:",
+ )
+
+ # Serialize and deserialize
+ serialized = context.to_dict()
+ json_str = json.dumps(serialized) # Should be JSON serializable
+ restored = PendingFeedbackContext.from_dict(json.loads(json_str))
+
+ assert restored.method_output == complex_output
+
+ def test_empty_feedback_uses_default_outcome(self) -> None:
+ """Test that empty feedback uses default outcome during resume."""
+ with tempfile.TemporaryDirectory() as tmpdir:
+ db_path = os.path.join(tmpdir, "test_flows.db")
+ persistence = SQLiteFlowPersistence(db_path)
+
+ class TestFlow(Flow):
+ @start()
+ def generate(self):
+ return "content"
+
+ # Save pending feedback with default_outcome
+ context = PendingFeedbackContext(
+ flow_id="default-test",
+ flow_class="test.Flow",
+ method_name="generate",
+ method_output="content",
+ message="Review:",
+ emit=["approved", "rejected"],
+ default_outcome="approved",
+ llm="gpt-4o-mini",
+ )
+ persistence.save_pending_feedback(
+ flow_uuid="default-test",
+ context=context,
+ state_data={"id": "default-test"},
+ )
+
+ flow = TestFlow.from_pending("default-test", persistence)
+
+ with patch("crewai.flow.flow.crewai_event_bus.emit"):
+ result = flow.resume("") # Empty feedback
+
+ assert flow.last_human_feedback.outcome == "approved"
+
+ def test_resume_without_feedback_uses_default(self) -> None:
+ """Test that resume() can be called without feedback argument."""
+ with tempfile.TemporaryDirectory() as tmpdir:
+ db_path = os.path.join(tmpdir, "test.db")
+ persistence = SQLiteFlowPersistence(db_path)
+
+ class TestFlow(Flow):
+ @start()
+ def step(self):
+ return "output"
+
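+            # Simulate a previously saved pending-feedback checkpoint awaiting resume.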
+ context = PendingFeedbackContext(
+ flow_id="no-feedback-test",
+ flow_class="TestFlow",
+ method_name="step",
+ method_output="test output",
+ message="Review:",
+ emit=["approved", "rejected"],
+ default_outcome="approved",
+ llm="gpt-4o-mini",
+ )
+ persistence.save_pending_feedback(
+ flow_uuid="no-feedback-test",
+ context=context,
+ state_data={"id": "no-feedback-test"},
+ )
+
+ flow = TestFlow.from_pending("no-feedback-test", persistence)
+
+ with patch("crewai.flow.flow.crewai_event_bus.emit"):
+ # Call resume() with no arguments - should use default
+ result = flow.resume()
+
+ assert flow.last_human_feedback.outcome == "approved"
+ assert flow.last_human_feedback.feedback == ""
diff --git a/lib/crewai/tests/test_human_feedback_decorator.py b/lib/crewai/tests/test_human_feedback_decorator.py
new file mode 100644
index 000000000..0ae6adbbe
--- /dev/null
+++ b/lib/crewai/tests/test_human_feedback_decorator.py
@@ -0,0 +1,401 @@
+"""Unit tests for the @human_feedback decorator.
+
+This module tests the @human_feedback decorator's validation logic,
+async support, and attribute preservation functionality.
+"""
+
+from __future__ import annotations
+
+import asyncio
+from datetime import datetime
+from unittest.mock import MagicMock, patch
+
+import pytest
+
+from crewai.flow import Flow, human_feedback, listen, start
+from crewai.flow.human_feedback import (
+ HumanFeedbackConfig,
+ HumanFeedbackResult,
+)
+
+
+class TestHumanFeedbackValidation:
+ """Tests for decorator parameter validation."""
+
+ def test_emit_requires_llm(self):
+ """Test that specifying emit without llm raises ValueError."""
+ with pytest.raises(ValueError) as exc_info:
+
+ @human_feedback(
+ message="Review this:",
+ emit=["approve", "reject"],
+ # llm not provided
+ )
+ def test_method(self):
+ return "output"
+
+ assert "llm is required" in str(exc_info.value)
+
+ def test_default_outcome_requires_emit(self):
+ """Test that specifying default_outcome without emit raises ValueError."""
+ with pytest.raises(ValueError) as exc_info:
+
+ @human_feedback(
+ message="Review this:",
+ default_outcome="approve",
+ # emit not provided
+ )
+ def test_method(self):
+ return "output"
+
+ assert "requires emit" in str(exc_info.value)
+
+ def test_default_outcome_must_be_in_emit(self):
+ """Test that default_outcome must be one of the emit values."""
+ with pytest.raises(ValueError) as exc_info:
+
+ @human_feedback(
+ message="Review this:",
+ emit=["approve", "reject"],
+ llm="gpt-4o-mini",
+ default_outcome="invalid_outcome",
+ )
+ def test_method(self):
+ return "output"
+
+ assert "must be one of" in str(exc_info.value)
+
+ def test_valid_configuration_with_routing(self):
+ """Test that valid configuration with routing doesn't raise."""
+
+ @human_feedback(
+ message="Review this:",
+ emit=["approve", "reject"],
+ llm="gpt-4o-mini",
+ default_outcome="reject",
+ )
+ def test_method(self):
+ return "output"
+
+ # Should not raise
+ assert hasattr(test_method, "__human_feedback_config__")
+ assert test_method.__is_router__ is True
+ assert test_method.__router_paths__ == ["approve", "reject"]
+
+ def test_valid_configuration_without_routing(self):
+ """Test that valid configuration without routing doesn't raise."""
+
+ @human_feedback(message="Review this:")
+ def test_method(self):
+ return "output"
+
+ # Should not raise
+ assert hasattr(test_method, "__human_feedback_config__")
+        assert not getattr(test_method, "__is_router__", False)
+
+
+class TestHumanFeedbackConfig:
+ """Tests for HumanFeedbackConfig dataclass."""
+
+ def test_config_creation(self):
+ """Test HumanFeedbackConfig can be created with all parameters."""
+ config = HumanFeedbackConfig(
+ message="Test message",
+ emit=["a", "b"],
+ llm="gpt-4",
+ default_outcome="a",
+ metadata={"key": "value"},
+ )
+
+ assert config.message == "Test message"
+ assert config.emit == ["a", "b"]
+ assert config.llm == "gpt-4"
+ assert config.default_outcome == "a"
+ assert config.metadata == {"key": "value"}
+
+
+class TestHumanFeedbackResult:
+ """Tests for HumanFeedbackResult dataclass."""
+
+ def test_result_creation(self):
+ """Test HumanFeedbackResult can be created with all fields."""
+ result = HumanFeedbackResult(
+ output={"title": "Test"},
+ feedback="Looks good",
+ outcome="approved",
+ method_name="test_method",
+ )
+
+ assert result.output == {"title": "Test"}
+ assert result.feedback == "Looks good"
+ assert result.outcome == "approved"
+ assert result.method_name == "test_method"
+ assert isinstance(result.timestamp, datetime)
+ assert result.metadata == {}
+
+ def test_result_with_metadata(self):
+ """Test HumanFeedbackResult with custom metadata."""
+ result = HumanFeedbackResult(
+ output="test",
+ feedback="feedback",
+ metadata={"channel": "slack", "user": "test_user"},
+ )
+
+ assert result.metadata == {"channel": "slack", "user": "test_user"}
+
+
+class TestDecoratorAttributePreservation:
+ """Tests for preserving Flow decorator attributes."""
+
+ def test_preserves_start_method_attributes(self):
+ """Test that @human_feedback preserves @start decorator attributes."""
+
+ class TestFlow(Flow):
+ @start()
+ @human_feedback(message="Review:")
+ def my_start_method(self):
+ return "output"
+
+ # Check that start method attributes are preserved
+ flow = TestFlow()
+ method = flow._methods.get("my_start_method")
+ assert method is not None
+ assert hasattr(method, "__is_start_method__") or "my_start_method" in flow._start_methods
+
+ def test_preserves_listen_method_attributes(self):
+ """Test that @human_feedback preserves @listen decorator attributes."""
+
+ class TestFlow(Flow):
+ @start()
+ def begin(self):
+ return "start"
+
+ @listen("begin")
+ @human_feedback(message="Review:")
+ def review(self):
+ return "review output"
+
+ flow = TestFlow()
+ # The method should be registered as a listener
+ assert "review" in flow._listeners or any(
+ "review" in str(v) for v in flow._listeners.values()
+ )
+
+ def test_sets_router_attributes_when_emit_specified(self):
+ """Test that router attributes are set when emit is specified."""
+
+ # Test the decorator directly without @start wrapping
+ @human_feedback(
+ message="Review:",
+ emit=["approved", "rejected"],
+ llm="gpt-4o-mini",
+ )
+ def review_method(self):
+ return "output"
+
+ assert review_method.__is_router__ is True
+ assert review_method.__router_paths__ == ["approved", "rejected"]
+
+
+class TestAsyncSupport:
+ """Tests for async method support."""
+
+ def test_async_method_detection(self):
+ """Test that async methods are properly detected and wrapped."""
+
+ @human_feedback(message="Review:")
+ async def async_method(self):
+ return "async output"
+
+ assert asyncio.iscoroutinefunction(async_method)
+
+ def test_sync_method_remains_sync(self):
+ """Test that sync methods remain synchronous."""
+
+ @human_feedback(message="Review:")
+ def sync_method(self):
+ return "sync output"
+
+ assert not asyncio.iscoroutinefunction(sync_method)
+
+
+class TestHumanFeedbackExecution:
+ """Tests for actual human feedback execution."""
+
+ @patch("builtins.input", return_value="This looks great!")
+ @patch("builtins.print")
+ def test_basic_feedback_collection(self, mock_print, mock_input):
+ """Test basic feedback collection without routing."""
+
+ class TestFlow(Flow):
+ @start()
+ @human_feedback(message="Please review:")
+ def generate(self):
+ return "Generated content"
+
+ flow = TestFlow()
+
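+        # Stub the feedback prompt so the test never blocks on real console input.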
+ with patch.object(flow, "_request_human_feedback", return_value="Great job!"):
+ result = flow.kickoff()
+
+ assert flow.last_human_feedback is not None
+ assert flow.last_human_feedback.output == "Generated content"
+ assert flow.last_human_feedback.feedback == "Great job!"
+
+ @patch("builtins.input", return_value="")
+ @patch("builtins.print")
+ def test_empty_feedback_with_default_outcome(self, mock_print, mock_input):
+ """Test empty feedback uses default_outcome."""
+
+ class TestFlow(Flow):
+ @start()
+ @human_feedback(
+ message="Review:",
+ emit=["approved", "needs_work"],
+ llm="gpt-4o-mini",
+ default_outcome="needs_work",
+ )
+ def review(self):
+ return "Content"
+
+ flow = TestFlow()
+
+ with patch.object(flow, "_request_human_feedback", return_value=""):
+ result = flow.kickoff()
+
+ assert result == "needs_work"
+ assert flow.last_human_feedback is not None
+ assert flow.last_human_feedback.outcome == "needs_work"
+
+ @patch("builtins.input", return_value="Approved!")
+ @patch("builtins.print")
+ def test_feedback_collapsing(self, mock_print, mock_input):
+ """Test that feedback is collapsed to an outcome."""
+
+ class TestFlow(Flow):
+ @start()
+ @human_feedback(
+ message="Review:",
+ emit=["approved", "rejected"],
+ llm="gpt-4o-mini",
+ )
+ def review(self):
+ return "Content"
+
+ flow = TestFlow()
+
+ with (
+ patch.object(flow, "_request_human_feedback", return_value="Looks great, approved!"),
+ patch.object(flow, "_collapse_to_outcome", return_value="approved"),
+ ):
+ result = flow.kickoff()
+
+ assert result == "approved"
+ assert flow.last_human_feedback is not None
+ assert flow.last_human_feedback.outcome == "approved"
+
+
+class TestHumanFeedbackHistory:
+ """Tests for human feedback history tracking."""
+
+ @patch("builtins.input", return_value="feedback")
+ @patch("builtins.print")
+ def test_history_accumulates(self, mock_print, mock_input):
+ """Test that multiple feedbacks are stored in history."""
+
+ class TestFlow(Flow):
+ @start()
+ @human_feedback(message="Review step 1:")
+ def step1(self):
+ return "Step 1 output"
+
+ @listen(step1)
+ @human_feedback(message="Review step 2:")
+ def step2(self, prev):
+ return "Step 2 output"
+
+ flow = TestFlow()
+
+ with patch.object(flow, "_request_human_feedback", return_value="feedback"):
+ flow.kickoff()
+
+ # Both feedbacks should be in history
+ assert len(flow.human_feedback_history) == 2
+ assert flow.human_feedback_history[0].method_name == "step1"
+ assert flow.human_feedback_history[1].method_name == "step2"
+
+ @patch("builtins.input", return_value="")
+ @patch("builtins.print")
+ def test_human_feedback_property_returns_last(self, mock_print, mock_input):
+ """Test that human_feedback property returns the last result."""
+
+ class TestFlow(Flow):
+ @start()
+ @human_feedback(message="Review:")
+ def generate(self):
+ return "output"
+
+ flow = TestFlow()
+
+ with patch.object(flow, "_request_human_feedback", return_value="last feedback"):
+ flow.kickoff()
+
+ assert flow.last_human_feedback is not None
+ assert flow.last_human_feedback.feedback == "last feedback"
+        assert flow.last_human_feedback == flow.human_feedback_history[-1]
+
+
+class TestCollapseToOutcome:
+ """Tests for the _collapse_to_outcome method."""
+
+ def test_exact_match(self):
+ """Test exact match returns the correct outcome."""
+ flow = Flow()
+
+ with patch("crewai.llm.LLM") as MockLLM:
+ mock_llm = MagicMock()
+ mock_llm.call.return_value = "approved"
+ MockLLM.return_value = mock_llm
+
+ result = flow._collapse_to_outcome(
+ feedback="I approve this",
+ outcomes=["approved", "rejected"],
+ llm="gpt-4o-mini",
+ )
+
+ assert result == "approved"
+
+ def test_partial_match(self):
+ """Test partial match finds the outcome in the response."""
+ flow = Flow()
+
+ with patch("crewai.llm.LLM") as MockLLM:
+ mock_llm = MagicMock()
+ mock_llm.call.return_value = "The outcome is approved based on the feedback"
+ MockLLM.return_value = mock_llm
+
+ result = flow._collapse_to_outcome(
+ feedback="Looks good",
+ outcomes=["approved", "rejected"],
+ llm="gpt-4o-mini",
+ )
+
+ assert result == "approved"
+
+ def test_fallback_to_first(self):
+ """Test that unmatched response falls back to first outcome."""
+ flow = Flow()
+
+ with patch("crewai.llm.LLM") as MockLLM:
+ mock_llm = MagicMock()
+ mock_llm.call.return_value = "something completely different"
+ MockLLM.return_value = mock_llm
+
+ result = flow._collapse_to_outcome(
+ feedback="Unclear feedback",
+ outcomes=["approved", "rejected"],
+ llm="gpt-4o-mini",
+ )
+
+ assert result == "approved" # First in list
diff --git a/lib/crewai/tests/test_human_feedback_integration.py b/lib/crewai/tests/test_human_feedback_integration.py
new file mode 100644
index 000000000..dd21724b4
--- /dev/null
+++ b/lib/crewai/tests/test_human_feedback_integration.py
@@ -0,0 +1,428 @@
+"""Integration tests for the @human_feedback decorator with Flow.
+
+This module tests the integration of @human_feedback with @listen,
+routing behavior, multi-step flows, and state management.
+"""
+
+from __future__ import annotations
+
+import asyncio
+from unittest.mock import patch
+
+import pytest
+
+from crewai.flow import Flow, HumanFeedbackResult, human_feedback, listen, start
+from crewai.flow.flow import FlowState
+
+
+class TestRoutingIntegration:
+ """Tests for routing integration with @listen decorators."""
+
+ @patch("builtins.input", return_value="I approve")
+ @patch("builtins.print")
+ def test_routes_to_matching_listener(self, mock_print, mock_input):
+ """Test that collapsed outcome routes to the matching @listen method."""
+ execution_order = []
+
+ class ReviewFlow(Flow):
+ @start()
+ @human_feedback(
+ message="Review:",
+ emit=["approved", "rejected"],
+ llm="gpt-4o-mini",
+ )
+ def generate(self):
+ execution_order.append("generate")
+ return "content"
+
+ @listen("approved")
+ def on_approved(self):
+ execution_order.append("on_approved")
+ return "published"
+
+ @listen("rejected")
+ def on_rejected(self):
+ execution_order.append("on_rejected")
+ return "discarded"
+
+ flow = ReviewFlow()
+
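+        # Stub the feedback prompt and the LLM outcome collapse so routing is deterministic.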
+ with (
+ patch.object(flow, "_request_human_feedback", return_value="Approved!"),
+ patch.object(flow, "_collapse_to_outcome", return_value="approved"),
+ ):
+ result = flow.kickoff()
+
+ assert "generate" in execution_order
+ assert "on_approved" in execution_order
+ assert "on_rejected" not in execution_order
+
+ @patch("builtins.input", return_value="")
+ @patch("builtins.print")
+ def test_default_outcome_routes_correctly(self, mock_print, mock_input):
+ """Test that default_outcome routes when no feedback provided."""
+ executed_listener = []
+
+ class ReviewFlow(Flow):
+ @start()
+ @human_feedback(
+ message="Review:",
+ emit=["approved", "needs_work"],
+ llm="gpt-4o-mini",
+ default_outcome="needs_work",
+ )
+ def generate(self):
+ return "content"
+
+ @listen("approved")
+ def on_approved(self):
+ executed_listener.append("approved")
+
+ @listen("needs_work")
+ def on_needs_work(self):
+ executed_listener.append("needs_work")
+
+ flow = ReviewFlow()
+
+ with patch.object(flow, "_request_human_feedback", return_value=""):
+ flow.kickoff()
+
+ assert "needs_work" in executed_listener
+ assert "approved" not in executed_listener
+
+
+class TestMultiStepFlows:
+ """Tests for multi-step flows with multiple @human_feedback decorators."""
+
+ @patch("builtins.input", side_effect=["Good draft", "Final approved"])
+ @patch("builtins.print")
+ def test_multiple_feedback_steps(self, mock_print, mock_input):
+ """Test a flow with multiple human feedback steps."""
+
+ class MultiStepFlow(Flow):
+ @start()
+ @human_feedback(message="Review draft:")
+ def draft(self):
+ return "Draft content"
+
+ @listen(draft)
+ @human_feedback(message="Final review:")
+ def final_review(self, prev_result: HumanFeedbackResult):
+ return f"Final content based on: {prev_result.feedback}"
+
+ flow = MultiStepFlow()
+
+ with patch.object(
+ flow, "_request_human_feedback", side_effect=["Good draft", "Approved"]
+ ):
+ flow.kickoff()
+
+ # Both feedbacks should be recorded
+ assert len(flow.human_feedback_history) == 2
+ assert flow.human_feedback_history[0].method_name == "draft"
+ assert flow.human_feedback_history[0].feedback == "Good draft"
+ assert flow.human_feedback_history[1].method_name == "final_review"
+ assert flow.human_feedback_history[1].feedback == "Approved"
+
+ @patch("builtins.input", return_value="feedback")
+ @patch("builtins.print")
+ def test_mixed_feedback_and_regular_methods(self, mock_print, mock_input):
+ """Test flow with both @human_feedback and regular methods."""
+ execution_order = []
+
+ class MixedFlow(Flow):
+ @start()
+ def generate(self):
+ execution_order.append("generate")
+ return "generated"
+
+ @listen(generate)
+ @human_feedback(message="Review:")
+ def review(self):
+ execution_order.append("review")
+ return "reviewed"
+
+ @listen(review)
+ def finalize(self, result):
+ execution_order.append("finalize")
+ return "finalized"
+
+ flow = MixedFlow()
+
+ with patch.object(flow, "_request_human_feedback", return_value="feedback"):
+ flow.kickoff()
+
+ assert execution_order == ["generate", "review", "finalize"]
+
+
+class TestStateManagement:
+ """Tests for state management with human feedback."""
+
+ @patch("builtins.input", return_value="approved")
+ @patch("builtins.print")
+ def test_feedback_available_in_listener(self, mock_print, mock_input):
+ """Test that feedback is accessible in downstream listeners."""
+ captured_feedback = []
+
+ class StateFlow(Flow):
+ @start()
+ @human_feedback(
+ message="Review:",
+ emit=["approved", "rejected"],
+ llm="gpt-4o-mini",
+ )
+ def review(self):
+ return "Content to review"
+
+ @listen("approved")
+ def on_approved(self):
+ # Access the feedback via property
+ captured_feedback.append(self.last_human_feedback)
+ return "done"
+
+ flow = StateFlow()
+
+ with (
+ patch.object(flow, "_request_human_feedback", return_value="Great content!"),
+ patch.object(flow, "_collapse_to_outcome", return_value="approved"),
+ ):
+ flow.kickoff()
+
+ assert len(captured_feedback) == 1
+ result = captured_feedback[0]
+ assert isinstance(result, HumanFeedbackResult)
+ assert result.output == "Content to review"
+ assert result.feedback == "Great content!"
+ assert result.outcome == "approved"
+
+ @patch("builtins.input", return_value="")
+ @patch("builtins.print")
+ def test_history_preserved_across_steps(self, mock_print, mock_input):
+ """Test that feedback history is preserved across flow execution."""
+
+ class HistoryFlow(Flow):
+ @start()
+ @human_feedback(message="Step 1:")
+ def step1(self):
+ return "Step 1"
+
+ @listen(step1)
+ @human_feedback(message="Step 2:")
+ def step2(self, result):
+ return "Step 2"
+
+ @listen(step2)
+ def final(self, result):
+ # Access history
+ return len(self.human_feedback_history)
+
+ flow = HistoryFlow()
+
+ with patch.object(flow, "_request_human_feedback", return_value="feedback"):
+ result = flow.kickoff()
+
+ # Final method should see 2 feedback entries
+ assert result == 2
+
+
+class TestAsyncFlowIntegration:
+ """Tests for async flow integration."""
+
+ @pytest.mark.asyncio
+ async def test_async_flow_with_human_feedback(self):
+ """Test that @human_feedback works with async flows."""
+ executed = []
+
+ class AsyncFlow(Flow):
+ @start()
+ @human_feedback(message="Review:")
+ async def async_review(self):
+ executed.append("async_review")
+ await asyncio.sleep(0.01) # Simulate async work
+ return "async content"
+
+ flow = AsyncFlow()
+
+ with patch.object(flow, "_request_human_feedback", return_value="feedback"):
+ await flow.kickoff_async()
+
+ assert "async_review" in executed
+ assert flow.last_human_feedback is not None
+ assert flow.last_human_feedback.output == "async content"
+
+
+class TestWithStructuredState:
+ """Tests for flows with structured (Pydantic) state."""
+
+ @patch("builtins.input", return_value="approved")
+ @patch("builtins.print")
+ def test_with_pydantic_state(self, mock_print, mock_input):
+ """Test human feedback with structured Pydantic state."""
+
+ class ReviewState(FlowState):
+ content: str = ""
+ review_count: int = 0
+
+ class StructuredFlow(Flow[ReviewState]):
+ initial_state = ReviewState
+
+ @start()
+ @human_feedback(
+ message="Review:",
+ emit=["approved", "rejected"],
+ llm="gpt-4o-mini",
+ )
+ def review(self):
+ self.state.content = "Generated content"
+ self.state.review_count += 1
+ return self.state.content
+
+ @listen("approved")
+ def on_approved(self):
+ return f"Approved: {self.state.content}"
+
+ flow = StructuredFlow()
+
+ with (
+ patch.object(flow, "_request_human_feedback", return_value="LGTM"),
+ patch.object(flow, "_collapse_to_outcome", return_value="approved"),
+ ):
+ result = flow.kickoff()
+
+ assert flow.state.review_count == 1
+ assert flow.last_human_feedback is not None
+ assert flow.last_human_feedback.feedback == "LGTM"
+
+
+class TestMetadataPassthrough:
+ """Tests for metadata passthrough functionality."""
+
+ @patch("builtins.input", return_value="")
+ @patch("builtins.print")
+ def test_metadata_included_in_result(self, mock_print, mock_input):
+ """Test that metadata is passed through to HumanFeedbackResult."""
+
+ class MetadataFlow(Flow):
+ @start()
+ @human_feedback(
+ message="Review:",
+ metadata={"channel": "slack", "priority": "high"},
+ )
+ def review(self):
+ return "content"
+
+ flow = MetadataFlow()
+
+ with patch.object(flow, "_request_human_feedback", return_value="feedback"):
+ flow.kickoff()
+
+ result = flow.last_human_feedback
+ assert result is not None
+ assert result.metadata == {"channel": "slack", "priority": "high"}
+
+
+class TestEventEmission:
+ """Tests for event emission during human feedback."""
+
+ @patch("builtins.input", return_value="test feedback")
+ @patch("builtins.print")
+ def test_events_emitted_on_feedback_request(self, mock_print, mock_input):
+ """Test that events are emitted when feedback is requested."""
+ from crewai.events.event_listener import event_listener
+
+ class EventFlow(Flow):
+ @start()
+ @human_feedback(message="Review:")
+ def review(self):
+ return "content"
+
+ flow = EventFlow()
+
+ # We can't easily capture events in tests, but we can verify
+ # the flow executes without errors
+ with (
+ patch.object(
+ event_listener.formatter, "pause_live_updates", return_value=None
+ ),
+ patch.object(
+ event_listener.formatter, "resume_live_updates", return_value=None
+ ),
+ ):
+ flow.kickoff()
+
+ assert flow.last_human_feedback is not None
+
+
+class TestEdgeCases:
+ """Tests for edge cases and error handling."""
+
+ @patch("builtins.input", return_value="")
+ @patch("builtins.print")
+ def test_empty_feedback_first_outcome_fallback(self, mock_print, mock_input):
+ """Test that empty feedback without default uses first outcome."""
+
+ class FallbackFlow(Flow):
+ @start()
+ @human_feedback(
+ message="Review:",
+ emit=["first", "second", "third"],
+ llm="gpt-4o-mini",
+ # No default_outcome specified
+ )
+ def review(self):
+ return "content"
+
+ flow = FallbackFlow()
+
+ with patch.object(flow, "_request_human_feedback", return_value=""):
+ result = flow.kickoff()
+
+ assert result == "first" # Falls back to first outcome
+
+ @patch("builtins.input", return_value="whitespace only ")
+ @patch("builtins.print")
+ def test_whitespace_only_feedback_treated_as_empty(self, mock_print, mock_input):
+ """Test that whitespace-only feedback is treated as empty."""
+
+ class WhitespaceFlow(Flow):
+ @start()
+ @human_feedback(
+ message="Review:",
+ emit=["approve", "reject"],
+ llm="gpt-4o-mini",
+ default_outcome="reject",
+ )
+ def review(self):
+ return "content"
+
+ flow = WhitespaceFlow()
+
+ with patch.object(flow, "_request_human_feedback", return_value=" "):
+ result = flow.kickoff()
+
+ assert result == "reject" # Uses default because feedback is empty after strip
+
+ @patch("builtins.input", return_value="feedback")
+ @patch("builtins.print")
+ def test_feedback_result_without_routing(self, mock_print, mock_input):
+ """Test that HumanFeedbackResult is returned when not routing."""
+
+ class NoRoutingFlow(Flow):
+ @start()
+ @human_feedback(message="Review:")
+ def review(self):
+ return "content"
+
+ flow = NoRoutingFlow()
+
+ with patch.object(flow, "_request_human_feedback", return_value="feedback"):
+ result = flow.kickoff()
+
+ # Result should be HumanFeedbackResult when not routing
+ assert isinstance(result, HumanFeedbackResult)
+ assert result.output == "content"
+ assert result.feedback == "feedback"
+ assert result.outcome is None # No routing, no outcome