Mirror of https://github.com/AtsushiSakai/PythonRobotics.git (synced 2026-01-10 05:28:07 -05:00)
* consolidate Node definition
* add base class for single agent planner
* add base class for single agent planner
* its working
* use single agent plotting util
* cleanup, bug fix, add some results to docs
* remove seeding from sta* - it happens in Node
* remove stale todo
* rename CA* and speed up plotting
* paper
* proper paper (ofc its csail)
* some cleanup
* update docs
* add unit test
* add logic for saving animation as gif
* address github bot
* Revert "add logic for saving animation as gif"
This reverts commit 639167795c.
* fix tests
* docs lint
* add gifs
* copilot review
* appease mypy
49 lines · 1.4 KiB · Python
from abc import ABC, abstractmethod
from dataclasses import dataclass
from PathPlanning.TimeBasedPathPlanning.GridWithDynamicObstacles import (
    Grid,
    Position,
)
from PathPlanning.TimeBasedPathPlanning.Node import NodePath
import random
import numpy.random as numpy_random

# Seed randomness for reproducibility
RANDOM_SEED = 50
random.seed(RANDOM_SEED)
numpy_random.seed(RANDOM_SEED)


class SingleAgentPlanner(ABC):
    """
    Base class for single agent planners
    """

    @staticmethod
    @abstractmethod
    def plan(grid: Grid, start: Position, goal: Position, verbose: bool = False) -> NodePath:
        pass


@dataclass
class StartAndGoal:
    # Index of this agent
    index: int
    # Start position of the robot
    start: Position
    # Goal position of the robot
    goal: Position

    def distance_start_to_goal(self) -> float:
        # Squared Euclidean distance between start and goal; the square root is
        # skipped because the value is only used to order agents.
        return pow(self.goal.x - self.start.x, 2) + pow(self.goal.y - self.start.y, 2)


class MultiAgentPlanner(ABC):
    """
    Base class for multi-agent planners
    """

    @staticmethod
    @abstractmethod
    def plan(grid: Grid, start_and_goal_positions: list[StartAndGoal], single_agent_planner_class: SingleAgentPlanner, verbose: bool = False) -> tuple[list[StartAndGoal], list[NodePath]]:
        """
        Plan for all agents. Returned paths are in order corresponding to the returned list of `StartAndGoal` objects.
        """
        pass
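
Below is a minimal, self-contained usage sketch of the interface defined above. Everything in it is illustrative: ToyPosition, ToyPath, StraightLinePlanner, ToyStartAndGoal, and ToyPriorityPlanner are hypothetical stand-ins, not part of PythonRobotics. The real Grid, Position, and NodePath constructors are not shown in this file, so the grid argument is dropped for brevity, and the longest-trip-first ordering is only one plausible priority scheme, not necessarily what the planners in this repository use.

from abc import ABC, abstractmethod
from dataclasses import dataclass


@dataclass(frozen=True)
class ToyPosition:
    # Stand-in for Position; only the .x and .y attributes are assumed.
    x: int
    y: int


@dataclass
class ToyPath:
    # Stand-in for NodePath: just an ordered list of positions.
    positions: list[ToyPosition]


class ToySingleAgentPlanner(ABC):
    # Mirrors SingleAgentPlanner: a stateless, static plan() entry point.
    @staticmethod
    @abstractmethod
    def plan(start: ToyPosition, goal: ToyPosition, verbose: bool = False) -> ToyPath:
        pass


class StraightLinePlanner(ToySingleAgentPlanner):
    # Hypothetical planner: walks one axis at a time and ignores obstacles.
    @staticmethod
    def plan(start: ToyPosition, goal: ToyPosition, verbose: bool = False) -> ToyPath:
        positions = [start]
        x, y = start.x, start.y
        while x != goal.x:
            x += 1 if goal.x > x else -1
            positions.append(ToyPosition(x, y))
        while y != goal.y:
            y += 1 if goal.y > y else -1
            positions.append(ToyPosition(x, y))
        if verbose:
            print(f"planned {len(positions)} steps")
        return ToyPath(positions)


@dataclass
class ToyStartAndGoal:
    # Stand-in for StartAndGoal.
    index: int
    start: ToyPosition
    goal: ToyPosition

    def distance_start_to_goal(self) -> float:
        # Squared Euclidean distance, used only to order agents.
        return (self.goal.x - self.start.x) ** 2 + (self.goal.y - self.start.y) ** 2


class ToyPriorityPlanner:
    # Mirrors MultiAgentPlanner: plan agents one at a time, longest trip first.
    @staticmethod
    def plan(
        tasks: list[ToyStartAndGoal],
        single_agent_planner_class: type[ToySingleAgentPlanner],
        verbose: bool = False,
    ) -> tuple[list[ToyStartAndGoal], list[ToyPath]]:
        ordered = sorted(tasks, key=lambda t: t.distance_start_to_goal(), reverse=True)
        paths = [single_agent_planner_class.plan(t.start, t.goal, verbose) for t in ordered]
        return ordered, paths


if __name__ == "__main__":
    tasks = [
        ToyStartAndGoal(index=0, start=ToyPosition(0, 0), goal=ToyPosition(3, 1)),
        ToyStartAndGoal(index=1, start=ToyPosition(5, 5), goal=ToyPosition(1, 2)),
    ]
    order, paths = ToyPriorityPlanner.plan(tasks, StraightLinePlanner, verbose=True)
    for task, path in zip(order, paths):
        print(task.index, [(p.x, p.y) for p in path.positions])

The pattern to note is that plan is a @staticmethod on an ABC, so a multi-agent planner can be handed the planner class itself (the single_agent_planner_class argument) and invoke it once per agent without constructing an instance.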